Dataset schema (one record per repository):

repo_name: string (lengths 8–130)
hexsha: sequence (one hex SHA per file)
file_path: sequence (one path per file)
code: sequence (one source string per file)
apis: sequence (one list of called library APIs per file)
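A minimal sketch of iterating over records with this schema. The JSON Lines storage format and the file name "data.jsonl" are assumptions; the field names follow the schema above:

```python
import json

# Hypothetical reader: one record per line; "data.jsonl" is a placeholder name.
with open("data.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # file_path, code and apis are parallel per-file sequences.
        for path, code, apis in zip(record["file_path"],
                                    record["code"],
                                    record["apis"]):
            print(record["repo_name"], path, len(apis))
```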
cch1999/protein_dynamics
[ "f24031d19f527f196d7c8bb822435ec1ac657da0" ]
[ "obselete/model4.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.nn.functional import normalize\n\nfrom random import shuffle\n\nfrom utils import MLP, read_input_file, _compute_connectivity, rmsd, save_structure\nimport matplotlib.pyplot as plt\nimport os\nfrom pykeops.torch import LazyTensor\nfrom tqdm import tqdm\n\nmodel_dir = os.path.dirname(os.path.realpath(__file__))\ndataset_dir = os.path.join(model_dir, \"datasets\")\ntrain_val_dir = os.path.join(model_dir, \"protein_data\", \"train_val\")\ntrained_model_file = os.path.join(model_dir, \"test_model2.pt\")\n\ntrain_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"train.txt\"))]\nval_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"val.txt\" ))]\n\ndevice = \"cuda:5\"\n\ntorch.set_num_threads(12)\n\n\natoms = [\"N\", \"CA\", \"C\", \"cent\"]\n\n# Last value is the number of atoms in the next residue\nangles = [\n\t(\"N\", \"CA\", \"C\" , 0), (\"CA\", \"C\" , \"N\" , 1), (\"C\", \"N\", \"CA\", 2),\n\t(\"N\", \"CA\", \"cent\", 0), (\"C\" , \"CA\", \"cent\", 0),\n]\n\n# Last value is the number of atoms in the next residue\ndihedrals = [\n\t(\"C\", \"N\", \"CA\", \"C\" , 3), (\"N\" , \"CA\", \"C\", \"N\", 1), (\"CA\", \"C\", \"N\", \"CA\", 2),\n\t(\"C\", \"N\", \"CA\", \"cent\", 3), (\"cent\", \"CA\", \"C\", \"N\", 1),\n]\n\naas = [\n\t\"A\", \"R\", \"N\", \"D\", \"C\", \"E\", \"Q\", \"G\", \"H\", \"I\",\n\t\"L\", \"K\", \"M\", \"F\", \"P\", \"S\", \"T\", \"W\", \"Y\", \"V\",\n]\nn_aas = len(aas)\n\nclass ProteinDataset(Dataset):\n def __init__(self, pdbids, coord_dir, device=\"cpu\"):\n self.pdbids = pdbids\n self.coord_dir = coord_dir\n self.set_size = len(pdbids)\n self.device = device\n\n def __len__(self):\n return self.set_size\n\n def __getitem__(self, index):\n fp = os.path.join(self.coord_dir, self.pdbids[index] + \".txt\")\n return get_features(fp, device=self.device)\n\nclass DistanceForces(nn.Module):\n\t\"\"\"\n\tCalculates forces between two atoms based on their \n\t\t1. atoms types\n\t\t2. Euclidian distance\n\t\t3. Seperation along the sequence\n\n\tInput dim = 50 (24*2 + 2)\n\tOutput dim = 1 (a scalar force)\n\t\"\"\"\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(DistanceForces, self).__init__()\n\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear((2*24)+2, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, output_size))\n\n\tdef forward(self, atom1, atom2, edges):\n\n\t\tmessages = torch.cat([atom1, atom2, edges], dim=1)\n\n\t\treturn self.model(messages)\n\nclass AngleForces(nn.Module):\n\t\"\"\"\n\tCalculates forces between three atoms making an angle on their \n\t\t1. central atom types\n\t\t2. 
angle around the central atom\n\n\tInput dim = 25 (24 + 1)\n\tOutput dim = 1 (a scalar force)\n\t\"\"\"\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(AngleForces, self).__init__()\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear(input_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, output_size))\n\n\tdef forward(self, central_atom, angles):\n\n\t\tmessages = torch.cat([central_atom, angles[:,:,None]], dim=2)\n\n\t\treturn self.model(messages)\n\nclass Simulator(nn.Module):\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(Simulator, self).__init__()\n\n\t\tself.distance_forces = DistanceForces(50, 128, 1)\n\t\tself.angle_forces = AngleForces(24+1, 128, 1)\n\n\tdef forward(self, coords, node_f, res_numbers, masses, seq,\n\t\t\t\tradius, n_steps, timestep, temperature, animation, device):\n\n\t\tn_atoms = coords.shape[0]\n\t\tn_res = n_atoms // len(atoms)\n\t\tmodel_n = 0\n\n\t\tvels = torch.randn(coords.shape).to(device) * temperature\n\t\taccs_last = torch.zeros(coords.shape).to(device)\n\t\trandn_coords = coords + vels * timestep * n_steps\n\t\tloss, passed = rmsd(randn_coords, coords)\t\t\n\n\t\tfor i in range(n_steps):\n\n\t\t\tcoords = coords + vels * timestep + 0.5 * accs_last * timestep * timestep\n\n\t\t\tk = 15\n\t\t\tidx = knn(coords, k+1)\n\t\t\tsenders = idx[:,0].repeat_interleave(k)\n\t\t\treceivers = idx[:,1:].reshape(n_atoms*k)\n\n\t\t\t# Calc Euclidian distance\n\t\t\tdiffs = coords[senders] - coords[receivers]\n\t\t\tdists = diffs.norm(dim=1)\n\t\t\tnorm_diffs = diffs / dists.clamp(min=0.01).unsqueeze(1)\n\n\t\t\t# Calc sequence seperation\n\t\t\tseq_sep = abs(res_numbers[senders] - res_numbers[receivers])/5\n\t\t\tmask = seq_sep > 1\n\t\t\tseq_sep[mask] = 1\n\n\t\t\t# Concat edge features\n\t\t\tedges = torch.cat([dists.unsqueeze(1), seq_sep], dim=1)\n\n\t\t\t# Compute forces using MLP\n\t\t\tforces = self.distance_forces(node_f[senders], node_f[receivers], edges)\n\t\t\tforces = forces * norm_diffs\n\t\t\ttotal_forces = forces.view(n_atoms, k, 3).sum(1)/100\n\t\t\t\n\t\t\tbatch_size = 1\n\t\t\tatom_types = node_f.view(batch_size, n_res, len(atoms), 24)\n\t\t\tatom_coords = coords.view(batch_size, n_res, 3 * len(atoms))\n\t\t\tatom_accs = torch.zeros(batch_size, n_res, 3 * len(atoms), device=device)\n\t\t\t# Angle forces\n\t\t\t# across_res is the number of atoms in the next residue, starting from atom_3\n\t\t\tfor ai, (atom_1, atom_2, atom_3, across_res) in enumerate(angles):\n\t\t\t\t# Calc vectors and angle between atoms\n\t\t\t\tai_1, ai_2, ai_3 = atoms.index(atom_1), atoms.index(atom_2), atoms.index(atom_3)\n\t\t\t\tif across_res == 0:\n\t\t\t\t\tba = atom_coords[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\telif across_res == 1:\n\t\t\t\t\tba = atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\telif across_res == 2:\n\t\t\t\t\tba = atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\tba_norms = 
ba.norm(dim=2)\n\t\t\t\tbc_norms = bc.norm(dim=2)\n\t\t\t\tangs = torch.acos((ba * bc).sum(dim=2) / (ba_norms * bc_norms))\n\t\t\t\t# Get central atom properties\n\t\t\t\tif ai == 0 or ai == 3 or ai == 4:\n\t\t\t\t\tcentral_atom_types = atom_types[:,:,1,:]\n\t\t\t\telif ai == 1:\n\t\t\t\t\tcentral_atom_types = atom_types[:,:-1,2,:]\n\t\t\t\telif ai == 2:\n\t\t\t\t\tcentral_atom_types = atom_types[:,1:,0,:]\n\n\t\t\t\tangle_forces = self.angle_forces(central_atom_types, angs)\n\n\t\t\t\tcross_ba_bc = torch.cross(ba, bc, dim=2)\n\t\t\t\tfa = angle_forces * normalize(torch.cross( ba, cross_ba_bc, dim=2), dim=2) / ba_norms.unsqueeze(2)\n\t\t\t\tfc = angle_forces * normalize(torch.cross(-bc, cross_ba_bc, dim=2), dim=2) / bc_norms.unsqueeze(2)\n\t\t\t\tfb = -fa -fc\n\t\t\t\tif across_res == 0:\n\t\t\t\t\tatom_accs[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, : , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\t\t\t\telif across_res == 1:\n\t\t\t\t\tatom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\t\t\t\telif across_res == 2:\n\t\t\t\t\tatom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\n\t\t\t# Calc distance accs\n\t\t\taccs = total_forces/masses.unsqueeze(1)\n\t\t\t# Calc angle accs\n\t\t\taccs += atom_accs.view(n_atoms, 3) / (masses.unsqueeze(1)*100)\n\n\n\t\t\tvels = vels + 0.5 * (accs_last + accs) * timestep\n\t\t\taccs_last = accs\n\n\t\t\tif animation:\n\t\t\t\tmodel_n += 1\n\t\t\t\tsave_structure(coords[None,:,:], \"animation.pdb\", seq, model_n)\n\n\t\treturn coords, loss\n\ndef knn(coords, k):\n\t\"\"\"\n\tFinds the k-nearest neibours\n\t\"\"\"\n\tcoords = coords.to(device)\n\n\tN, D = coords.shape\n\txyz_i = LazyTensor(coords[:, None, :])\n\txyz_j = LazyTensor(coords[None, :, :])\n\n\tpairwise_distance_ij = ((xyz_i - xyz_j) ** 2).sum(-1)\n\n\tidx = pairwise_distance_ij.argKmin(K=k, axis=1) # (N, K)\n\n\treturn idx\n\ndef get_features(fp, device):\n\n\n\tnative_coords, inters_ang, inters_dih, masses, seq = read_input_file(fp)\n\n\tone_hot_atoms = torch.tensor([[1,0,0,0],\n\t\t\t\t\t\t\t\t[0,1,0,0],\n\t\t\t\t\t\t\t\t[0,0,1,0],\n\t\t\t\t\t\t\t\t[0,0,0,1]])\n\tone_hot_atoms = one_hot_atoms.repeat(len(seq), 1)\n\n\tone_hot_seq = torch.zeros(len(seq)*4, 20)\n\tfor i, aa in enumerate(seq):\n\t\tindex = aas.index(aa)\n\t\tone_hot_seq[i*4:(i+1)*4, index] = 1\n\n\tres_numbers = torch.cat([torch.ones(4,1)*i for i in range(len(seq))])\n\n\tnode_f = torch.cat([one_hot_atoms, one_hot_seq], dim=1)\n\n\treturn native_coords.to(device), node_f.to(device), res_numbers.to(device), masses.to(device), seq\n\nif __name__ == \"__main__\":\n\n\tdata_dir = \"protein_data/train_val/\"\n\tdata = os.listdir(data_dir)\n\n\tmodel = Simulator(50, 128, 1).to(device)\n\n\toptimizer = torch.optim.Adam(model.parameters(), lr=0.0005)\n\n\n\tlosses = []\n\n\tpytorch_total_params = sum(p.numel() for p in model.parameters())\n\tprint(pytorch_total_params)\n\n\ttrain_set = ProteinDataset(train_proteins, train_val_dir, device=device)\n\tval_set = ProteinDataset(val_proteins , train_val_dir, device=device)\n\n\tfor i in range(20):\n\t\tprint(f\"Starting Epoch {i}:\")\n\n\t\ttrain_inds = list(range(len(train_set)))\n\t\tval_inds = 
list(range(len(val_set)))\n\t\tshuffle(train_inds)\n\t\tshuffle(val_inds)\n\t\tmodel.train()\n\t\toptimizer.zero_grad()\n\t\tfor protein in tqdm(train_inds):\n\n\t\t\tcoords, node_f, res_numbers, masses, seq = train_set[protein]\n\n\t\t\tmodel.train()\n\t\t\tprint('Forward')\n\t\t\tout, basic_loss = model(coords, node_f, res_numbers, masses, seq, 10, \n\t\t\t\t\t\t\tn_steps=800, timestep=0.02, temperature=0.02,\n\t\t\t\t\t\t\tanimation=False, device=device)\n\t\t\tprint('done forward')\n\t\t\tloss, passed = rmsd(out, coords)\n\t\t\tloss_log = torch.log(1.0 + loss)\n\t\t\tloss_log.backward()\n\t\t\tprint('Done backprop')\n\t\t\toptimizer.step()\n\t\t\toptimizer.zero_grad()\n\t\t\tlosses.append(loss - basic_loss)\n\n\t\t\tprint(\"Epoch:\", i)\n\t\t\tprint(\"Basic loss:\", round(basic_loss.item(),3))\n\t\t\tprint(\"----- Loss:\", round(loss.item(),3))\n\t\t\tprint(\"-Loss diff:\", round(loss.item() - basic_loss.item(), 3))\n\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\tcoords, node_f, res_numbers, masses, seq = get_features(\"protein_data/example/1CRN.txt\", device=device)\n\n\t\t\tout, basic_loss = model(coords, node_f, res_numbers, masses, seq, 10, \n\t\t\t\t\t\t\tn_steps=500, timestep=0.02, temperature=0.2,\n\t\t\t\t\t\t\tanimation=False, device=device)\n\t\t\n\n\t\ttorch.save(model.state_dict(), os.path.join(model_dir, f\"models/model_ang{i}.pt\"))\n\n\t\n\t\tplt.plot(losses)\n\t\tplt.xlim(0)\n\t\tplt.ylabel(\"Loss - RMSD (A)\")\n\t\tplt.xlabel(\"Epoch\")\n\t\tplt.title(f'No. epochs = {i+1}')\n\t\tplt.legend()\n\t\tplt.savefig('with_angles.png')\n" ]
[ [ "torch.ones", "matplotlib.pyplot.legend", "torch.nn.Linear", "torch.randn", "matplotlib.pyplot.savefig", "torch.no_grad", "torch.tensor", "matplotlib.pyplot.xlim", "torch.set_num_threads", "matplotlib.pyplot.title", "torch.nn.ReLU", "torch.log", "matplotlib.pyplot.ylabel", "torch.cross", "torch.zeros", "matplotlib.pyplot.plot", "torch.cat", "matplotlib.pyplot.xlabel" ] ]
contactlp/xgboost
[ "1d0ca49761d6a7dace5aec6af80c4aef7367fc2f" ]
[ "python-package/xgboost/training.py" ]
[ "# coding: utf-8\n# pylint: disable=too-many-locals, too-many-arguments, invalid-name\n# pylint: disable=too-many-branches, too-many-statements\n\"\"\"Training Library containing training routines.\"\"\"\nfrom __future__ import absolute_import\n\nimport warnings\nimport numpy as np\nfrom .core import Booster, STRING_TYPES, XGBoostError, CallbackEnv, EarlyStopException\nfrom .compat import (SKLEARN_INSTALLED, XGBStratifiedKFold)\nfrom . import rabit\nfrom . import callback\n\n\ndef _train_internal(params, dtrain,\n num_boost_round=10, evals=(),\n obj=None, feval=None,\n xgb_model=None, callbacks=None):\n \"\"\"internal training function\"\"\"\n callbacks = [] if callbacks is None else callbacks\n evals = list(evals)\n if isinstance(params, dict) \\\n and 'eval_metric' in params \\\n and isinstance(params['eval_metric'], list):\n params = dict((k, v) for k, v in params.items())\n eval_metrics = params['eval_metric']\n params.pop(\"eval_metric\", None)\n params = list(params.items())\n for eval_metric in eval_metrics:\n params += [('eval_metric', eval_metric)]\n\n bst = Booster(params, [dtrain] + [d[0] for d in evals])\n nboost = 0\n num_parallel_tree = 1\n\n if xgb_model is not None:\n bst = Booster(params, [dtrain] + [d[0] for d in evals],\n model_file=xgb_model)\n nboost = len(bst.get_dump())\n\n _params = dict(params) if isinstance(params, list) else params\n\n if 'num_parallel_tree' in _params:\n num_parallel_tree = _params['num_parallel_tree']\n nboost //= num_parallel_tree\n if 'num_class' in _params:\n nboost //= _params['num_class']\n\n # Distributed code: Load the checkpoint from rabit.\n version = bst.load_rabit_checkpoint()\n assert rabit.get_world_size() != 1 or version == 0\n rank = rabit.get_rank()\n start_iteration = int(version / 2)\n nboost += start_iteration\n\n callbacks_before_iter = [\n cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]\n callbacks_after_iter = [\n cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]\n\n for i in range(start_iteration, num_boost_round):\n for cb in callbacks_before_iter:\n cb(CallbackEnv(model=bst,\n cvfolds=None,\n iteration=i,\n begin_iteration=start_iteration,\n end_iteration=num_boost_round,\n rank=rank,\n evaluation_result_list=None))\n # Distributed code: need to resume to this point.\n # Skip the first update if it is a recovery step.\n if version % 2 == 0:\n bst.update(dtrain, i, obj)\n bst.save_rabit_checkpoint()\n version += 1\n\n assert rabit.get_world_size() == 1 or version == rabit.version_number()\n\n nboost += 1\n evaluation_result_list = []\n # check evaluation result.\n if evals:\n bst_eval_set = bst.eval_set(evals, i, feval)\n if isinstance(bst_eval_set, STRING_TYPES):\n msg = bst_eval_set\n else:\n msg = bst_eval_set.decode()\n res = [x.split(':') for x in msg.split()]\n evaluation_result_list = [(k, float(v)) for k, v in res[1:]]\n try:\n for cb in callbacks_after_iter:\n cb(CallbackEnv(model=bst,\n cvfolds=None,\n iteration=i,\n begin_iteration=start_iteration,\n end_iteration=num_boost_round,\n rank=rank,\n evaluation_result_list=evaluation_result_list))\n except EarlyStopException:\n break\n # do checkpoint after evaluation, in case evaluation also updates booster.\n bst.save_rabit_checkpoint()\n version += 1\n\n if bst.attr('best_score') is not None:\n bst.best_score = float(bst.attr('best_score'))\n bst.best_iteration = int(bst.attr('best_iteration'))\n else:\n bst.best_iteration = nboost - 1\n bst.best_ntree_limit = (bst.best_iteration + 1) * num_parallel_tree\n return 
bst\n\n\ndef train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,\n maximize=False, early_stopping_rounds=None, evals_result=None,\n verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):\n # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init\n \"\"\"Train a booster with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round: int\n Number of boosting iterations.\n evals: list of pairs (DMatrix, string)\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n obj : function\n Customized objective function.\n feval : function\n Customized evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Validation metric needs to improve at least once in\n every **early_stopping_rounds** round(s) to continue training.\n Requires at least one item in **evals**.\n The method returns the model from the last iteration (not the best one).\n If there's more than one item in **evals**, the last entry will be used\n for early stopping.\n If there's more than one metric in the **eval_metric** parameter given in\n **params**, the last metric will be used for early stopping.\n If early stopping occurs, the model will have three additional fields:\n ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.\n (Use ``bst.best_ntree_limit`` to get the correct value if\n ``num_parallel_tree`` and/or ``num_class`` appears in the parameters)\n evals_result: dict\n This dictionary stores the evaluation results of all the items in watchlist.\n\n Example: with a watchlist containing\n ``[(dtest,'eval'), (dtrain,'train')]`` and\n a parameter containing ``('eval_metric': 'logloss')``,\n the **evals_result** returns\n\n .. code-block:: python\n\n {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}\n\n verbose_eval : bool or int\n Requires at least one item in **evals**.\n If **verbose_eval** is True then the evaluation metric on the validation set is\n printed at each boosting stage.\n If **verbose_eval** is an integer then the evaluation metric on the validation set\n is printed at every given **verbose_eval** boosting stage. The last boosting stage\n / the boosting stage found by using **early_stopping_rounds** is also printed.\n Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric\n is printed every 4 boosting stages, instead of every boosting stage.\n learning_rates: list or function (deprecated - use callback API instead)\n List of learning rate for each boosting round\n or a customized function that calculates eta in terms of\n current number of round and the total number of boosting round (e.g. yields\n learning rate decay)\n xgb_model : file name of stored xgb model or 'Booster' instance\n Xgb model to be loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. 
code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n\n Returns\n -------\n Booster : a trained booster model\n \"\"\"\n callbacks = [] if callbacks is None else callbacks\n\n # Most of legacy advanced options becomes callbacks\n if isinstance(verbose_eval, bool) and verbose_eval:\n callbacks.append(callback.print_evaluation())\n else:\n if isinstance(verbose_eval, int):\n callbacks.append(callback.print_evaluation(verbose_eval))\n\n if early_stopping_rounds is not None:\n callbacks.append(callback.early_stop(early_stopping_rounds,\n maximize=maximize,\n verbose=bool(verbose_eval)))\n if evals_result is not None:\n callbacks.append(callback.record_evaluation(evals_result))\n\n if learning_rates is not None:\n warnings.warn(\"learning_rates parameter is deprecated - use callback API instead\",\n DeprecationWarning)\n callbacks.append(callback.reset_learning_rate(learning_rates))\n\n return _train_internal(params, dtrain,\n num_boost_round=num_boost_round,\n evals=evals,\n obj=obj, feval=feval,\n xgb_model=xgb_model, callbacks=callbacks)\n\n\nclass CVPack(object):\n \"\"\"\"Auxiliary datastruct to hold one fold of CV.\"\"\"\n def __init__(self, dtrain, dtest, param):\n \"\"\"\"Initialize the CVPack\"\"\"\n self.dtrain = dtrain\n self.dtest = dtest\n self.watchlist = [(dtrain, 'train'), (dtest, 'test')]\n self.bst = Booster(param, [dtrain, dtest])\n\n def update(self, iteration, fobj):\n \"\"\"\"Update the boosters for one iteration\"\"\"\n self.bst.update(self.dtrain, iteration, fobj)\n\n def eval(self, iteration, feval):\n \"\"\"\"Evaluate the CVPack for one iteration.\"\"\"\n return self.bst.eval_set(self.watchlist, iteration, feval)\n\n\ndef groups_to_rows(groups, boundaries):\n \"\"\"\n Given group row boundaries, convert ground indexes to row indexes\n :param groups: list of groups for testing\n :param boundaries: rows index limits of each group\n :return: row in group\n \"\"\"\n return np.concatenate([np.arange(boundaries[g], boundaries[g+1]) for g in groups])\n\n\ndef mkgroupfold(dall, nfold, param, evals=(), fpreproc=None, shuffle=True):\n \"\"\"\n Make n folds for cross-validation maintaining groups\n :return: cross-validation folds\n \"\"\"\n # we have groups for pairwise ranking... 
get a list of the group indexes\n group_boundaries = dall.get_uint_info('group_ptr')\n group_sizes = np.diff(group_boundaries)\n\n if shuffle is True:\n idx = np.random.permutation(len(group_sizes))\n else:\n idx = np.arange(len(group_sizes))\n # list by fold of test group indexes\n out_group_idset = np.array_split(idx, nfold)\n # list by fold of train group indexes\n in_group_idset = [np.concatenate([out_group_idset[i] for i in range(nfold) if k != i])\n for k in range(nfold)]\n # from the group indexes, convert them to row indexes\n in_idset = [groups_to_rows(in_groups, group_boundaries) for in_groups in in_group_idset]\n out_idset = [groups_to_rows(out_groups, group_boundaries) for out_groups in out_group_idset]\n\n # build the folds by taking the appropriate slices\n ret = []\n for k in range(nfold):\n # perform the slicing using the indexes determined by the above methods\n dtrain = dall.slice(in_idset[k], allow_groups=True)\n dtrain.set_group(group_sizes[in_group_idset[k]])\n dtest = dall.slice(out_idset[k], allow_groups=True)\n dtest.set_group(group_sizes[out_group_idset[k]])\n # run preprocessing on the data set if needed\n if fpreproc is not None:\n dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())\n else:\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret\n\n\ndef mknfold(dall, nfold, param, seed, evals=(), fpreproc=None, stratified=False,\n folds=None, shuffle=True):\n \"\"\"\n Make an n-fold list of CVPack from random indices.\n \"\"\"\n evals = list(evals)\n np.random.seed(seed)\n\n if stratified is False and folds is None:\n # Do standard k-fold cross validation. Automatically determine the folds.\n if len(dall.get_uint_info('group_ptr')) > 1:\n return mkgroupfold(dall, nfold, param, evals=evals, fpreproc=fpreproc, shuffle=shuffle)\n\n if shuffle is True:\n idx = np.random.permutation(dall.num_row())\n else:\n idx = np.arange(dall.num_row())\n out_idset = np.array_split(idx, nfold)\n in_idset = [np.concatenate([out_idset[i] for i in range(nfold) if k != i])\n for k in range(nfold)]\n elif folds is not None:\n # Use user specified custom split using indices\n try:\n in_idset = [x[0] for x in folds]\n out_idset = [x[1] for x in folds]\n except TypeError:\n # Custom stratification using Sklearn KFoldSplit object\n splits = list(folds.split(X=dall.get_label(), y=dall.get_label()))\n in_idset = [x[0] for x in splits]\n out_idset = [x[1] for x in splits]\n nfold = len(out_idset)\n else:\n # Do standard stratefied shuffle k-fold split\n sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)\n splits = list(sfk.split(X=dall.get_label(), y=dall.get_label()))\n in_idset = [x[0] for x in splits]\n out_idset = [x[1] for x in splits]\n nfold = len(out_idset)\n\n ret = []\n for k in range(nfold):\n # perform the slicing using the indexes determined by the above methods\n dtrain = dall.slice(in_idset[k])\n dtest = dall.slice(out_idset[k])\n # run preprocessing on the data set if needed\n if fpreproc is not None:\n dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())\n else:\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret\n\n\ndef aggcv(rlist):\n # pylint: disable=invalid-name\n \"\"\"\n Aggregate cross-validation results.\n\n If verbose_eval is true, progress is displayed in every call. 
If\n verbose_eval is an integer, progress will only be displayed every\n `verbose_eval` trees, tracked via trial.\n \"\"\"\n cvmap = {}\n idx = rlist[0].split()[0]\n for line in rlist:\n arr = line.split()\n assert idx == arr[0]\n for metric_idx, it in enumerate(arr[1:]):\n if not isinstance(it, STRING_TYPES):\n it = it.decode()\n k, v = it.split(':')\n if (metric_idx, k) not in cvmap:\n cvmap[(metric_idx, k)] = []\n cvmap[(metric_idx, k)].append(float(v))\n msg = idx\n results = []\n for (metric_idx, k), v in sorted(cvmap.items(), key=lambda x: x[0][0]):\n v = np.array(v)\n if not isinstance(msg, STRING_TYPES):\n msg = msg.decode()\n mean, std = np.mean(v), np.std(v)\n results.extend([(k, mean, std)])\n return results\n\n\ndef cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,\n metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,\n fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,\n seed=0, callbacks=None, shuffle=True):\n # pylint: disable = invalid-name\n \"\"\"Cross-validation with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round : int\n Number of boosting iterations.\n nfold : int\n Number of folds in CV.\n stratified : bool\n Perform stratified sampling.\n folds : a KFold or StratifiedKFold instance or list of fold indices\n Sklearn KFolds or StratifiedKFolds object.\n Alternatively may explicitly pass sample indices for each fold.\n For ``n`` folds, **folds** should be a length ``n`` list of tuples.\n Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used\n as the training samples for the ``n`` th fold and ``out`` is a list of\n indices to be used as the testing samples for the ``n`` th fold.\n metrics : string or list of strings\n Evaluation metrics to be watched in CV.\n obj : function\n Custom objective function.\n feval : function\n Custom evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Cross-Validation metric (average of validation\n metric computed over CV folds) needs to improve at least once in\n every **early_stopping_rounds** round(s) to continue training.\n The last entry in the evaluation history will represent the best iteration.\n If there's more than one metric in the **eval_metric** parameter given in\n **params**, the last metric will be used for early stopping.\n fpreproc : function\n Preprocessing function that takes (dtrain, dtest, param) and returns\n transformed versions of those.\n as_pandas : bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return np.ndarray\n verbose_eval : bool, int, or None, default None\n Whether to display the progress. If None, progress will be displayed\n when np.ndarray is returned. If True, progress will be displayed at\n boosting stage. If an integer is given, progress will be displayed\n at every given `verbose_eval` boosting stage.\n show_stdv : bool, default True\n Whether to display the standard deviation in progress.\n Results are not affected, and always contains std.\n seed : int\n Seed used to generate the folds (passed to numpy.random.seed).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. 
code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n shuffle : bool\n Shuffle data before creating folds.\n\n Returns\n -------\n evaluation history : list(string)\n \"\"\"\n if stratified is True and not SKLEARN_INSTALLED:\n raise XGBoostError('sklearn needs to be installed in order to use stratified cv')\n\n if isinstance(metrics, str):\n metrics = [metrics]\n\n if isinstance(params, list):\n _metrics = [x[1] for x in params if x[0] == 'eval_metric']\n params = dict(params)\n if 'eval_metric' in params:\n params['eval_metric'] = _metrics\n else:\n params = dict((k, v) for k, v in params.items())\n\n if (not metrics) and 'eval_metric' in params:\n if isinstance(params['eval_metric'], list):\n metrics = params['eval_metric']\n else:\n metrics = [params['eval_metric']]\n\n params.pop(\"eval_metric\", None)\n\n results = {}\n cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc,\n stratified, folds, shuffle)\n\n # setup callbacks\n callbacks = [] if callbacks is None else callbacks\n if early_stopping_rounds is not None:\n callbacks.append(callback.early_stop(early_stopping_rounds,\n maximize=maximize,\n verbose=False))\n\n if isinstance(verbose_eval, bool) and verbose_eval:\n callbacks.append(callback.print_evaluation(show_stdv=show_stdv))\n else:\n if isinstance(verbose_eval, int):\n callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))\n\n callbacks_before_iter = [\n cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]\n callbacks_after_iter = [\n cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]\n\n for i in range(num_boost_round):\n for cb in callbacks_before_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=None))\n for fold in cvfolds:\n fold.update(i, obj)\n res = aggcv([f.eval(i, feval) for f in cvfolds])\n\n for key, mean, std in res:\n if key + '-mean' not in results:\n results[key + '-mean'] = []\n if key + '-std' not in results:\n results[key + '-std'] = []\n results[key + '-mean'].append(mean)\n results[key + '-std'].append(std)\n try:\n for cb in callbacks_after_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=res))\n except EarlyStopException as e:\n for k in results:\n results[k] = results[k][:(e.best_iteration + 1)]\n break\n if as_pandas:\n try:\n import pandas as pd\n results = pd.DataFrame.from_dict(results)\n except ImportError:\n pass\n return results\n" ]
[ [ "numpy.diff", "pandas.DataFrame.from_dict", "numpy.random.seed", "numpy.arange", "numpy.array_split", "numpy.array", "numpy.std", "numpy.mean" ] ]
Weiqi97/LilyPadz
[ "b374908444b8594e3f3a2ccf4bc39e3e731f31aa" ]
[ "lilypadz/model/clustering.py" ]
[ "import numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom typing import List\nfrom flask import jsonify\nfrom plotly.offline import plot\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom lilypadz.model.data_processor import get_toad_processed_hop\n\n\ndef get_all_clustering_result(n_clusters: int,\n names: List[str],\n variable: List[str]):\n \"\"\"Generate a 3D plot that contains just the dots for K means result.\n\n :return: A plotly object hat has been converted to HTML format string.\n \"\"\"\n # Get the force plate column names.\n fp_variables = list(\n {\"Fore-Aft\", \"Lateral\", \"Normal\"}.intersection(variable)\n )\n\n # Get the kinematic column names.\n kinematic_variables = list(\n {\"Elbow flexion/extension\",\n \"Humeral protraction/retraction\",\n \"Humeral depression/elevation\"}.intersection(variable)\n )\n\n # Get desired toad data.\n toads_hop = [\n get_toad_processed_hop(name=name) for name in names\n ]\n\n # Get all data.\n all_data = [\n [f\"{data_name} {data.sight}\"] +\n list(data.kinematic[kinematic_variables].mean(axis=\"index\")) +\n list(data.force_plate[fp_variables].mean(axis=\"index\"))\n for one_toad_hop in toads_hop\n for data_name, data in one_toad_hop.items()\n ]\n\n data = pd.DataFrame(\n index=[data[0] for data in all_data],\n data=[data[1:] for data in all_data]\n ).dropna(axis=\"index\")\n\n # Get kMeans analyze result and unpack it.\n k_means = KMeans(n_clusters=n_clusters)\n reduced_data = PCA(n_components=3).fit_transform(data)\n k_means_index = k_means.fit_predict(reduced_data)\n\n # Get hop names.\n labels = data.index.values\n\n # Separate x, y, z coordinates from the reduced data set.\n x_value = reduced_data[:, 0]\n y_value = reduced_data[:, 1]\n z_value = reduced_data[:, 2]\n\n # Create plot for each cluster so the color will differ among clusters.\n data = [\n go.Scatter3d(\n x=x_value[np.where(group_number == k_means_index)],\n y=y_value[np.where(group_number == k_means_index)],\n z=z_value[np.where(group_number == k_means_index)],\n text=labels[np.where(group_number == k_means_index)],\n mode=\"markers\",\n name=f\"Cluster {group_number + 1}\",\n hoverinfo=\"text\",\n marker=dict(\n size=12,\n line=dict(width=1)\n )\n )\n for group_number in np.unique(k_means_index)\n ]\n\n # Set the layout of the plot, mainly set the background color to grey.\n layout = go.Layout(\n height=500,\n hovermode=\"closest\",\n title=\"K-Means Two Dimensional Scatter Plot\",\n scene=dict(\n xaxis=dict(\n title=\"PC1\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n yaxis=dict(\n title=\"PC2\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n zaxis=dict(\n title=\"PC3\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n )\n )\n\n table = pd.DataFrame(data={\n \"Cluster #\": [index + 1 for index in k_means_index],\n \"Document\": labels,\n \"X-Coordinate\": reduced_data[:, 0],\n \"Y-Coordinate\": reduced_data[:, 1],\n \"Z-Coordinate\": reduced_data[:, 2]\n }).to_html(\n index=False,\n classes=\"table table-striped table-bordered text-center\"\n )\n\n # Return the plotly figure and table.\n return jsonify(\n table=table,\n plot=plot(\n go.Figure(data=data, layout=layout),\n show_link=False,\n output_type=\"div\",\n include_plotlyjs=False\n )\n )\n\n\ndef get_one_clustering_result(n_clusters: int,\n name: str,\n variable: List[str]):\n \"\"\"Generate a 3D plot that contains just the dots for K means 
result.\n\n :return: A plotly object hat has been converted to HTML format string.\n \"\"\"\n # Get the force plate column names.\n fp_variables = list(\n {\"Fore-Aft\", \"Lateral\", \"Normal\"}.intersection(variable)\n )\n\n # Get the kinematic column names.\n kinematic_variables = list(\n {\"Elbow flexion/extension\",\n \"Humeral protraction/retraction\",\n \"Humeral depression/elevation\"}.intersection(variable)\n )\n\n # Get all data.\n all_data = [\n [f\"{data_name} {data.sight}\"] +\n list(data.kinematic[kinematic_variables].mean(axis=\"index\")) +\n list(data.force_plate[fp_variables].mean(axis=\"index\"))\n for data_name, data in get_toad_processed_hop(name=name).items()\n ]\n\n data = pd.DataFrame(\n index=[data[0] for data in all_data],\n data=[data[1:] for data in all_data]\n ).dropna(axis=\"index\")\n\n # Get kMeans analyze result and unpack it.\n k_means = KMeans(n_clusters=n_clusters)\n reduced_data = PCA(n_components=3).fit_transform(data)\n k_means_index = k_means.fit_predict(reduced_data)\n\n # Get hop names.\n labels = data.index.values\n\n # Separate x, y, z coordinates from the reduced data set.\n x_value = reduced_data[:, 0]\n y_value = reduced_data[:, 1]\n z_value = reduced_data[:, 2]\n\n # Create plot for each cluster so the color will differ among clusters.\n data = [\n go.Scatter3d(\n x=x_value[np.where(group_number == k_means_index)],\n y=y_value[np.where(group_number == k_means_index)],\n z=z_value[np.where(group_number == k_means_index)],\n text=labels[np.where(group_number == k_means_index)],\n mode=\"markers\",\n name=f\"Cluster {group_number + 1}\",\n hoverinfo=\"text\",\n marker=dict(\n size=12,\n line=dict(width=1)\n )\n )\n for group_number in np.unique(k_means_index)\n ]\n\n # Set the layout of the plot, mainly set the background color to grey.\n layout = go.Layout(\n height=500,\n hovermode=\"closest\",\n title=\"K-Means Two Dimensional Scatter Plot\",\n scene=dict(\n xaxis=dict(\n title=\"PC1\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n yaxis=dict(\n title=\"PC2\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n zaxis=dict(\n title=\"PC3\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n )\n )\n\n table = pd.DataFrame(data={\n \"Cluster #\": [index + 1 for index in k_means_index],\n \"Document\": labels,\n \"X-Coordinate\": reduced_data[:, 0],\n \"Y-Coordinate\": reduced_data[:, 1],\n \"Z-Coordinate\": reduced_data[:, 2]\n }).to_html(\n index=False,\n classes=\"table table-striped table-bordered text-center\"\n )\n\n # Return the plotly figure and table.\n return jsonify(\n table=table,\n plot=plot(\n go.Figure(data=data, layout=layout),\n show_link=False,\n output_type=\"div\",\n include_plotlyjs=False\n )\n )\n" ]
[ [ "pandas.DataFrame", "sklearn.cluster.KMeans", "numpy.where", "numpy.unique", "sklearn.decomposition.PCA" ] ]
rakesh1988/pandas_market_calendars
[ "327763f6c7692b575b1844c67acf2bdd0835360e" ]
[ "tests/test_hkex_calendar.py" ]
[ "import datetime\n\nimport pandas as pd\nimport pytz\n\nfrom pandas_market_calendars.exchange_calendar_hkex import HKEXExchangeCalendar\n\n\ndef test_time_zone():\n assert HKEXExchangeCalendar().tz == pytz.timezone('Asia/Shanghai')\n assert HKEXExchangeCalendar().name == 'HKEX'\n\n\ndef test_2018_holidays():\n hkex = HKEXExchangeCalendar()\n trading_days = hkex.valid_days('2018-01-01', '2018-12-31')\n holidays = ['2018-01-01', '2018-02-16', '2018-02-17', '2018-02-18',\n '2018-02-19', '2018-03-30', '2018-04-02', '2018-04-05',\n '2018-05-01', '2018-05-22', '2018-06-18', '2018-07-02',\n '2018-09-25', '2018-10-01', '2018-10-17', '2018-12-25',\n '2018-12-26']\n for date in holidays:\n assert pd.Timestamp(date, tz='UTC') not in trading_days\n for date in ['2018-05-02']:\n assert pd.Timestamp(date, tz='UTC') in trading_days\n\n\ndef test_hkex_closes_at_lunch():\n hkex = HKEXExchangeCalendar()\n schedule = hkex.schedule(\n start_date=datetime.datetime(2015, 1, 14, tzinfo=pytz.timezone('Asia/Shanghai')),\n end_date=datetime.datetime(2015, 1, 16, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n\n assert HKEXExchangeCalendar.open_at_time(\n schedule=schedule,\n timestamp=datetime.datetime(2015, 1, 14, 11, 0, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n\n assert not HKEXExchangeCalendar.open_at_time(\n schedule=schedule,\n timestamp=datetime.datetime(2015, 1, 14, 12, 10, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n" ]
[ [ "pandas.Timestamp" ] ]
JLUNeverMore/FAR-HO
[ "1f381f50f99b83ee5bed9700fcbfa4375096a0ca" ]
[ "far_ho/hyper_gradients.py" ]
[ "from __future__ import absolute_import, print_function, division\n\nimport sys\nfrom collections import defaultdict, deque\n\nimport tensorflow as tf\nfrom tensorflow.python.training import slot_creator\nfrom tensorflow.contrib.opt import ScipyOptimizerInterface\n\nfrom far_ho import utils\nfrom far_ho.optimizer import OptimizerDict\nfrom far_ho.utils import dot, maybe_add, reduce_all_sums\n\nRAISE_ERROR_ON_DETACHED = False\n\n\nclass HyperGradient(object):\n def __init__(self, name):\n self._optimizer_dicts = set()\n self._inner_objectives = None\n self._hypergrad_dictionary = defaultdict(list) # dictionary (hyperparameter, list of hypergradients)\n self._ts = None\n\n self._initialization = None\n self._iteration = None\n self._state = None\n self._name = name\n\n _ERROR_NOT_OPTIMIZER_DICT = \"\"\"\n Looks like {} is not an `OptimizerDict`. Use optimizers in far_ho.optimizers for obtaining an OptimizerDict.\n \"\"\"\n\n _ERROR_HYPER_DETACHED = \"\"\"\n Hyperparameter {} is detached from this optimization dynamics.\n \"\"\"\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n # Doesn't do anything useful here. To be overridden.\n \"\"\"\n Function overridden by specific methods.\n\n :param optimizer_dict: OptimzerDict object resulting from the inner objective optimization.\n :param outer_objective: A loss function for the hyperparameters (scalar tensor)\n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n\n :return: list of hyperparameters involved in the computation\n \"\"\"\n assert isinstance(optimizer_dict, OptimizerDict), HyperGradient._ERROR_NOT_OPTIMIZER_DICT.format(optimizer_dict)\n self._optimizer_dicts.add(optimizer_dict)\n\n if hyper_list is None: # get default hyperparameters\n hyper_list = utils.hyperparameters(tf.get_variable_scope().name)\n return hyper_list\n\n @property\n def initialization(self):\n if self._initialization is None:\n self._initialization = [opt_dict.initialization for opt_dict in sorted(self._optimizer_dicts)]\n return self._initialization\n\n @property\n def iteration(self):\n if self._iteration is None:\n self._iteration = [opt_dict.iteration for opt_dict in sorted(self._optimizer_dicts)]\n return self._iteration\n\n @property\n def state(self):\n for opt_dict in sorted(self._optimizer_dicts):\n for v in opt_dict.state:\n yield v\n\n @property\n def inner_objectives(self):\n if self._inner_objectives is None:\n self._inner_objectives = [opt.objective if hasattr(opt, 'objective') else tf.constant(False)\n for opt in sorted(self._optimizer_dicts)]\n return self._inner_objectives\n\n @property\n def ts(self):\n if self._ts is None:\n self._ts = tf.group(*[opt_dict.ts for opt_dict in sorted(self._optimizer_dicts)])\n return self._ts\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n \"\"\"\n Runs the inner optimization dynamics for T iterations (T_or_generator can be indeed a generator) and computes\n in the meanwhile.\n\n :param T_or_generator: integer or generator that should yield a step. 
Express either a total number of\n iterations of inner objective optimization dynamics, or could implement a stopping\n condition, or variables number of steps.\n :param inner_objective_feed_dicts: Optional feed dictionary for the inner objective\n :param outer_objective_feed_dicts: Optional feed dictionary for the outer objective\n (note that this is not used in ForwardHG since hypergradients are not\n variables)\n :param initializer_feed_dict: Optional feed dictionary for the inner objective\n :param global_step: Optional global step for the\n :param session: Optional session (otherwise will take the default session)\n :param online: Performs the computation of the hypergradient in the online (or \"real time\") mode. Note that\n `ReverseHG` and `ForwardHG` behave differently.\n :param callback: callback funciton for the forward optimization\n\n \"\"\"\n raise NotImplementedError()\n\n def hgrads_hvars(self, hyper_list=None, aggregation_fn=None, process_fn=None):\n \"\"\"\n Method for getting hypergradient and hyperparameters as required by apply_gradient methods from tensorflow \n optimizers.\n \n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n :param aggregation_fn: Optional operation to aggregate multiple hypergradients (for the same hyperparameter),\n by default reduce_mean\n :param process_fn: Optional operation like clipping to be applied.\n :return: \n \"\"\"\n if hyper_list is None:\n hyper_list = utils.hyperparameters(tf.get_variable_scope().name)\n\n assert all([h in self._hypergrad_dictionary for h in hyper_list]), 'FINAL ERROR!'\n\n if aggregation_fn is None:\n aggregation_fn = lambda hgrad_list: tf.reduce_mean(hgrad_list, axis=0)\n\n def _aggregate_process_manage_collection(_hg_lst):\n if len(_hg_lst) == 1: # avoid useless operations...\n aggr = _hg_lst[0]\n else:\n with tf.name_scope(_hg_lst[0].op.name):\n aggr = aggregation_fn(_hg_lst) if len(_hg_lst) > 1 else _hg_lst[0]\n if process_fn is not None:\n with tf.name_scope('process_gradients'):\n aggr = process_fn(aggr)\n tf.add_to_collection(utils.GraphKeys.HYPERGRADIENTS, aggr)\n return aggr\n\n return [(_aggregate_process_manage_collection(self._hypergrad_dictionary[h]),\n h) for h in hyper_list]\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def need_scalar_hyperparameters():\n return False\n\n # noinspection PyMethodMayBeStatic\n def _make_callback(self):\n \"\"\"\n Template for callbacks\n \"\"\"\n values = []\n\n # noinspection PyUnusedLocal\n def _callback(t, feed_dcit, session):\n values.append(0) # these should not depend from any feed dictionary\n\n return values, _callback\n\n def __str__(self):\n return self._name\n\n\nclass ReverseHG(HyperGradient):\n\n def __init__(self, history=None, name='ReverseHG'):\n super(ReverseHG, self).__init__(name)\n self._alpha_iter = tf.no_op()\n self._reverse_initializer = tf.no_op()\n self._history = history if history is not None else []\n\n @staticmethod\n def _truncated(max_items, name='TruncatedReverseHG'):\n \"\"\"\n Utility method to initialize truncated reverse HG (not necessarily online)\n\n :param max_items: Maximum number of iterations that will be stored\n :param name: a name for the operations and variables that will be created\n :return: ReverseHG object\n \"\"\"\n return ReverseHG(deque(maxlen=max_items + 1), name=name)\n\n # noinspection SpellCheckingInspection\n def compute_gradients(self, outer_objective, optimizer_dict, 
hyper_list=None):\n \"\"\"\n Function that adds to the computational graph all the operations needend for computing\n the hypergradients in a \"dynamic\" way, without unrolling the entire optimization graph.\n The resulting computation, while being roughly 2x more expensive then unrolling the\n optimizaiton dynamics, requires much less (GPU) memory and is more flexible, allowing\n to set a termination condition to the parameters optimizaiton routine.\n\n :param optimizer_dict: OptimzerDict object resulting from the inner objective optimization.\n :param outer_objective: A loss function for the hyperparameters (scalar tensor)\n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n\n :return: list of hyperparameters involved in the computation\n \"\"\"\n hyper_list = super(ReverseHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n\n # derivative of outer objective w.r.t. state\n with tf.variable_scope(outer_objective.op.name): # for some reason without this there is a cathastrofic\n # failure...\n doo_ds = tf.gradients(outer_objective, list(optimizer_dict.state))\n\n alphas = self._create_lagrangian_multipliers(optimizer_dict, doo_ds)\n\n alpha_vec = utils.vectorize_all(alphas)\n dyn_vec = utils.vectorize_all(list(optimizer_dict.dynamics))\n lag_phi_t = utils.dot(alpha_vec, dyn_vec, name='iter_wise_lagrangian_part1')\n # TODO outer_objective might be a list... handle this case\n\n # iterative computation of hypergradients\n doo_dypers = tf.gradients(outer_objective, hyper_list) # (direct) derivative of outer objective w.r.t. hyp.\n alpha_dot_B = tf.gradients(lag_phi_t, hyper_list)\n # check that optimizer_dict has initial ops (phi_0)\n if optimizer_dict.init_dynamics is not None:\n lag_phi0 = utils.dot(alpha_vec, utils.vectorize_all([d for (s, d) in optimizer_dict.init_dynamics]))\n alpha_dot_B0 = tf.gradients(lag_phi0, hyper_list)\n else:\n alpha_dot_B0 = [None] * len(hyper_list)\n\n # here, if some of this is None it may mean that the hyperparameter compares inside phi_0: check that and\n # if it is not the case raise error...\n hyper_grad_vars, hyper_grad_step = [], tf.no_op()\n for dl_dh, doo_dh, a_d_b0, hyper in zip(alpha_dot_B, doo_dypers, alpha_dot_B0, hyper_list):\n assert dl_dh is not None or a_d_b0 is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)\n hgv = None\n if dl_dh is not None: # \"normal hyperparameter\"\n hgv = self._create_hypergradient(hyper, doo_dh)\n\n hyper_grad_step = tf.group(hyper_grad_step, hgv.assign_add(dl_dh))\n if a_d_b0 is not None:\n hgv = hgv + a_d_b0 if hgv is not None else a_d_b0\n # here hyper_grad_step has nothing to do...\n hyper_grad_vars.append(hgv) # save these...\n\n with tf.control_dependencies([hyper_grad_step]): # first update hypergradinet then alphas.\n _alpha_iter = tf.group(*[alpha.assign(dl_ds) for alpha, dl_ds\n in zip(alphas, tf.gradients(lag_phi_t, list(optimizer_dict.state)))])\n self._alpha_iter = tf.group(self._alpha_iter, _alpha_iter) # put all the backward iterations toghether\n\n [self._hypergrad_dictionary[h].append(hg) for h, hg in zip(hyper_list, hyper_grad_vars)]\n\n self._reverse_initializer = tf.group(self._reverse_initializer,\n tf.variables_initializer(alphas),\n tf.variables_initializer([h for h in hyper_grad_vars\n if hasattr(h, 'initializer')])) # some ->\n # hypergradients (those coming form initial dynamics) might be just tensors and not variables...\n\n return hyper_list\n\n 
@staticmethod\n def _create_lagrangian_multipliers(optimizer_dict, doo_ds):\n lag_mul = [slot_creator.create_slot(v.initialized_value(), utils.val_or_zero(der, v), 'alpha') for v, der\n in zip(optimizer_dict.state, doo_ds)]\n [tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm) for lm in lag_mul]\n utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, *lag_mul)\n # this prevents the 'automatic' initialization with tf.global_variables_initializer.\n return lag_mul\n\n @staticmethod\n def _create_hypergradient(hyper, doo_dhypers):\n \"\"\"\n Creates one hyper-gradient as a variable. doo_dhypers: initialization, that is the derivative of\n the outer objective w.r.t this hyper\n \"\"\"\n hgs = slot_creator.create_slot(hyper, utils.val_or_zero(doo_dhypers, hyper), 'hypergradient')\n utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, hgs)\n return hgs\n\n def _state_feed_dict_generator(self, history, T_or_generator):\n for t, his in zip(utils.solve_int_or_generator(T_or_generator), history):\n yield t, utils.merge_dicts(\n *[od.state_feed_dict(h) for od, h in zip(sorted(self._optimizer_dicts), his)]\n )\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n # callback may be a pair, first for froward pass, second for reverse pass\n callback = utils.as_tuple_or_list(callback)\n # same thing for T\n T_or_generator = utils.as_tuple_or_list(T_or_generator)\n\n ss = session or tf.get_default_session()\n\n self._history.clear()\n if not online:\n _fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))\n self._save_history(ss.run(self.initialization, feed_dict=_fd))\n\n # else: # not totally clear if i should add this\n # self._save_history(ss.run(list(self.state)))\n\n T = 0 # this is useful if T_or_generator is indeed a generator...\n for t in utils.solve_int_or_generator(T_or_generator[0]):\n # nonlocal t # with nonlocal would not be necessary the variable T... not compatible with 2.7\n _fd = utils.maybe_call(inner_objective_feed_dicts, t)\n self._save_history(ss.run(self.iteration, feed_dict=_fd))\n T = t\n\n utils.maybe_call(callback[0], t, _fd, ss) # callback\n\n # initialization of support variables (supports stochastic evaluation of outer objective via global_step ->\n # variable)\n # TODO (maybe tf bug or oddity) for some strange reason, if some variable's initializer depends on\n # a placeholder, then the initializer of alpha SEEMS TO DEPEND ALSO ON THAT placeholder,\n # as if the primary variable should be reinitialized as well, but, I've checked, the primary variable is NOT\n # actually reinitialized. This doesn't make sense since the primary variable is already initialized\n # and Tensorflow seems not to care... should maybe look better into this issue\n reverse_init_fd = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))\n # now adding also the initializer_feed_dict because of tf quirk...\n maybe_init_fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))\n reverse_init_fd = utils.merge_dicts(reverse_init_fd, maybe_init_fd)\n ss.run(self._reverse_initializer, feed_dict=reverse_init_fd)\n\n del self._history[-1] # do not consider last point\n\n for pt, state_feed_dict in self._state_feed_dict_generator(reversed(self._history), T_or_generator[-1]):\n # this should be fine also for truncated reverse... 
but check again the index t\n t = T - pt - 1 # if T is int then len(self.history) is T + 1 and this numerator\n # shall start at T-1\n _fd = utils.merge_dicts(state_feed_dict, utils.maybe_call(inner_objective_feed_dicts, t))\n ss.run(self._alpha_iter, _fd)\n if len(callback) == 2: utils.maybe_call(callback[1], t, _fd, ss)\n\n def _save_history(self, weights):\n self._history.append(weights)\n\n def hypergrad_callback(self, hyperparameter=None, flatten=True):\n \"\"\"callback that records the partial hypergradients on the reverse pass\"\"\"\n values = []\n gs = list(self._hypergrad_dictionary.values()) if hyperparameter is None else \\\n self._hypergrad_dictionary[hyperparameter]\n if flatten: gs = utils.vectorize_all(gs)\n\n # noinspection PyUnusedLocal\n def _callback(_, __, ss):\n values.append(ss.run(gs)) # these should not depend from any feed dictionary\n\n return values, _callback\n\n\nclass ReverseHg(ReverseHG):\n\n def __init__(self, history=None):\n print('WARNING, DEPRECATED: please use the class ReverseHG', file=sys.stderr)\n super(ReverseHg, self).__init__(history)\n\n\nclass ForwardHG(HyperGradient):\n def __init__(self, name='ForwardHG'):\n super(ForwardHG, self).__init__(name)\n self._forward_initializer = tf.no_op()\n self._zs = {} # hyperparameter - zs dictionary\n self._z_iter = tf.no_op()\n self._iteration = None\n self.A_dot_zs = {}\n\n _HYPER_RANK_ERROR_MESSAGE = \"\"\"\n ForwardHG: Only scalar hyperparameters accepted.\\n\n Hyperparameter tensor {} has rank {}.\\n\n Use keyword argument far_ho.get_hyperparameter(..., scalar=True) on hyperparameter creation.\n \"\"\"\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n hyper_list = super(ForwardHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n\n # scalar_hyper_list\n\n with tf.variable_scope(outer_objective.op.name):\n # dynamics_vec = vectorize_all(optimizer_dict.dynamics) # in the new implementation there's no need of\n # vectorizing... 
it might be more efficient since it's better to avoid too many reshaping operations...\n d_oo_d_state = tf.gradients(outer_objective, list(optimizer_dict.state))\n\n with tf.name_scope('DUMMY'): # variables to compute forward propagation\n # TODO avoid this computation if optimizer_dict has already been seen.\n aux_vs = [tf.zeros_like(v) for v in optimizer_dict.state]\n dynamics_dot_aux_v = reduce_all_sums(list(optimizer_dict.dynamics), aux_vs)\n\n der_dynamics_dot_aux_v = tf.gradients(dynamics_dot_aux_v, list(optimizer_dict.state))\n # this is a list of jacobians times aux_vs that have the same dimension of states variables.\n\n init_dynamics_dot_aux_v = None\n if optimizer_dict.init_dynamics:\n # init_dynamics_dot_aux_v = dot(vectorize_all(optimizer_dict.init_dynamics), aux_v_vec) # old impl\n init_dynamics_dot_aux_v = reduce_all_sums(\n optimizer_dict.init_dynamics, aux_vs)\n\n for hyp in hyper_list:\n assert hyp.shape.ndims == 0, ForwardHG._HYPER_RANK_ERROR_MESSAGE.format(hyp, hyp.shape.ndims)\n\n d_init_dyn_d_hyp = None if init_dynamics_dot_aux_v is None else \\\n tf.gradients(init_dynamics_dot_aux_v, hyp)[0]\n d_dyn_d_hyp = tf.gradients(dynamics_dot_aux_v, hyp)[0]\n d_oo_d_hyp = tf.gradients(outer_objective, hyp)[0]\n\n # ------------------------------------------------------------\n # check detached hyperparameters (for which hypergradient would be always null)\n hyper_ok = d_init_dyn_d_hyp is not None or d_dyn_d_hyp is not None or d_oo_d_hyp is not None\n if RAISE_ERROR_ON_DETACHED:\n # try:\n assert hyper_ok, HyperGradient._ERROR_HYPER_DETACHED.format(hyp)\n # ex\n else:\n if not hyper_ok:\n print(HyperGradient._ERROR_HYPER_DETACHED.format(hyp), file=sys.stderr)\n hyper_list.remove(hyp)\n # -------------------------------------------------------------\n\n # UPDATE OF TOTAL DERIVATIVE OF STATE W.R.T. HYPERPARAMETER\n zs = ForwardHG._create_zs(\n optimizer_dict, hyp, None if d_init_dyn_d_hyp is None else tf.gradients(d_init_dyn_d_hyp, aux_vs)\n ) # this is one z for each variable\n self._zs[hyp] = zs # store a reference for the total derivatives for easy access\n Bs = tf.gradients(d_dyn_d_hyp, aux_vs)\n\n A_dot_zs = tf.gradients(reduce_all_sums(der_dynamics_dot_aux_v, zs), aux_vs)\n\n self.A_dot_zs[hyp] = A_dot_zs\n\n _z_iter = tf.group(*[\n z.assign(maybe_add(A_dot_z, B)) for z, A_dot_z, B\n in zip(zs, A_dot_zs, Bs)\n ])\n self._z_iter = tf.group(self._z_iter, _z_iter)\n\n # -- HYPERGRADIENT -----\n d_E_T = [dot(d_oo_d_s, z) for d_oo_d_s, z in zip(d_oo_d_state, zs)\n if d_oo_d_s is not None and z is not None] # list of dot products\n hg = maybe_add(tf.reduce_sum(d_E_T), d_oo_d_hyp) # sum the partial dot products and possibly ->\n # adds the ''direct derivative'' term d(E( . 
, \\lambda))/d \\lambda\n\n self._hypergrad_dictionary[hyp].append(hg)\n self._forward_initializer = tf.group(self._forward_initializer,\n tf.variables_initializer(zs))\n return hyper_list\n\n @staticmethod\n def _create_zs(optimizer_dict, hyper, d_init_dynamics_d_hyper):\n if d_init_dynamics_d_hyper is None: d_init_dynamics_d_hyper = [None] * len(optimizer_dict)\n with tf.variable_scope('Z'):\n z = [slot_creator.create_slot(v, utils.val_or_zero(der, v), hyper.op.name) for v, der\n in zip(optimizer_dict.state, d_init_dynamics_d_hyper)]\n [tf.add_to_collection(utils.GraphKeys.ZS, lm) for lm in z]\n # in this case it is completely fine to keep zs into the global variable...\n return z\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n\n ss = session or tf.get_default_session()\n\n if not online:\n self._run_batch_initialization(ss, utils.maybe_call(\n initializer_feed_dict, utils.maybe_eval(global_step, ss)))\n\n for t in utils.solve_int_or_generator(T_or_generator):\n _fd = utils.maybe_call(inner_objective_feed_dicts, t)\n self._forward_step(ss, _fd)\n utils.maybe_call(callback, t, _fd, ss)\n\n def _forward_step(self, ss, _fd):\n ss.run(self._z_iter, _fd)\n ss.run(self.iteration, _fd)\n\n def _run_batch_initialization(self, ss, fd):\n ss.run(self.initialization, feed_dict=fd)\n ss.run(self._forward_initializer, feed_dict=fd)\n\n @staticmethod\n def need_scalar_hyperparameters():\n return True\n\n @property\n def w_dots(self):\n # if hyper: return self._zs[hyper]\n return [{h: self._zs[h][k] for h in self._zs} for k, _ in enumerate(self.state)]\n\n def z_callback(self, hyperparameter=None, flatten=True):\n zs_values = []\n zs = list(self._zs.values()) if hyperparameter is None else self._zs[hyperparameter]\n if flatten: zs = utils.vectorize_all(zs)\n\n # noinspection PyUnusedLocal\n def _callback(_, __, ss):\n zs_values.append(ss.run(zs)) # these should not depend on any feed dictionary\n\n return zs_values, _callback\n\n\nclass ImplicitHG(HyperGradient):\n \"\"\"\n Implementation follows Pedregosa's algorithm HOAG\n \"\"\"\n\n def __init__(self, linear_system_solver_gen=None, tolerance=None, name='ImplicitHG'):\n super(ImplicitHG, self).__init__(name)\n if linear_system_solver_gen is None:\n linear_system_solver_gen = lambda _obj, var_list, _tolerance: ScipyOptimizerInterface(\n _obj, var_list=var_list, options={'maxiter': 100}, method='cg', tol=_tolerance)\n self.linear_system_solver = linear_system_solver_gen\n\n if tolerance is None:\n tolerance = lambda _k: 0.1 * (0.9 ** _k)\n self.tolerance = tolerance\n\n self._lin_sys = []\n self._qs = []\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n hyper_list = super(ImplicitHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n state = list(optimizer_dict.state)\n\n with tf.variable_scope(outer_objective.op.name):\n g1 = utils.vectorize_all(tf.gradients(outer_objective, state))\n grads_inner_obj_vec = utils.vectorize_all(tf.gradients(optimizer_dict.objective, state))\n\n q = self._create_q(g1)\n obj = tf.norm(\n utils.vectorize_all(tf.gradients(utils.dot(grads_inner_obj_vec, q), state)) - g1\n ) # using the norm seems to produce better results than the squared norm...\n # (even though it is more costly)\n\n self._lin_sys.append(lambda _tolerance: self.linear_system_solver(obj, [q], _tolerance))\n\n g2s = tf.gradients(outer_objective, hyper_list)\n 
cross_ders = tf.gradients(utils.dot(grads_inner_obj_vec, q), hyper_list)\n for g2, cd, hyper in zip(g2s, cross_ders, hyper_list):\n assert g2 is not None or cd is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)\n hg = utils.maybe_add(-cd, g2)\n if hg is None: # this would be strange...\n print(('WARNING, outer objective is only directly dependent on hyperparameter {}. ' +\n 'Direct optimization would be better!').format(hyper))\n hg = g2\n self._hypergrad_dictionary[hyper].append(hg)\n\n return hyper_list\n\n def _create_q(self, d_oo_d_state):\n self._qs.append(slot_creator.create_zeros_slot(d_oo_d_state, 'q'))\n return self._qs[-1]\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n ss = session or tf.get_default_session()\n\n inner_objective_feed_dicts = utils.as_tuple_or_list(inner_objective_feed_dicts)\n if not online:\n self._run_batch_initialization(ss, utils.maybe_call(\n initializer_feed_dict, utils.maybe_eval(global_step, ss)))\n\n for t in utils.solve_int_or_generator(T_or_generator):\n _fd = utils.maybe_call(inner_objective_feed_dicts[0], t)\n self._forward_step(ss, _fd)\n utils.maybe_call(callback, t, _fd, ss)\n\n # end of optimization. Solve linear systems.\n tol_val = utils.maybe_call(self.tolerance, utils.maybe_eval(global_step, ss)) # decreasing tolerance (seq.)\n # feed dictionaries (could...in theory, implement stochastic solution of this linear system...)\n _fd = utils.maybe_call(inner_objective_feed_dicts[-1], -1)\n _fd_outer = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))\n _fd = utils.merge_dicts(_fd, _fd_outer)\n\n for lin_sys in self._lin_sys:\n lin_sys(tol_val).minimize(ss, _fd) # implicitly warm restarts with previously found q\n\n def _forward_step(self, ss, _fd):\n ss.run(self.iteration, _fd)\n\n def _run_batch_initialization(self, ss, fd):\n ss.run(self.initialization, feed_dict=fd)\n" ]
[ [ "tensorflow.no_op", "tensorflow.python.training.slot_creator.create_zeros_slot", "tensorflow.variable_scope", "tensorflow.reduce_mean", "tensorflow.get_default_session", "tensorflow.zeros_like", "tensorflow.name_scope", "tensorflow.gradients", "tensorflow.contrib.opt.ScipyOptimizerInterface", "tensorflow.get_variable_scope", "tensorflow.reduce_sum", "tensorflow.constant", "tensorflow.group", "tensorflow.add_to_collection", "tensorflow.variables_initializer", "tensorflow.control_dependencies" ] ]
seono/SKKALBERT
[ "0fe68260558656c9732205391539aa202e70ba67" ]
[ "src/utils/models/word_utils.py" ]
[ "import argparse, os, sys, math\nimport numpy as np\nfrom gensim.models import Word2Vec\nfrom sklearn.decomposition import TruncatedSVD\nfrom soynlp.word import pmi\nfrom soynlp.vectorizer import sent_to_word_contexts_matrix\nfrom collections import defaultdict\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom .preprocess import get_tokenizer\n\n\ndef train_word2vec(corpus_fname, model_fname, max_num_tokens_per_doc):\n make_save_path(model_fname)\n corpus_data = open(corpus_fname, 'r').readlines()\n max_num_tokens = np.max([len(sent.replace('\\n', '').strip().split(\" \")) for sent in corpus_data])\n if max_num_tokens_per_doc is None:\n max_num_tokens_per_doc = max_num_tokens\n else:\n max_num_tokens_per_doc = int(max_num_tokens_per_doc)\n print(\"Maximum number of tokens in corpus: \", max_num_tokens)\n print(\"Maximum token length per document: \", max_num_tokens_per_doc)\n corpus = []\n for sent in corpus_data:\n tokens = sent.replace('\\n', '').strip().split(\" \")\n while len(tokens) > max_num_tokens_per_doc:\n corpus.append(tokens[:max_num_tokens_per_doc])\n tokens = tokens[max_num_tokens_per_doc:]\n if len(tokens) > 0:\n corpus.append(tokens)\n model = Word2Vec(corpus, size=100, workers=4, sg=1)\n model.save(model_fname)\n\n\"\"\"\nLatent Semantic Analysis\nInspired by:\nhttps://lovit.github.io/nlp/2018/04/22/context_vector_for_word_similarity\nhttps://lovit.github.io/nlp/2018/04/22/implementing_pmi_numpy_practice\n\"\"\"\ndef latent_semantic_analysis(corpus_fname, output_fname):\n make_save_path(output_fname)\n corpus = [sent.replace('\\n', '').strip() for sent in open(corpus_fname, 'r').readlines()]\n # construct co-occurrence matrix (=word_context)\n # dynamic weight if True. co-occurrence weight = [1, (w-1)/w, (w-2)/w, ... 
1/w]\n input_matrix, idx2vocab = sent_to_word_contexts_matrix(\n corpus,\n windows=3,\n min_tf=10,\n dynamic_weight=True,\n verbose=True)\n # compute truncated SVD\n cooc_svd = TruncatedSVD(n_components=100)\n cooc_vecs = cooc_svd.fit_transform(input_matrix)\n with open(output_fname + \"-cooc.vecs\", 'w') as f1:\n for word, vec in zip(idx2vocab, cooc_vecs):\n str_vec = [str(el) for el in vec]\n f1.writelines(word + ' ' + ' '.join(str_vec) + \"\\n\")\n # Shift PPMI at k=0, (equal PPMI)\n # pmi(word, contexts)\n # px: Probability of rows(items)\n # py: Probability of columns(features)\n pmi_matrix, _, _ = pmi(input_matrix, min_pmi=math.log(5))\n # compute truncated SVD on the PMI matrix (not the raw co-occurrence matrix)\n pmi_svd = TruncatedSVD(n_components=100)\n pmi_vecs = pmi_svd.fit_transform(pmi_matrix)\n with open(output_fname + \"-pmi.vecs\", 'w') as f2:\n for word, vec in zip(idx2vocab, pmi_vecs):\n str_vec = [str(el) for el in vec]\n f2.writelines(word + ' ' + ' '.join(str_vec) + \"\\n\")\n\n\nclass CBoWModel(object):\n\n def __init__(self, train_fname, embedding_fname, model_fname, embedding_corpus_fname,\n embedding_method=\"fasttext\", is_weighted=True, average=False, dim=100, tokenizer_name=\"mecab\"):\n # configurations\n make_save_path(model_fname)\n self.dim = dim\n self.average = average\n if is_weighted:\n model_full_fname = model_fname + \"-weighted\"\n else:\n model_full_fname = model_fname + \"-original\"\n self.tokenizer = get_tokenizer(tokenizer_name)\n if is_weighted:\n # ready for weighted embeddings\n self.embeddings = self.load_or_construct_weighted_embedding(embedding_fname, embedding_method, embedding_corpus_fname)\n print(\"loading weighted embeddings, complete!\")\n else:\n # ready for original embeddings\n words, vectors = self.load_word_embeddings(embedding_fname, embedding_method)\n self.embeddings = defaultdict(list)\n for word, vector in zip(words, vectors):\n self.embeddings[word] = vector\n print(\"loading original embeddings, complete!\")\n if not os.path.exists(model_full_fname):\n print(\"train Continuous Bag of Words model\")\n self.model = self.train_model(train_fname, model_full_fname)\n else:\n print(\"load Continuous Bag of Words model\")\n self.model = self.load_model(model_full_fname)\n\n def evaluate(self, test_data_fname, batch_size=3000, verbose=False):\n print(\"evaluation start!\")\n test_data = self.load_or_tokenize_corpus(test_data_fname)\n data_size = len(test_data)\n num_batches = int((data_size - 1) / batch_size) + 1\n eval_score = 0\n for batch_num in range(num_batches):\n batch_sentences = []\n batch_tokenized_sentences = []\n batch_labels = []\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n features = test_data[start_index:end_index]\n for feature in features:\n sentence, tokens, label = feature\n batch_sentences.append(sentence)\n batch_tokenized_sentences.append(tokens)\n batch_labels.append(label)\n preds, curr_eval_score = self.predict_by_batch(batch_tokenized_sentences, batch_labels)\n eval_score += curr_eval_score\n if verbose:\n for sentence, pred, label in zip(batch_sentences, preds, batch_labels):\n print(sentence, \", pred:\", pred, \", label:\", label)\n print(\"# of correct:\", str(eval_score), \", total:\", str(len(test_data)), \", score:\", str(eval_score / len(test_data)))\n\n def predict(self, sentence):\n tokens = self.tokenizer.morphs(sentence)\n sentence_vector = self.get_sentence_vector(tokens)\n scores = np.dot(self.model[\"vectors\"], sentence_vector)\n pred = self.model[\"labels\"][np.argmax(scores)]\n 
return pred\n\n def predict_by_batch(self, tokenized_sentences, labels):\n sentence_vectors, eval_score = [], 0\n for tokens in tokenized_sentences:\n sentence_vectors.append(self.get_sentence_vector(tokens))\n scores = np.dot(self.model[\"vectors\"], np.array(sentence_vectors).T)\n preds = np.argmax(scores, axis=0)\n for pred, label in zip(preds, labels):\n if self.model[\"labels\"][pred] == label:\n eval_score += 1\n return preds, eval_score\n\n def get_sentence_vector(self, tokens):\n vector = np.zeros(self.dim)\n for token in tokens:\n if token in self.embeddings.keys():\n vector += self.embeddings[token]\n if not self.average:\n vector /= len(tokens)\n vector_norm = np.linalg.norm(vector)\n if vector_norm != 0:\n unit_vector = vector / vector_norm\n else:\n unit_vector = np.zeros(self.dim)\n return unit_vector\n\n def load_or_tokenize_corpus(self, fname):\n data = []\n if os.path.exists(fname + \"-tokenized\"):\n with open(fname + \"-tokenized\", \"r\") as f1:\n for line in f1:\n sentence, tokens, label = line.strip().split(\"\\u241E\")\n data.append([sentence, tokens.split(), label])\n else:\n with open(fname, \"r\") as f2, open(fname + \"-tokenized\", \"w\") as f3:\n for line in f2:\n sentence, label = line.strip().split(\"\\u241E\")\n tokens = self.tokenizer.morphs(sentence)\n data.append([sentence, tokens, label])\n f3.writelines(sentence + \"\\u241E\" + ' '.join(tokens) + \"\\u241E\" + label + \"\\n\")\n return data\n\n def compute_word_frequency(self, embedding_corpus_fname):\n total_count = 0\n words_count = defaultdict(int)\n with open(embedding_corpus_fname, \"r\") as f:\n for line in f:\n tokens = line.strip().split()\n for token in tokens:\n words_count[token] += 1\n total_count += 1\n return words_count, total_count\n\n def load_word_embeddings(self, vecs_fname, method):\n if method == \"word2vec\":\n model = Word2Vec.load(vecs_fname)\n words = model.wv.index2word\n vecs = model.wv.vectors\n else:\n words, vecs = [], []\n with open(vecs_fname, 'r', encoding='utf-8') as f1:\n if \"fasttext\" in method:\n next(f1) # skip head line\n for line in f1:\n if method == \"swivel\":\n splited_line = line.replace(\"\\n\", \"\").strip().split(\"\\t\")\n else:\n splited_line = line.replace(\"\\n\", \"\").strip().split(\" \")\n words.append(splited_line[0])\n vec = [float(el) for el in splited_line[1:]]\n vecs.append(vec)\n return words, vecs\n\n def load_or_construct_weighted_embedding(self, embedding_fname, embedding_method, embedding_corpus_fname, a=0.0001):\n dictionary = {}\n if os.path.exists(embedding_fname + \"-weighted\"):\n # load weighted word embeddings\n with open(embedding_fname + \"-weighted\", \"r\") as f2:\n for line in f2:\n word, weighted_vector = line.strip().split(\"\\u241E\")\n weighted_vector = [float(el) for el in weighted_vector.split()]\n dictionary[word] = weighted_vector\n else:\n # load pretrained word embeddings\n words, vecs = self.load_word_embeddings(embedding_fname, embedding_method)\n # compute word frequency\n words_count, total_word_count = self.compute_word_frequency(embedding_corpus_fname)\n # construct weighted word embeddings\n with open(embedding_fname + \"-weighted\", \"w\") as f3:\n for word, vec in zip(words, vecs):\n if word in words_count.keys():\n word_prob = words_count[word] / total_word_count\n else:\n word_prob = 0.0\n weighted_vector = (a / (word_prob + a)) * np.asarray(vec)\n dictionary[word] = weighted_vector\n f3.writelines(word + \"\\u241E\" + \" \".join([str(el) for el in weighted_vector]) + \"\\n\")\n return dictionary\n\n def 
train_model(self, train_data_fname, model_fname):\n model = {\"vectors\": [], \"labels\": [], \"sentences\": []}\n train_data = self.load_or_tokenize_corpus(train_data_fname)\n with open(model_fname, \"w\") as f:\n for sentence, tokens, label in train_data:\n tokens = self.tokenizer.morphs(sentence)\n sentence_vector = self.get_sentence_vector(tokens)\n model[\"sentences\"].append(sentence)\n model[\"vectors\"].append(sentence_vector)\n model[\"labels\"].append(label)\n str_vector = \" \".join([str(el) for el in sentence_vector])\n f.writelines(sentence + \"\\u241E\" + \" \".join(tokens) + \"\\u241E\" + str_vector + \"\\u241E\" + label + \"\\n\")\n return model\n\n def load_model(self, model_fname):\n model = {\"vectors\": [], \"labels\": [], \"sentences\": []}\n with open(model_fname, \"r\") as f:\n for line in f:\n sentence, _, vector, label = line.strip().split(\"\\u241E\")\n vector = np.array([float(el) for el in vector.split()])\n model[\"sentences\"].append(sentence)\n model[\"vectors\"].append(vector)\n model[\"labels\"].append(label)\n return model\n\n\ndef make_save_path(full_path):\n if full_path[:4] == \"data\":\n full_path = os.path.join(os.path.abspath(\".\"), full_path)\n model_path = '/'.join(full_path.split(\"/\")[:-1])\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--method', type=str, help='method')\n parser.add_argument('--input_path', type=str, help='Location of input files')\n parser.add_argument('--output_path', type=str, help='Location of output files')\n parser.add_argument('--embedding_path', type=str, help='Location of embedding model')\n parser.add_argument('--is_weighted', type=str, help='Use weighted method or not')\n parser.add_argument('--train_corpus_path', type=str, help='Location of train corpus')\n parser.add_argument('--test_corpus_path', type=str, help='Location of test corpus')\n parser.add_argument('--embedding_name', type=str, help='embedding name')\n parser.add_argument('--embedding_corpus_path', type=str, help='embedding corpus path')\n parser.add_argument('--max_num_tokens_per_doc', type=str, help='maximum number of tokens(word2vec)')\n parser.add_argument('--average', type=str, default=\"False\", help='average or not')\n args = parser.parse_args()\n\n def str2bool(str):\n return str.lower() in [\"true\", \"t\"]\n\n if args.method == \"train_word2vec\":\n train_word2vec(args.input_path, args.output_path, args.max_num_tokens_per_doc)\n elif args.method == \"latent_semantic_analysis\":\n latent_semantic_analysis(args.input_path, args.output_path)\n elif args.method == \"cbow\":\n model = CBoWModel(args.train_corpus_path, args.embedding_path,\n args.output_path, args.embedding_corpus_path,\n args.embedding_name, str2bool(args.is_weighted),\n str2bool(args.average))\n model.evaluate(args.test_corpus_path)" ]
[ [ "numpy.zeros", "numpy.asarray", "numpy.argmax", "sklearn.decomposition.TruncatedSVD", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
ness001/NeteaseMusic-playlist-download-app
[ "a06bc04493cc8b0ae4ad08eeb2db1b05d96bd3d2" ]
[ "batch.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'batch.ui'\n#\n# Created by: PyQt5 UI code generator 5.14.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QFileDialog\nfrom time import sleep\nfrom requests import get\nfrom urllib.request import urlretrieve, install_opener, build_opener, urlopen\nimport eyed3\nfrom os import path, chdir, getcwd, listdir\nfrom configparser import ConfigParser\nfrom pathlib import Path\nimport pandas as pd\n\n\nclass mythread(QThread):\n val = pyqtSignal(int)\n msg = pyqtSignal(str)\n max = pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(mythread, self).__init__(parent)\n self.pid = None\n\n def run(self):\n\n print(getcwd())\n # self.msg.emit(getcwd())\n try:\n pid = int(self.pid) # 就算是数字,从textedit接收到的也是string\n r = get('http://music.163.com/api/playlist/detail?id=' + str(pid))\n if r.json()['code'] != 200:\n self.msg.emit(\"未找到歌单\")\n else:\n meta = r.json()['result']\n print('更新来自【' + meta['creator']['nickname'] + '】的歌单--' + meta['name'])\n self.msg.emit('更新来自【' + meta['creator']['nickname'] + '】的歌单--' + meta['name'])\n # QtWidgets.QApplication.processEvents()\n ids = []\n titles = []\n artists = []\n albums = []\n album_pics = []\n\n for i in range(len(r.json()['result']['tracks'])):\n ids.append(r.json()['result']['tracks'][i]['id'])\n titles.append(r.json()['result']['tracks'][i]['name'])\n artists.append(r.json()['result']['tracks'][i]['artists'][0]['name'])\n albums.append(r.json()['result']['tracks'][i]['album']['name'])\n album_pics.append(r.json()['result']['tracks'][i]['album']['blurPicUrl'])\n\n opener = build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n install_opener(opener)\n Path('./music').mkdir(parents=True, exist_ok=True) # an robust way to add a new folder\n mp3_path = './music/'\n Path('./img').mkdir(parents=True, exist_ok=True)\n img_path = './img/'\n\n d = {'ids': ids, 'titles': titles, 'artists': artists, 'albums': albums, 'album_pics': album_pics}\n df = pd.DataFrame(data=d)\n\n old = [path.splitext(item)[0] for item in listdir(mp3_path)]\n new = list(set(titles) - set(old))\n print('本次更新曲目', *new, sep=',')\n self.msg.emit(\"本次更新曲目...\")\n self.max.emit(len(new) - 1)\n for i in range(0, len(new)):\n self.msg.emit(str(i + 1) + '. 
' + new[i])\n self.val.emit(i)\n # row=df.loc[df.titles == new[i]]\n # reference cell value: row.titles.values[0]\n # better way\n row = df.loc[df.titles == new[i]].to_dict(orient='records')[0]\n song_url = 'http://music.163.com/song/media/outer/url?id=' + str(row['ids']) + '.mp3'\n r = urlopen(song_url)\n if r.geturl() == 'https://music.163.com/404':\n print('歌曲【' + row['titles'] + \"】无资源!\")\n self.msg.emit('歌曲【' + row['titles'] + \"】无资源!\")\n else:\n mp3_name = mp3_path + row['titles'].replace('/', '-') + '.mp3'\n urlretrieve(song_url, mp3_name)\n\n img_name = img_path + row['albums'].replace('/', '-') + '.jpg'\n urlretrieve(row['album_pics'], img_name)\n\n audiofile = eyed3.load(mp3_name)\n audiofile.tag.artist = str(row['artists'])\n audiofile.tag.album = str(row['albums'])\n # audiofile.tag.images.set(type_=3,img_data=None,mime_type='image/jpeg',img_url=album_pics[i])\n # a plain url reference won't work; you have to download the image to disk first\n # plus this method isn't mentioned in the documentation; I found it on Stack Overflow\n audiofile.tag.images.set(type_=3, img_data=None, mime_type='image/jpeg',\n img_url=open(img_name, 'rb').read())\n # the id3 version and the encoding are both important:\n # the former guarantees the id3 tag will be recognized by music players like Apple Music\n # the latter keeps the save from failing with errors like the 'Latin1' error\n audiofile.tag.save(version=eyed3.id3.ID3_V2_3, encoding='utf-8')\n sleep(0.5)\n print(\"更新完成!\")\n self.msg.emit(\"更新完成\")\n except ValueError:\n # assert isinstance(pid, int), \"id格式为纯数字\"\n self.msg.emit('⚠️id格式应为纯数字')\n # print('aaaa')\n\n\nclass Ui_MainWindow(object):\n def __init__(self):\n self.folder = None\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(306, 298)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.verticalLayout.addItem(spacerItem)\n self.pid = QtWidgets.QLineEdit(self.centralwidget)\n self.pid.setObjectName(\"pid\")\n self.verticalLayout.addWidget(self.pid)\n self.pid.setText('5022293116')\n\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.submit = QtWidgets.QPushButton(self.centralwidget)\n self.submit.setObjectName(\"submit\")\n self.horizontalLayout_2.addWidget(self.submit)\n self.folder_btn = QtWidgets.QToolButton(self.centralwidget)\n self.folder_btn.setObjectName(\"toolButton\")\n self.horizontalLayout_2.addWidget(self.folder_btn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.info = QtWidgets.QTextBrowser(self.centralwidget)\n self.info.setObjectName(\"info\")\n self.verticalLayout.addWidget(self.info)\n self.pbar = QtWidgets.QProgressBar(self.centralwidget)\n self.pbar.setProperty(\"value\", 0)\n self.pbar.setObjectName(\"pbar\")\n self.verticalLayout.addWidget(self.pbar)\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n 
MainWindow.setWindowTitle(_translate(\"MainWindow\", \"网易云歌单下载\"))\n self.submit.setText(_translate(\"MainWindow\", \"submit\"))\n self.folder_btn.setText(_translate(\"MainWindow\", \"...\"))\n\n self.submit.clicked.connect(self.start_pbar) #### no mycode()\n self.folder_btn.clicked.connect(self.select_folder)\n # for .app file\n # self.current_path=Path(QtCore.QCoreApplication.applicationDirPath()).parents[1]\n\n # for unix file\n self.current_path = path.expanduser(\"~\")\n # print(self.current_path)\n self.cp = ConfigParser()\n\n self.config_dir = self.current_path + '/.batch'\n # print(self.config_dir)\n self.config_file = self.config_dir + '/settings.ini'\n # print(self.config_file)\n\n if not path.isfile(self.config_file):\n Path('./.batch').mkdir(parents=True, exist_ok=True)\n Path(self.config_file).touch(exist_ok=True)\n self.cp.add_section('Default')\n self.cp.add_section('User')\n self.cp['Default']['folder'] = self.current_path\n self.cp['User']['folder'] = ''\n with open(self.config_file, 'w') as f:\n self.cp.write(f)\n # load ini file\n self.cp.read(self.config_file)\n # if user had set cwd, use that cwd\n if self.cp['User']['folder'] != '':\n self.current_path = self.cp['User']['folder']\n input(\"Press enter to close program\")\n # #####################\n # if getattr(sys,'frozen',False):\n # # self.statusbar.showMessage(sys.executable)\n # # chdir(path.dirname(sys.executable))\n # # self.statusbar.showMessage(sys.argv[0])\n # # self.current_path=sys.executable\n # self.current_path=sys.argv[0]\n # # chdir(path.dirname(sys.argv[0]))\n # else:\n # self.current_path = path.abspath(path.dirname(__file__))\n # #####################\n\n chdir(self.current_path)\n self.statusbar.showMessage('当前目录:' + getcwd())\n\n # self.statusbar.showMessage('当前目录:'+getcwd())\n # os.getcwd() is not always correct\n # with os.path you need to check whether this is a script or an application bundle\n\n def start_pbar(self):\n self.thread = mythread()\n self.pbar.setValue(0)\n self.thread.pid = self.pid.text()\n # self.thread.folder = self.folder\n self.thread.val.connect(self.set_pbar)\n self.thread.msg.connect(self.set_msg)\n self.thread.max.connect(self.set_pbar_max)\n self.thread.start()\n # msg=QtWidgets.QMessageBox()\n # msg.setText(\"abc\")\n\n def set_pbar(self, val):\n self.pbar.setValue(val)\n\n def set_pbar_max(self, max):\n self.pbar.setMaximum(max)\n\n def set_msg(self, msg):\n self.info.append(msg)\n\n def select_folder(self):\n self.folder = QFileDialog.getExistingDirectory()\n chdir(self.folder) # reuse the directory just chosen instead of opening the dialog a second time\n # the ini file was loaded earlier, so we can just edit it\n self.cp['User']['folder'] = self.folder\n with open(self.config_file, 'w') as f:\n self.cp.write(f)\n self.statusbar.showMessage('当前目录:' + str(self.folder))\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n" ]
[ [ "pandas.DataFrame" ] ]
itsvetkov/pyqtgraph
[ "aa26d8ac82e00ea9ba992fef365933960e9e8aa2" ]
[ "pyqtgraph/functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nfunctions.py - Miscellaneous functions with no other home\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more infomation.\n\"\"\"\n\nfrom __future__ import division\nfrom .python2_3 import asUnicode\nfrom .Qt import QtGui, QtCore, USE_PYSIDE\nColors = {\n 'b': QtGui.QColor(0,0,255,255),\n 'g': QtGui.QColor(0,255,0,255),\n 'r': QtGui.QColor(255,0,0,255),\n 'c': QtGui.QColor(0,255,255,255),\n 'm': QtGui.QColor(255,0,255,255),\n 'y': QtGui.QColor(255,255,0,255),\n 'k': QtGui.QColor(0,0,0,255),\n 'w': QtGui.QColor(255,255,255,255),\n 'd': QtGui.QColor(150,150,150,255),\n 'l': QtGui.QColor(200,200,200,255),\n 's': QtGui.QColor(100,100,150,255),\n} \n\nSI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')\nSI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'\n\n\n\nfrom .Qt import QtGui, QtCore, USE_PYSIDE\nfrom . import getConfigOption, setConfigOptions\nimport numpy as np\nimport decimal, re\nimport ctypes\nimport sys, struct\n\nfrom . import debug\n\ndef siScale(x, minVal=1e-25, allowUnicode=True):\n \"\"\"\n Return the recommended scale factor and SI prefix string for x.\n \n Example::\n \n siScale(0.0001) # returns (1e6, 'μ')\n # This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits\n \"\"\"\n \n if isinstance(x, decimal.Decimal):\n x = float(x)\n \n try:\n if np.isnan(x) or np.isinf(x):\n return(1, '')\n except:\n print(x, type(x))\n raise\n if abs(x) < minVal:\n m = 0\n x = 0\n else:\n m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))\n \n if m == 0:\n pref = ''\n elif m < -8 or m > 8:\n pref = 'e%d' % (m*3)\n else:\n if allowUnicode:\n pref = SI_PREFIXES[m+8]\n else:\n pref = SI_PREFIXES_ASCII[m+8]\n p = .001**m\n \n return (p, pref) \n\ndef siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):\n \"\"\"\n Return the number x formatted in engineering notation with SI prefix.\n \n Example::\n siFormat(0.0001, suffix='V') # returns \"100 μV\"\n \"\"\"\n \n if space is True:\n space = ' '\n if space is False:\n space = ''\n \n \n (p, pref) = siScale(x, minVal, allowUnicode)\n if not (len(pref) > 0 and pref[0] == 'e'):\n pref = space + pref\n \n if error is None:\n fmt = \"%.\" + str(precision) + \"g%s%s\"\n return fmt % (x*p, pref, suffix)\n else:\n if allowUnicode:\n plusminus = space + asUnicode(\"±\") + space\n else:\n plusminus = \" +/- \"\n fmt = \"%.\" + str(precision) + \"g%s%s%s%s\"\n return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))\n \ndef siEval(s):\n \"\"\"\n Convert a value written in SI notation to its equivalent prefixless value\n \n Example::\n \n siEval(\"100 μV\") # returns 0.0001\n \"\"\"\n \n s = asUnicode(s)\n m = re.match(r'(-?((\\d+(\\.\\d*)?)|(\\.\\d+))([eE]-?\\d+)?)\\s*([u' + SI_PREFIXES + r']?).*$', s)\n if m is None:\n raise Exception(\"Can't convert string '%s' to number.\" % s)\n v = float(m.groups()[0])\n p = m.groups()[6]\n #if p not in SI_PREFIXES:\n #raise Exception(\"Can't convert string '%s' to number--unknown prefix.\" % s)\n if p == '':\n n = 0\n elif p == 'u':\n n = -2\n else:\n n = SI_PREFIXES.index(p) - 8\n return v * 1000**n\n \n\nclass Color(QtGui.QColor):\n def __init__(self, *args):\n QtGui.QColor.__init__(self, mkColor(*args))\n \n def glColor(self):\n \"\"\"Return (r,g,b,a) normalized for use in opengl\"\"\"\n return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)\n \n def __getitem__(self, ind):\n return 
(self.red, self.green, self.blue, self.alpha)[ind]()\n \n \ndef mkColor(*args):\n \"\"\"\n Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:\n \n ================ ================================================\n 'c' one of: r, g, b, c, m, y, k, w \n R, G, B, [A] integers 0-255\n (R, G, B, [A]) tuple of integers 0-255\n float greyscale, 0.0-1.0\n int see :func:`intColor() <pyqtgraph.intColor>`\n (int, hues) see :func:`intColor() <pyqtgraph.intColor>`\n \"RGB\" hexadecimal strings; may begin with '#'\n \"RGBA\" \n \"RRGGBB\" \n \"RRGGBBAA\" \n QColor QColor instance; makes a copy.\n ================ ================================================\n \"\"\"\n err = 'Not sure how to make a color from \"%s\"' % str(args)\n if len(args) == 1:\n if isinstance(args[0], basestring):\n c = args[0]\n if c[0] == '#':\n c = c[1:]\n if len(c) == 1:\n try:\n return Colors[c]\n except KeyError:\n raise Exception('No color named \"%s\"' % c)\n if len(c) == 3:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = 255\n elif len(c) == 4:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = int(c[3]*2, 16)\n elif len(c) == 6:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = int(c[6:8], 16)\n elif isinstance(args[0], QtGui.QColor):\n return QtGui.QColor(args[0])\n elif isinstance(args[0], float):\n r = g = b = int(args[0] * 255)\n a = 255\n elif hasattr(args[0], '__len__'):\n if len(args[0]) == 3:\n (r, g, b) = args[0]\n a = 255\n elif len(args[0]) == 4:\n (r, g, b, a) = args[0]\n elif len(args[0]) == 2:\n return intColor(*args[0])\n else:\n raise Exception(err)\n elif type(args[0]) == int:\n return intColor(args[0])\n else:\n raise Exception(err)\n elif len(args) == 3:\n (r, g, b) = args\n a = 255\n elif len(args) == 4:\n (r, g, b, a) = args\n else:\n raise Exception(err)\n \n args = [r,g,b,a]\n args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]\n args = list(map(int, args))\n return QtGui.QColor(*args)\n\n\ndef mkBrush(*args, **kwds):\n \"\"\"\n | Convenience function for constructing Brush.\n | This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`\n | Calling mkBrush(None) returns an invisible brush.\n \"\"\"\n if 'color' in kwds:\n color = kwds['color']\n elif len(args) == 1:\n arg = args[0]\n if arg is None:\n return QtGui.QBrush(QtCore.Qt.NoBrush)\n elif isinstance(arg, QtGui.QBrush):\n return QtGui.QBrush(arg)\n else:\n color = arg\n elif len(args) > 1:\n color = args\n return QtGui.QBrush(mkColor(color))\n\ndef mkPen(*args, **kargs):\n \"\"\"\n Convenience function for constructing QPen. 
\n \n Examples::\n \n mkPen(color)\n mkPen(color, width=2)\n mkPen(cosmetic=False, width=4.5, color='r')\n mkPen({'color': \"FF0\", width: 2})\n mkPen(None) # (no pen)\n \n In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` \"\"\"\n \n color = kargs.get('color', None)\n width = kargs.get('width', 1)\n style = kargs.get('style', None)\n dash = kargs.get('dash', None)\n cosmetic = kargs.get('cosmetic', True)\n hsv = kargs.get('hsv', None)\n \n if len(args) == 1:\n arg = args[0]\n if isinstance(arg, dict):\n return mkPen(**arg)\n if isinstance(arg, QtGui.QPen):\n return QtGui.QPen(arg) ## return a copy of this pen\n elif arg is None:\n style = QtCore.Qt.NoPen\n else:\n color = arg\n if len(args) > 1:\n color = args\n \n if color is None:\n color = mkColor('l')\n if hsv is not None:\n color = hsvColor(*hsv)\n else:\n color = mkColor(color)\n \n pen = QtGui.QPen(QtGui.QBrush(color), width)\n pen.setCosmetic(cosmetic)\n if style is not None:\n pen.setStyle(style)\n if dash is not None:\n pen.setDashPattern(dash)\n return pen\n\ndef hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):\n \"\"\"Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)\"\"\"\n c = QtGui.QColor()\n c.setHsvF(hue, sat, val, alpha)\n return c\n\n \ndef colorTuple(c):\n \"\"\"Return a tuple (R,G,B,A) from a QColor\"\"\"\n return (c.red(), c.green(), c.blue(), c.alpha())\n\ndef colorStr(c):\n \"\"\"Generate a hex string code from a QColor\"\"\"\n return ('%02x'*4) % colorTuple(c)\n\ndef intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255, **kargs):\n \"\"\"\n Creates a QColor from a single index. Useful for stepping through a predefined list of colors.\n \n The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be\n \n Colors are chosen by cycling across hues while varying the value (brightness). 
\n By default, this selects from a list of 9 hues.\"\"\"\n hues = int(hues)\n values = int(values)\n ind = int(index) % (hues * values)\n indh = ind % hues\n indv = ind / hues\n if values > 1:\n v = minValue + indv * ((maxValue-minValue) / (values-1))\n else:\n v = maxValue\n h = minHue + (indh * (maxHue-minHue)) / hues\n \n c = QtGui.QColor()\n c.setHsv(h, sat, v)\n c.setAlpha(alpha)\n return c\n\ndef glColor(*args, **kargs):\n \"\"\"\n Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0\n Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.\n \"\"\"\n c = mkColor(*args, **kargs)\n return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)\n\n \n\ndef makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):\n \"\"\"\n Construct a path outlining an arrow with the given dimensions.\n The arrow points in the -x direction with tip positioned at 0,0.\n If *tipAngle* is supplied (in degrees), it overrides *headWidth*.\n If *tailLen* is None, no tail will be drawn.\n \"\"\"\n headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)\n path = QtGui.QPainterPath()\n path.moveTo(0,0)\n path.lineTo(headLen, -headWidth)\n if tailLen is None:\n innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)\n path.lineTo(innerY, 0)\n else:\n tailWidth *= 0.5\n innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)\n path.lineTo(innerY, -tailWidth)\n path.lineTo(headLen + tailLen, -tailWidth)\n path.lineTo(headLen + tailLen, tailWidth)\n path.lineTo(innerY, tailWidth)\n path.lineTo(headLen, headWidth)\n path.lineTo(0,0)\n return path\n \n \n \ndef affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):\n \"\"\"\n Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays such as MRI images for viewing as 1D or 2D data.\n \n The slicing axes are aribtrary; they do not need to be orthogonal to the original data or even to each other. It is possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger datasets. The original data is interpolated onto a new array of coordinates using scipy.ndimage.map_coordinates if it is available (see the scipy documentation for more information about this). If scipy is not available, then a slower implementation of map_coordinates is used.\n \n For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`\n \n ============== ====================================================================================================\n **Arguments:**\n *data* (ndarray) the original dataset\n *shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))\n *origin* the location in the original dataset that will become the origin of the sliced data.\n *vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same \n length as *axes*. If the vectors are not unit length, the result will be scaled relative to the \n original data. If the vectors are not orthogonal, the result will be sheared relative to the \n original data.\n *axes* The axes in the original dataset which correspond to the slice *vectors*\n *order* The order of spline interpolation. Default is 1 (linear). 
See scipy.ndimage.map_coordinates\n for more information.\n *returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select\n values from the original dataset.\n *All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*\n --------------------------------------------------------------------------------------------------------------------\n ============== ====================================================================================================\n \n Note the following must be true: \n \n | len(shape) == len(vectors) \n | len(origin) == len(axes) == len(vectors[i])\n \n Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes\n \n * data = array with dims (time, x, y, z) = (100, 40, 40, 40)\n * The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1) \n * The origin of the slice will be at (x,y,z) = (40, 0, 0)\n * We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)\n \n The call for this example would look like::\n \n affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))\n \n \"\"\"\n try:\n import scipy.ndimage\n have_scipy = True\n except ImportError:\n have_scipy = False\n have_scipy = False\n\n # sanity check\n if len(shape) != len(vectors):\n raise Exception(\"shape and vectors must have same length.\")\n if len(origin) != len(axes):\n raise Exception(\"origin and axes must have same length.\")\n for v in vectors:\n if len(v) != len(axes):\n raise Exception(\"each vector must be same length as axes.\")\n \n shape = list(map(np.ceil, shape))\n\n ## transpose data so slice axes come first\n trAx = list(range(data.ndim))\n for x in axes:\n trAx.remove(x)\n tr1 = tuple(axes) + tuple(trAx)\n data = data.transpose(tr1)\n #print \"tr1:\", tr1\n ## dims are now [(slice axes), (other axes)]\n \n ## make sure vectors are arrays\n if not isinstance(vectors, np.ndarray):\n vectors = np.array(vectors)\n if not isinstance(origin, np.ndarray):\n origin = np.array(origin)\n origin.shape = (len(axes),) + (1,)*len(shape)\n \n ## Build array of sample locations. \n grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes\n #print shape, grid.shape\n x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic\n x += origin\n #print \"X values:\"\n #print x\n ## iterate manually over unused axes since map_coordinates won't do it for us\n if have_scipy:\n extraShape = data.shape[len(axes):]\n output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)\n for inds in np.ndindex(*extraShape):\n ind = (Ellipsis,) + inds\n output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)\n else:\n # map_coordinates expects the indexes as the first axis, whereas\n # interpolateArray expects indexes at the last axis. \n tr = tuple(range(1,x.ndim)) + (0,)\n output = interpolateArray(data, x.transpose(tr))\n \n \n tr = list(range(output.ndim))\n trb = []\n for i in range(min(axes)):\n ind = tr1.index(i) + (len(shape)-len(axes))\n tr.remove(ind)\n trb.append(ind)\n tr2 = tuple(trb+tr)\n\n ## Untranspose array before returning\n output = output.transpose(tr2)\n if returnCoords:\n return (output, x)\n else:\n return output\n\ndef interpolateArray(data, x, default=0.0):\n \"\"\"\n N-dimensional interpolation similar scipy.ndimage.map_coordinates.\n \n This function returns linearly-interpolated values sampled from a regular\n grid of data. 
\n \n *data* is an array of any shape containing the values to be interpolated.\n *x* is an array with (shape[-1] <= data.ndim) containing the locations\n within *data* to interpolate. \n \n Returns array of shape (x.shape[:-1] + data.shape)\n \n For example, assume we have the following 2D image data::\n \n >>> data = np.array([[1, 2, 4 ],\n [10, 20, 40 ],\n [100, 200, 400]])\n \n To compute a single interpolated point from this data::\n \n >>> x = np.array([(0.5, 0.5)])\n >>> interpolateArray(data, x)\n array([ 8.25])\n \n To compute a 1D list of interpolated locations:: \n \n >>> x = np.array([(0.5, 0.5),\n (1.0, 1.0),\n (1.0, 2.0),\n (1.5, 0.0)])\n >>> interpolateArray(data, x)\n array([ 8.25, 20. , 40. , 55. ])\n \n To compute a 2D array of interpolated locations::\n \n >>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],\n [(1.0, 1.0), (1.5, 0.0)]])\n >>> interpolateArray(data, x)\n array([[ 8.25, 40. ],\n [ 20. , 55. ]])\n \n ..and so on. The *x* argument may have any shape as long as \n ```x.shape[-1] <= data.ndim```. In the case that \n ```x.shape[-1] < data.ndim```, then the remaining axes are simply \n broadcasted as usual. For example, we can interpolate one location\n from an entire row of the data::\n \n >>> x = np.array([[0.5]])\n >>> interpolateArray(data, x)\n array([[ 5.5, 11. , 22. ]])\n\n This is useful for interpolating from arrays of colors, vertexes, etc.\n \"\"\"\n \n prof = debug.Profiler()\n \n result = np.empty(x.shape[:-1] + data.shape, dtype=data.dtype)\n nd = data.ndim\n md = x.shape[-1]\n\n # First we generate arrays of indexes that are needed to \n # extract the data surrounding each point\n fields = np.mgrid[(slice(0,2),) * md]\n xmin = np.floor(x).astype(int)\n xmax = xmin + 1\n indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])\n fieldInds = []\n totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes\n for ax in range(md):\n mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1) \n # keep track of points that need to be set to default\n totalMask &= mask \n \n # ..and keep track of indexes that are out of bounds \n # (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out\n # of bounds, but the interpolation will work anyway)\n mask &= (xmax[...,ax] < data.shape[ax])\n axisIndex = indexes[...,ax][fields[ax]]\n #axisMask = mask.astype(np.ubyte).reshape((1,)*(fields.ndim-1) + mask.shape)\n axisIndex[axisIndex < 0] = 0\n axisIndex[axisIndex >= data.shape[ax]] = 0\n fieldInds.append(axisIndex)\n prof()\n \n # Get data values surrounding each requested point\n # fieldData[..., i] contains all 2**nd values needed to interpolate x[i]\n fieldData = data[tuple(fieldInds)]\n prof()\n \n ## Interpolate\n s = np.empty((md,) + fieldData.shape, dtype=float)\n dx = x - xmin\n # reshape fields for arithmetic against dx\n for ax in range(md):\n f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))\n sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])\n sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))\n s[ax] = sax\n s = np.product(s, axis=0)\n result = fieldData * s\n for i in range(md):\n result = result.sum(axis=0)\n\n prof()\n totalMask.shape = totalMask.shape + (1,) * (nd - md)\n result[~totalMask] = default\n prof()\n return result\n\n\ndef transformToArray(tr):\n \"\"\"\n Given a QTransform, return a 3x3 numpy array.\n Given a QMatrix4x4, return a 4x4 numpy array.\n \n Example: map an array of x,y coordinates through a transform::\n \n ## coordinates to map are (1,5), (2,6), (3,7), and 
(4,8)\n coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work\n \n ## Make an example transform\n tr = QtGui.QTransform()\n tr.translate(3,4)\n tr.scale(2, 0.1)\n \n ## convert to array\n m = pg.transformToArray()[:2] # ignore the perspective portion of the transformation\n \n ## map coordinates through transform\n mapped = np.dot(m, coords)\n \"\"\"\n #return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])\n ## The order of elements given by the method names m11..m33 is misleading--\n ## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in\n ## a transformation matrix. However, with QTransform these values appear at m31 and m32.\n ## So the correct interpretation is transposed:\n if isinstance(tr, QtGui.QTransform):\n return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])\n elif isinstance(tr, QtGui.QMatrix4x4):\n return np.array(tr.copyDataTo()).reshape(4,4)\n else:\n raise Exception(\"Transform argument must be either QTransform or QMatrix4x4.\")\n\ndef transformCoordinates(tr, coords, transpose=False):\n \"\"\"\n Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.\n The shape of coords must be (2,...) or (3,...)\n The mapping will _ignore_ any perspective transformations.\n \n For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.\n Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To \n allow this, use transpose=True.\n \n \"\"\"\n \n if transpose:\n ## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.\n coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))\n \n nd = coords.shape[0]\n if isinstance(tr, np.ndarray):\n m = tr\n else:\n m = transformToArray(tr)\n m = m[:m.shape[0]-1] # remove perspective\n \n ## If coords are 3D and tr is 2D, assume no change for Z axis\n if m.shape == (2,3) and nd == 3:\n m2 = np.zeros((3,4))\n m2[:2, :2] = m[:2,:2]\n m2[:2, 3] = m[:2,2]\n m2[2,2] = 1\n m = m2\n \n ## if coords are 2D and tr is 3D, ignore Z axis\n if m.shape == (3,4) and nd == 2:\n m2 = np.empty((2,3))\n m2[:,:2] = m[:2,:2]\n m2[:,2] = m[:2,3]\n m = m2\n \n ## reshape tr and coords to prepare for multiplication\n m = m.reshape(m.shape + (1,)*(coords.ndim-1))\n coords = coords[np.newaxis, ...]\n \n # separate scale/rotate and translation \n translate = m[:,-1] \n m = m[:, :-1]\n \n ## map coordinates and return\n mapped = (m*coords).sum(axis=1) ## apply scale/rotate\n mapped += translate\n \n if transpose:\n ## move first axis to end.\n mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))\n return mapped\n \n \n\n \ndef solve3DTransform(points1, points2):\n \"\"\"\n Find a 3D transformation matrix that maps points1 onto points2.\n Points must be specified as either lists of 4 Vectors or \n (4, 3) arrays.\n \"\"\"\n import numpy.linalg\n pts = []\n for inp in (points1, points2):\n if isinstance(inp, np.ndarray):\n A = np.empty((4,4), dtype=float)\n A[:,:3] = inp[:,:3]\n A[:,3] = 1.0\n else:\n A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])\n pts.append(A)\n \n ## solve 3 sets of linear equations to determine transformation matrix elements\n matrix = np.zeros((4,4))\n for i in range(3):\n ## solve Ax = B; x is one row of the desired transformation matrix\n matrix[i] = 
numpy.linalg.solve(pts[0], pts[1][:,i]) \n \n return matrix\n \ndef solveBilinearTransform(points1, points2):\n \"\"\"\n Find a bilinear transformation matrix (2x4) that maps points1 onto points2.\n Points must be specified as a list of 4 Vector, Point, QPointF, etc.\n \n To use this matrix to map a point [x,y]::\n \n mapped = np.dot(matrix, [x*y, x, y, 1])\n \"\"\"\n import numpy.linalg\n ## A is 4 rows (points) x 4 columns (xy, x, y, 1)\n ## B is 4 rows (points) x 2 columns (x, y)\n A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])\n B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])\n \n ## solve 2 sets of linear equations to determine transformation matrix elements\n matrix = np.zeros((2,4))\n for i in range(2):\n matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix\n \n return matrix\n \ndef rescaleData(data, scale, offset, dtype=None):\n \"\"\"Return data rescaled and optionally cast to a new dtype::\n \n data => (data-offset) * scale\n \n Uses scipy.weave (if available) to improve performance.\n \"\"\"\n if dtype is None:\n dtype = data.dtype\n else:\n dtype = np.dtype(dtype)\n \n try:\n if not getConfigOption('useWeave'):\n raise Exception('Weave is disabled; falling back to slower version.')\n try:\n import scipy.weave\n except ImportError:\n raise Exception('scipy.weave is not importable; falling back to slower version.')\n \n ## require native dtype when using weave\n if not data.dtype.isnative:\n data = data.astype(data.dtype.newbyteorder('='))\n if not dtype.isnative:\n weaveDtype = dtype.newbyteorder('=')\n else:\n weaveDtype = dtype\n \n newData = np.empty((data.size,), dtype=weaveDtype)\n flat = np.ascontiguousarray(data).reshape(data.size)\n size = data.size\n \n code = \"\"\"\n double sc = (double)scale;\n double off = (double)offset;\n for( int i=0; i<size; i++ ) {\n newData[i] = ((double)flat[i] - off) * sc;\n }\n \"\"\"\n scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')\n if dtype != weaveDtype:\n newData = newData.astype(dtype)\n data = newData.reshape(data.shape)\n except:\n if getConfigOption('useWeave'):\n if getConfigOption('weaveDebug'):\n debug.printExc(\"Error; disabling weave.\")\n setConfigOptions(useWeave=False)\n \n #p = np.poly1d([scale, -offset*scale])\n #data = p(data).astype(dtype)\n d2 = data-offset\n d2 *= scale\n data = d2.astype(dtype)\n return data\n \ndef applyLookupTable(data, lut):\n \"\"\"\n Uses values in *data* as indexes to select values from *lut*.\n The returned data has shape data.shape + lut.shape[1:]\n \n Note: color gradient lookup tables can be generated using GradientWidget.\n \"\"\"\n if data.dtype.kind not in ('i', 'u'):\n data = data.astype(int)\n \n return np.take(lut, data, axis=0, mode='clip') \n \n\ndef makeRGBA(*args, **kwds):\n \"\"\"Equivalent to makeARGB(..., useRGBA=True)\"\"\"\n kwds['useRGBA'] = True\n return makeARGB(*args, **kwds)\n\ndef makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False): \n \"\"\" \n Convert an array of values into an ARGB array suitable for building QImages, OpenGL textures, etc.\n \n Returns the ARGB array (values 0-255) and a boolean indicating whether there is alpha channel data.\n This is a two stage process:\n \n 1) Rescale the data based on the values in the *levels* argument (min, max).\n 2) Determine the final output by passing the rescaled values through a lookup table.\n \n Both stages are optional.\n \n 
============== ==================================================================================\n **Arguments:**\n data numpy array of int/float types. If \n levels List [min, max]; optionally rescale data before converting through the\n lookup table. The data is rescaled such that min->0 and max->*scale*::\n \n rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))\n \n It is also possible to use a 2D (N,2) array of values for levels. In this case,\n it is assumed that each pair of min,max values in the levels array should be \n applied to a different subset of the input data (for example, the input data may \n already have RGB values and the levels are used to independently scale each \n channel). The use of this feature requires that levels.shape[0] == data.shape[-1].\n scale The maximum value to which data will be rescaled before being passed through the \n lookup table (or returned if there is no lookup table). By default this will\n be set to the length of the lookup table, or 256 is no lookup table is provided.\n For OpenGL color specifications (as in GLColor4f) use scale=1.0\n lut Optional lookup table (array with dtype=ubyte).\n Values in data will be converted to color by indexing directly from lut.\n The output data shape will be input.shape + lut.shape[1:].\n \n Note: the output of makeARGB will have the same dtype as the lookup table, so\n for conversion to QImage, the dtype must be ubyte.\n \n Lookup tables can be built using GradientWidget.\n useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures). \n The default is False, which returns in ARGB order for use with QImage \n (Note that 'ARGB' is a term used by the Qt documentation; the _actual_ order \n is BGRA).\n ============== ==================================================================================\n \"\"\"\n profile = debug.Profiler()\n \n if lut is not None and not isinstance(lut, np.ndarray):\n lut = np.array(lut)\n if levels is not None and not isinstance(levels, np.ndarray):\n levels = np.array(levels)\n \n if levels is not None:\n if levels.ndim == 1:\n if len(levels) != 2:\n raise Exception('levels argument must have length 2')\n elif levels.ndim == 2:\n if lut is not None and lut.ndim > 1:\n raise Exception('Cannot make ARGB data when bot levels and lut have ndim > 2')\n if levels.shape != (data.shape[-1], 2):\n raise Exception('levels must have shape (data.shape[-1], 2)')\n else:\n print(levels)\n raise Exception(\"levels argument must be 1D or 2D.\")\n\n profile()\n\n if scale is None:\n if lut is not None:\n scale = lut.shape[0]\n else:\n scale = 255.\n\n ## Apply levels if given\n if levels is not None:\n \n if isinstance(levels, np.ndarray) and levels.ndim == 2:\n ## we are going to rescale each channel independently\n if levels.shape[0] != data.shape[-1]:\n raise Exception(\"When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])\")\n newData = np.empty(data.shape, dtype=int)\n for i in range(data.shape[-1]):\n minVal, maxVal = levels[i]\n if minVal == maxVal:\n maxVal += 1e-16\n newData[...,i] = rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=int)\n data = newData\n else:\n minVal, maxVal = levels\n if minVal == maxVal:\n maxVal += 1e-16\n if maxVal == minVal:\n data = rescaleData(data, 1, minVal, dtype=int)\n else:\n data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=int)\n\n profile()\n\n ## apply LUT if given\n if lut is not None:\n data = 
applyLookupTable(data, lut)\n else:\n if data.dtype is not np.ubyte:\n data = np.clip(data, 0, 255).astype(np.ubyte)\n\n profile()\n\n ## copy data into ARGB ordered array\n imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)\n\n profile()\n\n if useRGBA:\n order = [0,1,2,3] ## array comes out RGBA\n else:\n order = [2,1,0,3] ## for some reason, the colors line up as BGR in the final image.\n \n if data.ndim == 2:\n # This is tempting:\n # imgData[..., :3] = data[..., np.newaxis]\n # ..but it turns out this is faster:\n for i in range(3):\n imgData[..., i] = data\n elif data.shape[2] == 1:\n for i in range(3):\n imgData[..., i] = data[..., 0]\n else:\n for i in range(0, data.shape[2]):\n imgData[..., i] = data[..., order[i]] \n \n profile()\n \n if data.ndim == 2 or data.shape[2] == 3:\n alpha = False\n imgData[..., 3] = 255\n else:\n alpha = True\n \n profile()\n return imgData, alpha\n\n\ndef makeQImage(imgData, alpha=None, copy=True, transpose=True):\n \"\"\"\n Turn an ARGB array into QImage.\n By default, the data is copied; changes to the array will not\n be reflected in the image. The image will be given a 'data' attribute\n pointing to the array which shares its data to prevent python\n freeing that memory while the image is in use.\n \n ============== ===================================================================\n **Arguments:**\n imgData Array of data to convert. Must have shape (width, height, 3 or 4) \n and dtype=ubyte. The order of values in the 3rd axis must be \n (b, g, r, a).\n alpha If True, the QImage returned will have format ARGB32. If False,\n the format will be RGB32. By default, _alpha_ is True if\n array.shape[2] == 4.\n copy If True, the data is copied before converting to QImage.\n If False, the new QImage points directly to the data in the array.\n Note that the array must be contiguous for this to work\n (see numpy.ascontiguousarray).\n transpose If True (the default), the array x/y axes are transposed before \n creating the image. 
Note that Qt expects the axes to be in \n (height, width) order whereas pyqtgraph usually prefers the \n opposite.\n ============== =================================================================== \n \"\"\"\n ## create QImage from buffer\n profile = debug.Profiler()\n \n ## If we didn't explicitly specify alpha, check the array shape.\n if alpha is None:\n alpha = (imgData.shape[2] == 4)\n \n copied = False\n if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)\n if copy is True:\n d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)\n d2[:,:,:3] = imgData\n d2[:,:,3] = 255\n imgData = d2\n copied = True\n else:\n raise Exception('Array has only 3 channels; cannot make QImage without copying.')\n \n if alpha:\n imgFormat = QtGui.QImage.Format_ARGB32\n else:\n imgFormat = QtGui.QImage.Format_RGB32\n \n if transpose:\n imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite\n\n profile()\n\n if not imgData.flags['C_CONTIGUOUS']:\n if copy is False:\n extra = ' (try setting transpose=False)' if transpose else ''\n raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)\n imgData = np.ascontiguousarray(imgData)\n copied = True\n \n if copy is True and copied is False:\n imgData = imgData.copy()\n \n if USE_PYSIDE:\n ch = ctypes.c_char.from_buffer(imgData, 0)\n img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)\n else:\n #addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))\n ## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)\n ## So we first attempt the 4.9.6 API, then fall back to 4.9.3\n #addr = ctypes.c_char.from_buffer(imgData, 0)\n #try:\n #img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)\n #except TypeError: \n #addr = ctypes.addressof(addr)\n #img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)\n try:\n img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)\n except:\n if copy:\n # does not leak memory, is not mutable\n img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)\n else:\n # mutable, but leaks memory\n img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)\n \n img.data = imgData\n return img\n #try:\n #buf = imgData.data\n #except AttributeError: ## happens when image data is non-contiguous\n #buf = imgData.data\n \n #profiler()\n #qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)\n #profiler()\n #qimage.data = imgData\n #return qimage\n\ndef imageToArray(img, copy=False, transpose=True):\n \"\"\"\n Convert a QImage into numpy array. 
The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.\n By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if \n the QImage is collected before the array, there may be trouble).\n The array will have shape (width, height, (b,g,r,a)).\n \"\"\"\n fmt = img.format()\n ptr = img.bits()\n if USE_PYSIDE:\n arr = np.frombuffer(ptr, dtype=np.ubyte)\n else:\n ptr.setsize(img.byteCount())\n arr = np.asarray(ptr)\n if img.byteCount() != arr.size * arr.itemsize:\n # Required for Python 2.6, PyQt 4.10\n # If this works on all platforms, then there is no need to use np.asarray..\n arr = np.frombuffer(ptr, np.ubyte, img.byteCount())\n \n if fmt == img.Format_RGB32:\n arr = arr.reshape(img.height(), img.width(), 3)\n elif fmt == img.Format_ARGB32 or fmt == img.Format_ARGB32_Premultiplied:\n arr = arr.reshape(img.height(), img.width(), 4)\n \n if copy:\n arr = arr.copy()\n \n if transpose:\n return arr.transpose((1,0,2))\n else:\n return arr\n \ndef colorToAlpha(data, color):\n \"\"\"\n Given an RGBA image in *data*, convert *color* to be transparent. \n *data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be \n an array (3) of ubyte values.\n This is particularly useful for use with images that have a black or white background.\n \n Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c\n Credit:\n /*\n * Color To Alpha plug-in v1.0 by Seth Burgess, [email protected] 1999/05/14\n * with algorithm by clahey\n */\n \n \"\"\"\n data = data.astype(float)\n if data.shape[-1] == 3: ## add alpha channel if needed\n d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)\n d2[...,:3] = data\n d2[...,3] = 255\n data = d2\n \n color = color.astype(float)\n alpha = np.zeros(data.shape[:2]+(3,), dtype=float)\n output = data.copy()\n \n for i in [0,1,2]:\n d = data[...,i]\n c = color[i]\n mask = d > c\n alpha[...,i][mask] = (d[mask] - c) / (255. - c)\n imask = d < c\n alpha[...,i][imask] = (c - d[imask]) / c\n \n output[...,3] = alpha.max(axis=2) * 255.\n \n mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel\n correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha\n for i in [0,1,2]:\n output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]\n output[...,3][mask] *= data[...,3][mask] / 255. 
## combine computed and previous alpha values\n \n #raise Exception()\n return np.clip(output, 0, 255).astype(np.ubyte)\n\ndef gaussianFilter(data, sigma):\n \"\"\"\n Drop-in replacement for scipy.ndimage.gaussian_filter.\n \n (note: results are only approximately equal to the output of\n gaussian_filter)\n \"\"\"\n if np.isscalar(sigma):\n sigma = (sigma,) * data.ndim\n \n baseline = data.mean()\n filtered = data - baseline\n for ax in range(data.ndim):\n s = sigma[ax]\n if s == 0:\n continue\n \n # generate 1D gaussian kernel\n ksize = int(s * 6)\n x = np.arange(-ksize, ksize)\n kernel = np.exp(-x**2 / (2*s**2))\n kshape = [1,] * data.ndim\n kshape[ax] = len(kernel)\n kernel = kernel.reshape(kshape)\n \n # convolve as product of FFTs\n shape = data.shape[ax] + ksize\n scale = 1.0 / (abs(s) * (2*np.pi)**0.5)\n filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * \n np.fft.rfft(kernel, shape, axis=ax), \n axis=ax)\n \n # clip off extra data\n sl = [slice(None)] * data.ndim\n sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)\n filtered = filtered[tuple(sl)]\n return filtered + baseline\n \n \ndef downsample(data, n, axis=0, xvals='subsample'):\n \"\"\"Downsample by averaging points together across axis.\n If multiple axes are specified, runs once per axis.\n If a metaArray is given, then the axis values can be either subsampled\n or downsampled to match.\n \"\"\"\n ma = None\n if (hasattr(data, 'implements') and data.implements('MetaArray')):\n ma = data\n data = data.view(np.ndarray)\n \n \n if hasattr(axis, '__len__'):\n if not hasattr(n, '__len__'):\n n = [n]*len(axis)\n for i in range(len(axis)):\n data = downsample(data, n[i], axis[i])\n return data\n \n nPts = int(data.shape[axis] / n)\n s = list(data.shape)\n s[axis] = nPts\n s.insert(axis+1, n)\n sl = [slice(None)] * data.ndim\n sl[axis] = slice(0, nPts*n)\n d1 = data[tuple(sl)]\n #print d1.shape, s\n d1.shape = tuple(s)\n d2 = d1.mean(axis+1)\n \n if ma is None:\n return d2\n else:\n info = ma.infoCopy()\n if 'values' in info[axis]:\n if xvals == 'subsample':\n info[axis]['values'] = info[axis]['values'][::n][:nPts]\n elif xvals == 'downsample':\n info[axis]['values'] = downsample(info[axis]['values'], n)\n return MetaArray(d2, info=info)\n\n\ndef arrayToQPath(x, y, connect='all'):\n \"\"\"Convert an array of x,y coordinates to QPainterPath as efficiently as possible.\n The *connect* argument may be 'all', indicating that each point should be\n connected to the next; 'pairs', indicating that each pair of points\n should be connected; 'finite', indicating that points are connected only\n where both coordinates are finite; or an array of int32 values (0 or 1)\n indicating connections.\n \"\"\"\n\n ## Create all vertices in path. The method used below creates a binary format so that all\n ## vertices can be read in at once. 
This binary format may change in future versions of Qt,\n ## so the original (slower) method is left here for emergencies:\n #path.moveTo(x[0], y[0])\n #if connect == 'all':\n #for i in range(1, y.shape[0]):\n #path.lineTo(x[i], y[i])\n #elif connect == 'pairs':\n #for i in range(1, y.shape[0]):\n #if i%2 == 0:\n #path.lineTo(x[i], y[i])\n #else:\n #path.moveTo(x[i], y[i])\n #elif isinstance(connect, np.ndarray):\n #for i in range(1, y.shape[0]):\n #if connect[i] == 1:\n #path.lineTo(x[i], y[i])\n #else:\n #path.moveTo(x[i], y[i])\n #else:\n #raise Exception('connect argument must be \"all\", \"pairs\", or array')\n\n ## Speed this up using >> operator\n ## Format is:\n ## numVerts(i4) 0(i4)\n ## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect\n ## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex\n ## ...\n ## 0(i4)\n ##\n ## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')\n\n path = QtGui.QPainterPath()\n\n #profiler = debug.Profiler()\n n = x.shape[0]\n # create empty array, pad with extra space on either end\n arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])\n # write first two integers\n #profiler('allocate empty')\n byteview = arr.view(dtype=np.ubyte)\n byteview[:12] = 0\n byteview.data[12:20] = struct.pack('>ii', n, 0)\n #profiler('pack header')\n # Fill array with vertex values\n arr[1:-1]['x'] = x\n arr[1:-1]['y'] = y\n\n # decide which points are connected by lines\n # (check for an array first so that a reassigned ndarray is never compared to a string)\n if isinstance(connect, np.ndarray):\n arr[1:-1]['c'] = connect\n elif connect == 'pairs':\n connect = np.empty((n//2,2), dtype=np.int32)\n if connect.size != n:\n raise Exception(\"x,y array lengths must be multiple of 2 to use connect='pairs'\")\n connect[:,0] = 1\n connect[:,1] = 0\n arr[1:-1]['c'] = connect.flatten()\n elif connect == 'finite':\n arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)\n elif connect == 'all':\n arr[1:-1]['c'] = 1\n else:\n raise Exception('connect argument must be \"all\", \"pairs\", \"finite\", or array')\n\n #profiler('fill array')\n # write last 0\n lastInd = 20*(n+1)\n byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)\n #profiler('footer')\n # create datastream object and stream into path\n\n ## Avoiding this method because QByteArray(str) leaks memory in PySide\n #buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here\n\n path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away\n try:\n buf = QtCore.QByteArray.fromRawData(path.strn)\n except TypeError:\n buf = QtCore.QByteArray(bytes(path.strn))\n #profiler('create buffer')\n ds = QtCore.QDataStream(buf)\n\n ds >> path\n #profiler('load')\n\n return path\n\n#def isosurface(data, level):\n #\"\"\"\n #Generate isosurface from volumetric data using marching tetrahedra algorithm.\n #See Paul Bourke, \"Polygonising a Scalar Field Using Tetrahedrons\" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)\n \n #*data* 3D numpy array of scalar values\n #*level* The level at which to generate an isosurface\n #\"\"\"\n \n #facets = []\n \n ### mark everything below the isosurface level\n #mask = data < level\n \n #### make eight sub-fields \n #fields = np.empty((2,2,2), dtype=object)\n #slices = [slice(0,-1), slice(1,None)]\n #for i in [0,1]:\n #for j in [0,1]:\n #for k in [0,1]:\n #fields[i,j,k] = mask[slices[i], slices[j], slices[k]]\n \n \n \n ### split each cell into 6 tetrahedra\n ### these all have the same 'orientation'; points 1,2,3 circle \n ### clockwise around point 0\n #tetrahedra = [\n 
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],\n #[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],\n #[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],\n #[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],\n #[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],\n #[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]\n #]\n \n ### each tetrahedron will be assigned an index\n ### which determines how to generate its facets.\n ### this structure is: \n ### facets[index][facet1, facet2, ...]\n ### where each facet is triangular and its points are each \n ### interpolated between two points on the tetrahedron\n ### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]\n ### facet points always circle clockwise if you are looking \n ### at them from below the isosurface.\n #indexFacets = [\n #[], ## all above\n #[[(0,1), (0,2), (0,3)]], # 0 below\n #[[(1,0), (1,3), (1,2)]], # 1 below\n #[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below\n #[[(2,0), (2,1), (2,3)]], # 2 below\n #[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below\n #[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below\n #[[(3,0), (3,1), (3,2)]], # 3 above\n #[[(3,0), (3,2), (3,1)]], # 3 below\n #[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below\n #[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below\n #[[(2,0), (2,3), (2,1)]], # 0,1,3 below\n #[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below\n #[[(1,0), (1,2), (1,3)]], # 0,2,3 below\n #[[(0,1), (0,3), (0,2)]], # 1,2,3 below\n #[] ## all below\n #]\n \n #for tet in tetrahedra:\n \n ### get the 4 fields for this tetrahedron\n #tetFields = [fields[c] for c in tet]\n \n ### generate an index for each grid cell\n #index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8\n \n ### add facets\n #for i in xrange(index.shape[0]): # data x-axis\n #for j in xrange(index.shape[1]): # data y-axis\n #for k in xrange(index.shape[2]): # data z-axis\n #for f in indexFacets[index[i,j,k]]: # faces to generate for this tet\n #pts = []\n #for l in [0,1,2]: # points in this face\n #p1 = tet[f[l][0]] # tet corner 1\n #p2 = tet[f[l][1]] # tet corner 2\n #pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners\n #facets.append(pts)\n\n #return facets\n \n\ndef isocurve(data, level, connected=False, extendToEdge=False, path=False):\n \"\"\"\n Generate isocurve from 2D data using marching squares algorithm.\n \n ============== =========================================================\n **Arguments:**\n data 2D numpy array of scalar values\n level The level at which to generate an isosurface\n connected If False, return a single long list of point pairs\n If True, return multiple long lists of connected point \n locations. (This is slower but better for drawing \n continuous lines)\n extendToEdge If True, extend the curves to reach the exact edges of \n the data. \n path if True, return a QPainterPath rather than a list of \n vertex coordinates. 
This forces connected=True.\n ============== =========================================================\n \n This function is SLOW; plenty of room for optimization here.\n \"\"\" \n \n if path is True:\n connected = True\n \n if extendToEdge:\n d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)\n d2[1:-1, 1:-1] = data\n d2[0, 1:-1] = data[0]\n d2[-1, 1:-1] = data[-1]\n d2[1:-1, 0] = data[:, 0]\n d2[1:-1, -1] = data[:, -1]\n d2[0,0] = d2[0,1]\n d2[0,-1] = d2[1,-1]\n d2[-1,0] = d2[-1,1]\n d2[-1,-1] = d2[-1,-2]\n data = d2\n \n sideTable = [\n [],\n [0,1],\n [1,2],\n [0,2],\n [0,3],\n [1,3],\n [0,1,2,3],\n [2,3],\n [2,3],\n [0,1,2,3],\n [1,3],\n [0,3],\n [0,2],\n [1,2],\n [0,1],\n []\n ]\n \n edgeKey=[\n [(0,1), (0,0)],\n [(0,0), (1,0)],\n [(1,0), (1,1)],\n [(1,1), (0,1)]\n ]\n \n \n lines = []\n \n ## mark everything below the isosurface level\n mask = data < level\n \n ### make four sub-fields and compute indexes for grid cells\n index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)\n fields = np.empty((2,2), dtype=object)\n slices = [slice(0,-1), slice(1,None)]\n for i in [0,1]:\n for j in [0,1]:\n fields[i,j] = mask[slices[i], slices[j]]\n #vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme\n vertIndex = i+2*j\n #print i,j,k,\" : \", fields[i,j,k], 2**vertIndex\n index += fields[i,j] * 2**vertIndex\n #print index\n #print index\n \n ## add lines\n for i in range(index.shape[0]): # data x-axis\n for j in range(index.shape[1]): # data y-axis \n sides = sideTable[index[i,j]]\n for l in range(0, len(sides), 2): ## faces for this grid cell\n edges = sides[l:l+2]\n pts = []\n for m in [0,1]: # points in this face\n p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge\n p2 = edgeKey[edges[m]][1]\n v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2\n v2 = data[i+p2[0], j+p2[1]]\n f = (level-v1) / (v2-v1)\n fi = 1.0 - f\n p = ( ## interpolate between corners\n p1[0]*fi + p2[0]*f + i + 0.5, \n p1[1]*fi + p2[1]*f + j + 0.5\n )\n if extendToEdge:\n ## check bounds\n p = (\n min(data.shape[0]-2, max(0, p[0]-1)),\n min(data.shape[1]-2, max(0, p[1]-1)), \n )\n if connected:\n gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2\n pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)\n else:\n pts.append(p)\n \n lines.append(pts)\n\n if not connected:\n return lines\n \n ## turn disjoint list of segments into continuous lines\n\n #lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]\n #lines = [[(float(a), a), (float(b), b)] for a,b in lines]\n points = {} ## maps each point to its connections\n for a,b in lines:\n if a[1] not in points:\n points[a[1]] = []\n points[a[1]].append([a,b])\n if b[1] not in points:\n points[b[1]] = []\n points[b[1]].append([b,a])\n\n ## rearrange into chains\n for k in list(points.keys()):\n try:\n chains = points[k]\n except KeyError: ## already used this point elsewhere\n continue\n #print \"===========\", k\n for chain in chains:\n #print \" chain:\", chain\n x = None\n while True:\n if x == chain[-1][1]:\n break ## nothing left to do on this chain\n \n x = chain[-1][1]\n if x == k: \n break ## chain has looped; we're done and can ignore the opposite chain\n y = chain[-2][1]\n connects = points[x]\n for conn in connects[:]:\n if conn[1][1] != y:\n #print \" ext:\", conn\n chain.extend(conn[1:])\n #print \" del:\", x\n del points[x]\n if 
chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction\n chains.pop()\n break\n \n\n ## extract point locations \n lines = []\n for chain in points.values():\n if len(chain) == 2:\n chain = chain[1][1:][::-1] + chain[0] # join together ends of chain\n else:\n chain = chain[0]\n lines.append([p[0] for p in chain])\n \n if not path:\n return lines ## a list of pairs of points\n \n path = QtGui.QPainterPath()\n for line in lines:\n path.moveTo(*line[0])\n for p in line[1:]:\n path.lineTo(*p)\n \n return path\n \n \ndef traceImage(image, values, smooth=0.5):\n \"\"\"\n Convert an image to a set of QPainterPath curves.\n One curve will be generated for each item in *values*; each curve outlines the area\n of the image that is closer to its value than to any others.\n \n If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)\n The parameter *smooth* is expressed in pixels.\n \"\"\"\n try:\n import scipy.ndimage as ndi\n except ImportError:\n raise Exception(\"traceImage() requires the package scipy.ndimage, but it is not importable.\")\n \n if values.ndim == 2:\n values = values.T\n values = values[np.newaxis, np.newaxis, ...].astype(float)\n image = image[..., np.newaxis].astype(float)\n diff = np.abs(image-values)\n if values.ndim == 4:\n diff = diff.sum(axis=2)\n \n labels = np.argmin(diff, axis=2)\n \n paths = []\n for i in range(diff.shape[-1]): \n d = (labels==i).astype(float)\n d = gaussianFilter(d, (smooth, smooth))\n lines = isocurve(d, 0.5, connected=True, extendToEdge=True)\n path = QtGui.QPainterPath()\n for line in lines:\n path.moveTo(*line[0])\n for p in line[1:]:\n path.lineTo(*p)\n \n paths.append(path)\n return paths\n \n \n \nIsosurfaceDataCache = None\ndef isosurface(data, level):\n \"\"\"\n Generate isosurface from volumetric data using marching cubes algorithm.\n See Paul Bourke, \"Polygonising a Scalar Field\" \n (http://paulbourke.net/geometry/polygonise/)\n \n *data* 3D numpy array of scalar values\n *level* The level at which to generate an isosurface\n \n Returns an array of vertex coordinates (Nv, 3) and an array of \n per-face vertex indexes (Nf, 3) \n \"\"\"\n ## For improvement, see:\n ## \n ## Efficient implementation of Marching Cubes' cases with topological guarantees.\n ## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.\n ## Journal of Graphics Tools 8(2): pp. 
1-15 (december 2003)\n \n ## Precompute lookup tables on the first run\n global IsosurfaceDataCache\n if IsosurfaceDataCache is None:\n ## map from grid cell index to edge index.\n ## grid cell index tells us which corners are below the isosurface,\n ## edge index tells us which edges are cut by the isosurface.\n ## (Data stolen from Bourk; see above.)\n edgeTable = np.array([\n 0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,\n 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,\n 0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,\n 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,\n 0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,\n 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,\n 0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,\n 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,\n 0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,\n 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,\n 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,\n 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,\n 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,\n 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,\n 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,\n 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,\n 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,\n 0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,\n 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,\n 0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,\n 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,\n 0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,\n 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,\n 0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,\n 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,\n 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,\n 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,\n 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,\n 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,\n 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,\n 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,\n 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 ], dtype=np.uint16)\n \n ## Table of triangles to use for filling each grid cell.\n ## Each set of three integers tells us which three edges to\n ## draw a triangle between.\n ## (Data stolen from Bourk; see above.)\n triTable = [\n [],\n [0, 8, 3],\n [0, 1, 9],\n [1, 8, 3, 9, 8, 1],\n [1, 2, 10],\n [0, 8, 3, 1, 2, 10],\n [9, 2, 10, 0, 2, 9],\n [2, 8, 3, 2, 10, 8, 10, 9, 8],\n [3, 11, 2],\n [0, 11, 2, 8, 11, 0],\n [1, 9, 0, 2, 3, 11],\n [1, 11, 2, 1, 9, 11, 9, 8, 11],\n [3, 10, 1, 11, 10, 3],\n [0, 10, 1, 0, 8, 10, 8, 11, 10],\n [3, 9, 0, 3, 11, 9, 11, 10, 9],\n [9, 8, 10, 10, 8, 11],\n [4, 7, 8],\n [4, 3, 0, 7, 3, 4],\n [0, 1, 9, 8, 4, 7],\n [4, 1, 9, 4, 7, 1, 7, 3, 1],\n [1, 2, 10, 8, 4, 7],\n [3, 4, 7, 3, 0, 4, 1, 2, 10],\n [9, 2, 10, 9, 0, 2, 8, 4, 7],\n [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],\n [8, 4, 7, 3, 11, 2],\n [11, 4, 7, 11, 2, 4, 2, 0, 4],\n [9, 0, 1, 8, 4, 7, 2, 3, 11],\n [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],\n [3, 10, 1, 3, 11, 10, 7, 8, 4],\n [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],\n [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],\n [4, 7, 11, 4, 11, 9, 9, 11, 10],\n [9, 5, 4],\n [9, 5, 4, 0, 8, 3],\n [0, 5, 4, 1, 5, 0],\n [8, 5, 4, 8, 3, 5, 3, 1, 5],\n [1, 2, 10, 9, 5, 4],\n [3, 0, 8, 1, 2, 10, 4, 9, 5],\n [5, 2, 10, 5, 4, 2, 4, 0, 2],\n [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],\n [9, 5, 
4, 2, 3, 11],\n [0, 11, 2, 0, 8, 11, 4, 9, 5],\n [0, 5, 4, 0, 1, 5, 2, 3, 11],\n [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],\n [10, 3, 11, 10, 1, 3, 9, 5, 4],\n [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],\n [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],\n [5, 4, 8, 5, 8, 10, 10, 8, 11],\n [9, 7, 8, 5, 7, 9],\n [9, 3, 0, 9, 5, 3, 5, 7, 3],\n [0, 7, 8, 0, 1, 7, 1, 5, 7],\n [1, 5, 3, 3, 5, 7],\n [9, 7, 8, 9, 5, 7, 10, 1, 2],\n [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],\n [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],\n [2, 10, 5, 2, 5, 3, 3, 5, 7],\n [7, 9, 5, 7, 8, 9, 3, 11, 2],\n [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],\n [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],\n [11, 2, 1, 11, 1, 7, 7, 1, 5],\n [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],\n [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],\n [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],\n [11, 10, 5, 7, 11, 5],\n [10, 6, 5],\n [0, 8, 3, 5, 10, 6],\n [9, 0, 1, 5, 10, 6],\n [1, 8, 3, 1, 9, 8, 5, 10, 6],\n [1, 6, 5, 2, 6, 1],\n [1, 6, 5, 1, 2, 6, 3, 0, 8],\n [9, 6, 5, 9, 0, 6, 0, 2, 6],\n [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],\n [2, 3, 11, 10, 6, 5],\n [11, 0, 8, 11, 2, 0, 10, 6, 5],\n [0, 1, 9, 2, 3, 11, 5, 10, 6],\n [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],\n [6, 3, 11, 6, 5, 3, 5, 1, 3],\n [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],\n [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],\n [6, 5, 9, 6, 9, 11, 11, 9, 8],\n [5, 10, 6, 4, 7, 8],\n [4, 3, 0, 4, 7, 3, 6, 5, 10],\n [1, 9, 0, 5, 10, 6, 8, 4, 7],\n [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],\n [6, 1, 2, 6, 5, 1, 4, 7, 8],\n [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],\n [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],\n [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],\n [3, 11, 2, 7, 8, 4, 10, 6, 5],\n [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],\n [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],\n [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],\n [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],\n [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],\n [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],\n [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],\n [10, 4, 9, 6, 4, 10],\n [4, 10, 6, 4, 9, 10, 0, 8, 3],\n [10, 0, 1, 10, 6, 0, 6, 4, 0],\n [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],\n [1, 4, 9, 1, 2, 4, 2, 6, 4],\n [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],\n [0, 2, 4, 4, 2, 6],\n [8, 3, 2, 8, 2, 4, 4, 2, 6],\n [10, 4, 9, 10, 6, 4, 11, 2, 3],\n [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],\n [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],\n [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],\n [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],\n [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],\n [3, 11, 6, 3, 6, 0, 0, 6, 4],\n [6, 4, 8, 11, 6, 8],\n [7, 10, 6, 7, 8, 10, 8, 9, 10],\n [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],\n [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],\n [10, 6, 7, 10, 7, 1, 1, 7, 3],\n [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],\n [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],\n [7, 8, 0, 7, 0, 6, 6, 0, 2],\n [7, 3, 2, 6, 7, 2],\n [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],\n [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],\n [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],\n [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],\n [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],\n [0, 9, 1, 11, 6, 7],\n [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],\n [7, 11, 6],\n [7, 6, 11],\n [3, 0, 8, 11, 7, 6],\n [0, 1, 9, 11, 7, 6],\n [8, 1, 9, 8, 3, 1, 11, 7, 6],\n [10, 1, 2, 6, 11, 7],\n [1, 2, 10, 3, 0, 8, 6, 11, 7],\n [2, 9, 0, 2, 10, 9, 6, 11, 7],\n [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],\n [7, 2, 3, 6, 2, 7],\n [7, 0, 8, 7, 6, 0, 6, 2, 0],\n [2, 7, 6, 2, 3, 7, 0, 1, 9],\n [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],\n [10, 7, 6, 10, 1, 7, 1, 
3, 7],\n [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],\n [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],\n [7, 6, 10, 7, 10, 8, 8, 10, 9],\n [6, 8, 4, 11, 8, 6],\n [3, 6, 11, 3, 0, 6, 0, 4, 6],\n [8, 6, 11, 8, 4, 6, 9, 0, 1],\n [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],\n [6, 8, 4, 6, 11, 8, 2, 10, 1],\n [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],\n [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],\n [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],\n [8, 2, 3, 8, 4, 2, 4, 6, 2],\n [0, 4, 2, 4, 6, 2],\n [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],\n [1, 9, 4, 1, 4, 2, 2, 4, 6],\n [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],\n [10, 1, 0, 10, 0, 6, 6, 0, 4],\n [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],\n [10, 9, 4, 6, 10, 4],\n [4, 9, 5, 7, 6, 11],\n [0, 8, 3, 4, 9, 5, 11, 7, 6],\n [5, 0, 1, 5, 4, 0, 7, 6, 11],\n [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],\n [9, 5, 4, 10, 1, 2, 7, 6, 11],\n [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],\n [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],\n [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],\n [7, 2, 3, 7, 6, 2, 5, 4, 9],\n [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],\n [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],\n [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],\n [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],\n [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],\n [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],\n [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],\n [6, 9, 5, 6, 11, 9, 11, 8, 9],\n [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],\n [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],\n [6, 11, 3, 6, 3, 5, 5, 3, 1],\n [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],\n [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],\n [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],\n [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],\n [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],\n [9, 5, 6, 9, 6, 0, 0, 6, 2],\n [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],\n [1, 5, 6, 2, 1, 6],\n [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],\n [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],\n [0, 3, 8, 5, 6, 10],\n [10, 5, 6],\n [11, 5, 10, 7, 5, 11],\n [11, 5, 10, 11, 7, 5, 8, 3, 0],\n [5, 11, 7, 5, 10, 11, 1, 9, 0],\n [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],\n [11, 1, 2, 11, 7, 1, 7, 5, 1],\n [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],\n [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],\n [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],\n [2, 5, 10, 2, 3, 5, 3, 7, 5],\n [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],\n [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],\n [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],\n [1, 3, 5, 3, 7, 5],\n [0, 8, 7, 0, 7, 1, 1, 7, 5],\n [9, 0, 3, 9, 3, 5, 5, 3, 7],\n [9, 8, 7, 5, 9, 7],\n [5, 8, 4, 5, 10, 8, 10, 11, 8],\n [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],\n [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],\n [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],\n [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],\n [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],\n [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],\n [9, 4, 5, 2, 11, 3],\n [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],\n [5, 10, 2, 5, 2, 4, 4, 2, 0],\n [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],\n [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],\n [8, 4, 5, 8, 5, 3, 3, 5, 1],\n [0, 4, 5, 1, 0, 5],\n [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],\n [9, 4, 5],\n [4, 11, 7, 4, 9, 11, 9, 10, 11],\n [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],\n [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],\n [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],\n [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],\n [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],\n [11, 7, 4, 11, 4, 2, 2, 4, 0],\n [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],\n [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],\n [9, 10, 7, 9, 7, 4, 10, 
2, 7, 8, 7, 0, 2, 0, 7],\n [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],\n [1, 10, 2, 8, 7, 4],\n [4, 9, 1, 4, 1, 7, 7, 1, 3],\n [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],\n [4, 0, 3, 7, 4, 3],\n [4, 8, 7],\n [9, 10, 8, 10, 11, 8],\n [3, 0, 9, 3, 9, 11, 11, 9, 10],\n [0, 1, 10, 0, 10, 8, 8, 10, 11],\n [3, 1, 10, 11, 3, 10],\n [1, 2, 11, 1, 11, 9, 9, 11, 8],\n [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],\n [0, 2, 11, 8, 0, 11],\n [3, 2, 11],\n [2, 3, 8, 2, 8, 10, 10, 8, 9],\n [9, 10, 2, 0, 9, 2],\n [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],\n [1, 10, 2],\n [1, 3, 8, 9, 1, 8],\n [0, 9, 1],\n [0, 3, 8],\n []\n ] \n edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)\n [0, 0, 0, 0], \n [1, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [1, 0, 1, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 2],\n [1, 0, 0, 2],\n [1, 1, 0, 2],\n [0, 1, 0, 2],\n #[9, 9, 9, 9] ## fake\n ], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.\n nTableFaces = np.array([len(f)/3 for f in triTable], dtype=np.ubyte)\n faceShiftTables = [None]\n for i in range(1,6):\n ## compute lookup table of index: vertexes mapping\n faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)\n faceTableInds = np.argwhere(nTableFaces == i)\n faceTableI[faceTableInds[:,0]] = np.array([triTable[j] for j in faceTableInds])\n faceTableI = faceTableI.reshape((len(triTable), i, 3))\n faceShiftTables.append(edgeShifts[faceTableI])\n \n ## Let's try something different:\n #faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)\n #for i,f in enumerate(triTable):\n #f = np.array(f + [12] * (15-len(f))).reshape(5,3)\n #faceTable[i] = edgeShifts[f]\n \n \n IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)\n else:\n faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache\n\n\n \n ## mark everything below the isosurface level\n mask = data < level\n \n ### make eight sub-fields and compute indexes for grid cells\n index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)\n fields = np.empty((2,2,2), dtype=object)\n slices = [slice(0,-1), slice(1,None)]\n for i in [0,1]:\n for j in [0,1]:\n for k in [0,1]:\n fields[i,j,k] = mask[slices[i], slices[j], slices[k]]\n vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme\n index += fields[i,j,k] * 2**vertIndex\n \n ### Generate table of edges that have been cut\n cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)\n edges = edgeTable[index]\n for i, shift in enumerate(edgeShifts[:12]): \n slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]\n cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i\n \n ## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions\n m = cutEdges > 0\n vertexInds = np.argwhere(m) ## argwhere is slow!\n vertexes = vertexInds[:,:3].astype(np.float32)\n dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])\n \n ## re-use the cutEdges array as a lookup table for vertex IDs\n cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])\n \n for i in [0,1,2]:\n vim = vertexInds[:,3] == i\n vi = vertexInds[vim, :3]\n viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)\n v1 = dataFlat[viFlat]\n v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]\n 
vertexes[vim,i] += (level-v1) / (v2-v1)\n \n ### compute the set of vertex indexes for each face. \n \n ## This works, but runs a bit slower.\n #cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face\n #cellInds = index[cells[:,0], cells[:,1], cells[:,2]]\n #verts = faceTable[cellInds]\n #mask = verts[...,0,0] != 9\n #verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges\n #verts = verts[mask]\n #faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.\n \n \n ## To allow this to be vectorized efficiently, we count the number of faces in each \n ## grid cell and handle each group of cells with the same number together.\n ## determine how many faces to assign to each grid cell\n nFaces = nTableFaces[index]\n totFaces = nFaces.sum()\n faces = np.empty((totFaces, 3), dtype=np.uint32)\n ptr = 0\n #import debug\n #p = debug.Profiler()\n \n ## this helps speed up an indexing operation later on\n cs = np.array(cutEdges.strides)//cutEdges.itemsize\n cutEdges = cutEdges.flatten()\n\n ## this, strangely, does not seem to help.\n #ins = np.array(index.strides)/index.itemsize\n #index = index.flatten()\n\n for i in range(1,6):\n ### expensive:\n #profiler()\n cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)\n #profiler()\n if cells.shape[0] == 0:\n continue\n cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round\n #profiler()\n \n ### expensive:\n verts = faceShiftTables[i][cellInds]\n #profiler()\n verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges\n verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])\n #profiler()\n \n ### expensive:\n verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)\n vertInds = cutEdges[verts]\n #profiler()\n nv = vertInds.shape[0]\n #profiler()\n faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))\n #profiler()\n ptr += nv\n \n return vertexes, faces\n\n\n \ndef invertQTransform(tr):\n \"\"\"Return a QTransform that is the inverse of *tr*.\n Raises an exception if tr is not invertible.\n \n Note that this function is preferred over QTransform.inverted() due to\n bugs in that method. (specifically, Qt has floating-point precision issues\n when determining whether a matrix is invertible)\n \"\"\"\n try:\n import numpy.linalg\n arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])\n inv = numpy.linalg.inv(arr)\n return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1])\n except ImportError:\n inv = tr.inverted()\n if inv[1] is False:\n raise Exception(\"Transform is not invertible.\")\n return inv[0]\n \n \ndef pseudoScatter(data, spacing=None, shuffle=True, bidir=False):\n \"\"\"\n Used for examining the distribution of values in a set. 
Produces scattering as in beeswarm or column scatter plots.\n \n Given a list of x-values, construct a set of y-values such that an x,y scatter-plot\n will not have overlapping points (it will look similar to a histogram).\n \"\"\"\n inds = np.arange(len(data))\n if shuffle:\n np.random.shuffle(inds)\n \n data = data[inds]\n \n if spacing is None:\n spacing = 2.*np.std(data)/len(data)**0.5\n s2 = spacing**2\n \n yvals = np.empty(len(data))\n if len(data) == 0:\n return yvals\n yvals[0] = 0\n for i in range(1,len(data)):\n x = data[i] # current x value to be placed\n x0 = data[:i] # all x values already placed\n y0 = yvals[:i] # all y values already placed\n y = 0\n \n dx = (x0-x)**2 # x-distance to each previous point\n xmask = dx < s2 # exclude anything too far away\n \n if xmask.sum() > 0:\n if bidir:\n dirs = [-1, 1]\n else:\n dirs = [1]\n yopts = []\n for direction in dirs:\n y = 0\n dx2 = dx[xmask]\n dy = (s2 - dx2)**0.5 \n limits = np.empty((2,len(dy))) # ranges of y-values to exclude\n limits[0] = y0[xmask] - dy\n limits[1] = y0[xmask] + dy \n while True:\n # ignore anything below this y-value\n if direction > 0:\n mask = limits[1] >= y\n else:\n mask = limits[0] <= y\n \n limits2 = limits[:,mask]\n \n # are we inside an excluded region?\n mask = (limits2[0] < y) & (limits2[1] > y)\n if mask.sum() == 0:\n break\n \n if direction > 0:\n y = limits2[:,mask].max()\n else:\n y = limits2[:,mask].min()\n yopts.append(y)\n if bidir:\n y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]\n else:\n y = yopts[0]\n yvals[i] = y\n \n return yvals[np.argsort(inds)] ## un-shuffle values before returning\n" ]
[ [ "numpy.ones", "numpy.argwhere", "numpy.take", "numpy.dtype", "numpy.argsort", "numpy.asarray", "numpy.log", "numpy.ascontiguousarray", "numpy.isscalar", "numpy.ndindex", "numpy.isfinite", "numpy.argmin", "numpy.abs", "numpy.isnan", "numpy.fft.rfft", "numpy.zeros", "numpy.arange", "numpy.tan", "numpy.std", "numpy.random.shuffle", "numpy.empty", "numpy.isinf", "numpy.floor", "numpy.exp", "numpy.clip", "numpy.product", "numpy.array", "numpy.concatenate", "numpy.frombuffer" ] ]
Sam-Armstrong/MNIST-GAN
[ "56143cecf50df5dd331278eaf4e0e387fe59bf5c" ]
[ "run_model.py" ]
[ "\"\"\"\nAuthor: Sam Armstrong\nDate: Autumn 2021\n\nDescription: The code for generating a single sample using the model (saves the image to the local folder)\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom Generator import Generator\nfrom PIL import Image\nfrom matplotlib import cm\nfrom torch.autograd import Variable\nfrom torch import Tensor\n\ndevice = torch.device('cuda')\n\ndef run_model():\n generator = Generator()\n generator.load_state_dict(torch.load('generator-model.pickle'))\n generator.eval()\n \n z = Variable(Tensor(np.random.rand(1, 16)))\n image_array = generator(z).detach().numpy()\n image_array = image_array.reshape(28, 28)\n data = Image.fromarray(image_array)\n data = Image.fromarray(np.uint8(cm.gist_earth(image_array) * 255))\n data.show()\n data.save('GAN-Image.png')\n\nif __name__ == '__main__':\n run_model()\n" ]
[ [ "numpy.random.rand", "torch.device", "torch.load", "matplotlib.cm.gist_earth" ] ]
JosephKJ/hat
[ "a6386c8a5435573034f3a55c86438c0a82ee9d8d" ]
[ "src/dataloaders/mixture.py" ]
[ "import os,sys\nimport os.path\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom torchvision import datasets,transforms\nfrom sklearn.utils import shuffle\nimport urllib.request\nfrom PIL import Image\nimport pickle\nimport utils\n\n########################################################################################################################\n\ndef get(seed=0,fixed_order=False,pc_valid=0.15):\n data={}\n taskcla=[]\n size=[3,32,32]\n\n idata=np.arange(8)\n if not fixed_order:\n idata=list(shuffle(idata,random_state=seed))\n print('Task order =',idata)\n\n if not os.path.isdir('../dat/binary_mixture/'):\n os.makedirs('../dat/binary_mixture')\n # Pre-load\n for n,idx in enumerate(idata):\n if idx==0:\n # CIFAR10\n mean=[x/255 for x in [125.3,123.0,113.9]]\n std=[x/255 for x in [63.0,62.1,66.7]]\n dat={}\n dat['train']=datasets.CIFAR10('../dat/',train=True,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.CIFAR10('../dat/',train=False,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='cifar10'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx==1:\n # CIFAR100\n mean=[x/255 for x in [125.3,123.0,113.9]]\n std=[x/255 for x in [63.0,62.1,66.7]]\n dat={}\n dat['train']=datasets.CIFAR100('../dat/',train=True,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.CIFAR100('../dat/',train=False,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='cifar100'\n data[n]['ncla']=100\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx==2:\n # MNIST\n #mean=(0.1307,) # Mean and std without including the padding\n #std=(0.3081,)\n mean=(0.1,) # Mean and std including the padding\n std=(0.2752,)\n dat={}\n dat['train']=datasets.MNIST('../dat/',train=True,download=True,transform=transforms.Compose([\n transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.MNIST('../dat/',train=False,download=True,transform=transforms.Compose([\n transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='mnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n image=image.expand(1,3,image.size(2),image.size(3)) # Create 3 equal channels\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 3:\n # SVHN\n mean=[0.4377,0.4438,0.4728]\n std=[0.198,0.201,0.197]\n dat = {}\n dat['train']=datasets.SVHN('../dat/',split='train',download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.SVHN('../dat/',split='test',download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n] = {}\n data[n]['name']='svhn'\n 
data[n]['ncla']=10\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 4:\n # FashionMNIST\n mean=(0.2190,) # Mean and std including the padding\n std=(0.3318,)\n dat={}\n dat['train']=FashionMNIST('../dat/fashion_mnist', train=True, download=True, transform=transforms.Compose([\n transforms.Pad(padding=2, fill=0), transforms.ToTensor(),transforms.Normalize(mean, std)]))\n dat['test']=FashionMNIST('../dat/fashion_mnist', train=False, download=True, transform=transforms.Compose([\n transforms.Pad(padding=2, fill=0), transforms.ToTensor(),transforms.Normalize(mean, std)]))\n data[n]={}\n data[n]['name']='fashion-mnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s]={'x': [], 'y': []}\n for image,target in loader:\n image=image.expand(1, 3, image.size(2), image.size(3)) # Create 3 equal channels\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 5:\n # Traffic signs\n mean=[0.3398,0.3117,0.3210]\n std=[0.2755,0.2647,0.2712]\n dat={}\n dat['train']=TrafficSigns('../dat/traffic_signs', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=TrafficSigns('../dat/traffic_signs', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n # mean, var = utils.compute_mean_std_dataset(dat['train'])\n data[n]={}\n data[n]['name']='traffic-signs'\n data[n]['ncla']=43\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n elif idx == 6:\n # Facescrub 100 faces\n mean=[0.5163,0.5569,0.4695]\n std=[0.2307,0.2272,0.2479]\n dat={}\n dat['train']=Facescrub('../dat/facescrub', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=Facescrub('../dat/facescrub', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n #mean, std = utils.compute_mean_std_dataset(dat['train']); print(mean,std); sys.exit()\n data[n]={}\n data[n]['name']='facescrub'\n data[n]['ncla']=100\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n elif idx == 7:\n # notMNIST A-J letters\n mean=(0.4254,)\n std=(0.4501,)\n dat={}\n dat['train']=notMNIST('../dat/notmnist', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=notMNIST('../dat/notmnist', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n #mean, std = utils.compute_mean_std_dataset(dat['train']); print(mean,std); sys.exit()\n data[n]={}\n data[n]['name']='notmnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n 
image=image.expand(1,3,image.size(2),image.size(3))\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n else:\n print('ERROR: Undefined data set',n)\n sys.exit()\n #print(n,data[n]['name'],data[n]['ncla'],len(data[n]['train']['x']))\n\n # \"Unify\" and save\n for s in ['train','test']:\n data[n][s]['x']=torch.stack(data[n][s]['x']).view(-1,size[0],size[1],size[2])\n data[n][s]['y']=torch.LongTensor(np.array(data[n][s]['y'],dtype=int)).view(-1)\n torch.save(data[n][s]['x'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))\n torch.save(data[n][s]['y'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))\n\n else:\n\n # Load binary files\n for n,idx in enumerate(idata):\n data[n] = dict.fromkeys(['name','ncla','train','test'])\n if idx==0:\n data[n]['name']='cifar10'\n data[n]['ncla']=10\n elif idx==1:\n data[n]['name']='cifar100'\n data[n]['ncla']=100\n elif idx==2:\n data[n]['name']='mnist'\n data[n]['ncla']=10\n elif idx==3:\n data[n]['name']='svhn'\n data[n]['ncla']=10\n elif idx==4:\n data[n]['name']='fashion-mnist'\n data[n]['ncla']=10\n elif idx==5:\n data[n]['name']='traffic-signs'\n data[n]['ncla']=43\n elif idx==6:\n data[n]['name']='facescrub'\n data[n]['ncla']=100\n elif idx==7:\n data[n]['name']='notmnist'\n data[n]['ncla']=10\n else:\n print('ERROR: Undefined data set',n)\n sys.exit()\n\n # Load\n for s in ['train','test']:\n data[n][s]={'x':[],'y':[]}\n data[n][s]['x'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))\n data[n][s]['y'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))\n\n # Validation\n for t in data.keys():\n r=np.arange(data[t]['train']['x'].size(0))\n r=np.array(shuffle(r,random_state=seed),dtype=int)\n nvalid=int(pc_valid*len(r))\n ivalid=torch.LongTensor(r[:nvalid])\n itrain=torch.LongTensor(r[nvalid:])\n data[t]['valid']={}\n data[t]['valid']['x']=data[t]['train']['x'][ivalid].clone()\n data[t]['valid']['y']=data[t]['train']['y'][ivalid].clone()\n data[t]['train']['x']=data[t]['train']['x'][itrain].clone()\n data[t]['train']['y']=data[t]['train']['y'][itrain].clone()\n\n # Others\n n=0\n for t in data.keys():\n taskcla.append((t,data[t]['ncla']))\n n+=data[t]['ncla']\n data['ncla']=n\n\n return data,taskcla,size\n\n########################################################################################################################\n\nclass FashionMNIST(datasets.MNIST):\n \"\"\"`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.\n \"\"\"\n urls = [\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',\n ]\n\n########################################################################################################################\n\nclass TrafficSigns(torch.utils.data.Dataset):\n \"\"\"`German Traffic Signs <http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory ``Traffic signs`` exists.\n split (string): One of {'train', 'test'}.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True,transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"traffic_signs_dataset.zip\"\n self.url = \"https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580d53ce_traffic-sign-data/traffic-sign-data.zip\"\n # Other options for the same 32x32 pickled dataset\n # url=\"https://d17h27t6h515a5.cloudfront.net/topher/2016/November/581faac4_traffic-signs-data/traffic-signs-data.zip\"\n # url_train=\"https://drive.google.com/open?id=0B5WIzrIVeL0WR1dsTC1FdWEtWFE\"\n # url_test=\"https://drive.google.com/open?id=0B5WIzrIVeL0WLTlPNlR2RG95S3c\"\n\n fpath = os.path.join(root, self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'lab 2 data/train.p'\n testing_file = 'lab 2 data/test.p'\n if train:\n with open(os.path.join(root,training_file), mode='rb') as f:\n train = pickle.load(f)\n self.data = train['features']\n self.labels = train['labels']\n else:\n with open(os.path.join(root,testing_file), mode='rb') as f:\n test = pickle.load(f)\n self.data = test['features']\n self.labels = test['labels']\n\n self.data = np.transpose(self.data, (0, 3, 1, 2))\n #print(self.data.shape); sys.exit()\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n\nclass Facescrub(torch.utils.data.Dataset):\n \"\"\"Subset of the Facescrub cropped from the official Megaface challenge page: http://megaface.cs.washington.edu/participate/challenge.html, resized to 38x38\n\n Args:\n root (string): Root directory of dataset where the Facescrub data exists.\n split (string): One of {'train', 'test'}.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True,transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"facescrub_100.zip\"\n self.url = \"https://github.com/nkundiushuti/facescrub_subset/blob/master/data/facescrub_100.zip?raw=true\"\n\n fpath=os.path.join(root,self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'facescrub_train_100.pkl'\n testing_file = 'facescrub_test_100.pkl'\n if train:\n with open(os.path.join(root,training_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # train = u.load()\n train = pickle.load(f)\n self.data = train['features'].astype(np.uint8)\n self.labels = train['labels'].astype(np.uint8)\n \"\"\"\n print(self.data.shape)\n print(self.data.mean())\n print(self.data.std())\n print(self.labels.max())\n #\"\"\"\n else:\n with open(os.path.join(root,testing_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # test = u.load()\n test = pickle.load(f)\n\n self.data = test['features'].astype(np.uint8)\n self.labels = test['labels'].astype(np.uint8)\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n\nclass notMNIST(torch.utils.data.Dataset):\n \"\"\"The notMNIST dataset is an image recognition dataset of font glyphs for the letters A through J useful with simple neural networks. It is quite similar to the classic MNIST dataset of handwritten digits 0 through 9.\n\n Args:\n root (string): Root directory of dataset where the notMNIST data exists.\n split (string): One of {'train', 'test'}.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True,transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"notmnist.zip\"\n self.url = \"https://github.com/nkundiushuti/notmnist_convert/blob/master/notmnist.zip?raw=true\"\n\n fpath = os.path.join(root, self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'notmnist_train.pkl'\n testing_file = 'notmnist_test.pkl'\n if train:\n with open(os.path.join(root,training_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # train = u.load()\n train = pickle.load(f)\n self.data = train['features'].astype(np.uint8)\n self.labels = train['labels'].astype(np.uint8)\n else:\n with open(os.path.join(root,testing_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # test = u.load()\n test = pickle.load(f)\n\n self.data = test['features'].astype(np.uint8)\n self.labels = test['labels'].astype(np.uint8)\n\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img[0])\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "numpy.transpose", "sklearn.utils.shuffle", "numpy.arange", "numpy.array", "torch.LongTensor" ] ]
weijiawu/SyntoReal_STD
[ "4f92809cfa276d0424019bcc1fb77659d20423b8" ]
[ "lib/utils.py" ]
[ "import numpy as np\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport math\nimport os\nimport torch\n\n\ndef get_MSER(image_path,):\n image = cv2.imread(image_path)\n rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n negtive_map = np.zeros(image.shape[:2])\n mser = cv2.MSER_create(_delta=5, _min_area=10, _max_variation=0.8)\n regions, bboxes = mser.detectRegions(rgb_img)\n # 绘制文本区域(不规则轮廓)\n hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\n\n keep = []\n for hull in hulls:\n x, y, w, h = cv2.boundingRect(hull)\n keep.append([x, y, x + w, y + h])\n\n # 使用非极大值抑制获取不重复的矩形框\n pick = non_max_suppression_fast(np.array(keep), overlapThresh=0.4)\n\n # loop over the picked bounding boxes and draw them\n for (startX, startY, endX, endY) in pick:\n cv2.fillPoly(negtive_map, np.array([[[startX, startY], [endX, startY], [endX, endY], [startX, endY]]]), (1))\n negtive_map = Image.fromarray(negtive_map)\n\n return negtive_map\n\n\ndef crop_img_target_source(img, vertices, labels, length,negtive_map):\n '''crop img patches to obtain batch and augment\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n length : length of cropped image region\n Output:\n region : cropped image region\n new_vertices: new vertices in cropped region\n '''\n h, w = img.height, img.width\n # confirm the shortest side of image >= length\n if h >= w and w < length:\n img = img.resize((length, int(h * length / w)), Image.BILINEAR)\n negtive_map = negtive_map.resize((length, int(h * length / w)), Image.BILINEAR)\n elif h < w and h < length:\n img = img.resize((int(w * length / h), length), Image.BILINEAR)\n negtive_map = negtive_map.resize((length, int(h * length / w)), Image.BILINEAR)\n\n ratio_w = img.width / w\n ratio_h = img.height / h\n assert (ratio_w >= 1 and ratio_h >= 1)\n\n new_vertices = np.zeros(vertices.shape)\n if vertices.size > 0:\n new_vertices[:, [0, 2, 4, 6]] = vertices[:, [0, 2, 4, 6]] * ratio_w\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * ratio_h\n\n # find random position\n remain_h = img.height - length\n remain_w = img.width - length\n flag = True\n cnt = 0\n while flag and cnt < 1000:\n cnt += 1\n start_w = int(np.random.rand() * remain_w)\n start_h = int(np.random.rand() * remain_h)\n flag = is_cross_text([start_w, start_h], length, new_vertices[labels == 1, :])\n box = (start_w, start_h, start_w + length, start_h + length)\n region = img.crop(box)\n negtive_map = negtive_map.crop(box)\n\n if new_vertices.size == 0:\n return region, new_vertices,negtive_map\n\n new_vertices[:, [0, 2, 4, 6]] -= start_w\n new_vertices[:, [1, 3, 5, 7]] -= start_h\n return region, new_vertices,negtive_map\n\n\ndef rotate_img_target_source(img, vertices, negtive_map,angle_range=10):\n '''rotate image [-10, 10] degree to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n angle_range : rotate range\n Output:\n img : rotated PIL Image\n new_vertices: rotated vertices\n '''\n center_x = (img.width - 1) / 2\n center_y = (img.height - 1) / 2\n angle = angle_range * (np.random.rand() * 2 - 1)\n img = img.rotate(angle, Image.BILINEAR)\n negtive_map = negtive_map.rotate(angle, Image.BILINEAR)\n\n new_vertices = np.zeros(vertices.shape)\n for i, vertice in enumerate(vertices):\n new_vertices[i, :] = rotate_vertices(vertice, -angle / 180 * math.pi, np.array([[center_x], [center_y]]))\n return img, 
new_vertices,negtive_map\n\ndef adjust_height_target_source(img, vertices, negtive_map, ratio=0.2):\n '''adjust height of image to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n ratio : height changes in [0.8, 1.2]\n Output:\n img : adjusted PIL Image\n new_vertices: adjusted vertices\n '''\n ratio_h = 1 + ratio * (np.random.rand() * 2 - 1) #[0.8, 1.2]\n old_h = img.height\n new_h = int(np.around(old_h * ratio_h))\n img = img.resize((img.width, new_h), Image.BILINEAR)\n negtive_map = negtive_map.resize((img.width, new_h), Image.BILINEAR)\n\n new_vertices = vertices.copy()\n if vertices.size > 0:\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * (new_h / old_h)\n return img, new_vertices,negtive_map\n\ndef rotate_all_pixels(rotate_mat, anchor_x, anchor_y, length):\n '''get rotated locations of all pixels for next stages\n Input:\n rotate_mat: rotatation matrix\n anchor_x : fixed x position\n anchor_y : fixed y position\n length : length of image\n Output:\n rotated_x : rotated x positions <numpy.ndarray, (length,length)>\n rotated_y : rotated y positions <numpy.ndarray, (length,length)>\n '''\n x = np.arange(length)\n y = np.arange(length)\n x, y = np.meshgrid(x, y)\n x_lin = x.reshape((1, x.size))\n y_lin = y.reshape((1, x.size))\n coord_mat = np.concatenate((x_lin, y_lin), 0)\n rotated_coord = np.dot(rotate_mat, coord_mat - np.array([[anchor_x], [anchor_y]])) + \\\n np.array([[anchor_x], [anchor_y]])\n rotated_x = rotated_coord[0, :].reshape(x.shape)\n rotated_y = rotated_coord[1, :].reshape(y.shape)\n return rotated_x, rotated_y\n\n\ndef get_score_geo_target_source(img, vertices, labels, scale, length, negative_map):\n '''generate score gt and geometry gt\n Input:\n img : PIL Image\n vertices: vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n scale : feature map / image 0.25\n length : image length 512\n Output:\n score gt, geo gt, ignored\n '''\n score_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros((int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n\n\n negative_map = np.array(negative_map)\n negative_map = cv2.resize(negative_map,(int(img.height * scale), int(img.width * scale)))\n negative_map = np.expand_dims(negative_map, -1)\n\n index = np.arange(0, length, int(1 / scale)) # [0 4 8 12 .... 
508]\n index_x, index_y = np.meshgrid(index, index) # Return coordinate matrices from coordinate vectors.\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n\n # shrink the vertices to 0.3 of the original to get the scaled score map\n poly = np.around(scale * shrink_poly(vertice).reshape((4, 2))).astype(np.int32) # scaled & shrunk\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n # print(\"rotated_x:\",rotated_x.shape)\n # print(\"rotated_y:\", rotated_y.shape)\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n\n # negative_map: background\n # ignored_map: foreground\n text_background = negative_map*ignored_map\n negative_map = negative_map*(1-text_background)\n ignored_map = ignored_map*(1-text_background)\n\n ignored_map = 1 - (negative_map + ignored_map)\n cv2.fillPoly(score_map, polys, 1)\n return torch.Tensor(score_map).permute(2, 0, 1), torch.Tensor(geo_map).permute(2, 0, 1), torch.Tensor(\n ignored_map).permute(2, 0, 1)\n\n\ndef non_max_suppression_fast(boxes, overlapThresh):\n \"\"\"\n boxes: an m*n matrix, where m is the number of bboxes and the first 4 columns\n hold each bbox's coordinates in (x1,y1,x2,y2) format; an optional 5th column\n holds the per-class confidence\n overlapThresh: maximum allowed overlap ratio\n \"\"\"\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes are integers, convert them to floats\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of all bounding boxes respectively\n x1 = boxes[:,0] # startX\n y1 = boxes[:,1] # startY\n x2 = boxes[:,2] # endX\n y2 = boxes[:,3] # endY\n # probs = boxes[:,4]\n\n # compute the area of the bounding boxes and sort the bboxes\n # by the bottom y-coordinate of the bboxes by ascending order\n # and grab the indexes of the sorted coordinates of bboxes\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n # if probabilities are provided, sort by them instead\n # idxs = np.argsort(probs)\n\n # keep looping while some indexes still remain in the idxs list\n while len(idxs) > 0:\n # grab the last index in the idxs list (the bottom-right box)\n # and add the index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest coordinates for the start of the bbox\n # and the smallest coordinates for the end of the bbox\n # in the rest of bounding boxes.\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n # the 
ratio of overlap in the bounding box\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list whose overlap is larger than overlapThresh\n idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick].astype(\"int\")\n\n\n\ndef cal_distance(x1, y1, x2, y2):\n '''calculate the Euclidean distance'''\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef move_points(vertices, index1, index2, r, coef):\n '''move the two points to shrink edge\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n index1 : offset of point1\n index2 : offset of point2\n r : [r1, r2, r3, r4] in paper\n coef : shrink ratio in paper\n Output:\n vertices: vertices where one edge has been shrunk\n '''\n index1 = index1 % 4\n index2 = index2 % 4\n x1_index = index1 * 2 + 0\n y1_index = index1 * 2 + 1\n x2_index = index2 * 2 + 0\n y2_index = index2 * 2 + 1\n\n r1 = r[index1]\n r2 = r[index2]\n length_x = vertices[x1_index] - vertices[x2_index]\n length_y = vertices[y1_index] - vertices[y2_index]\n length = cal_distance(vertices[x1_index], vertices[y1_index], vertices[x2_index], vertices[y2_index])\n if length > 1:\n ratio = (r1 * coef) / length\n vertices[x1_index] += ratio * (-length_x)\n vertices[y1_index] += ratio * (-length_y)\n ratio = (r2 * coef) / length\n vertices[x2_index] += ratio * length_x\n vertices[y2_index] += ratio * length_y\n return vertices\n\n\ndef shrink_poly(vertices, coef=0.3):\n '''shrink the text region\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n coef : shrink ratio in paper\n Output:\n v : vertices of shrunk text region <numpy.ndarray, (8,)>\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n r1 = min(cal_distance(x1, y1, x2, y2), cal_distance(x1, y1, x4, y4))\n r2 = min(cal_distance(x2, y2, x1, y1), cal_distance(x2, y2, x3, y3))\n r3 = min(cal_distance(x3, y3, x2, y2), cal_distance(x3, y3, x4, y4))\n r4 = min(cal_distance(x4, y4, x1, y1), cal_distance(x4, y4, x3, y3))\n r = [r1, r2, r3, r4]\n\n # obtain offset to perform move_points() automatically\n if cal_distance(x1, y1, x2, y2) + cal_distance(x3, y3, x4, y4) > \\\n cal_distance(x2, y2, x3, y3) + cal_distance(x1, y1, x4, y4):\n offset = 0 # two longer edges are (x1y1-x2y2) & (x3y3-x4y4)\n else:\n offset = 1 # two longer edges are (x2y2-x3y3) & (x4y4-x1y1)\n\n v = vertices.copy()\n v = move_points(v, 0 + offset, 1 + offset, r, coef)\n v = move_points(v, 2 + offset, 3 + offset, r, coef)\n v = move_points(v, 1 + offset, 2 + offset, r, coef)\n v = move_points(v, 3 + offset, 4 + offset, r, coef)\n return v\n\n\ndef get_rotate_mat(theta):\n '''positive theta value means rotate clockwise'''\n return np.array([[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]])\n\n\ndef rotate_vertices(vertices, theta, anchor=None):\n '''rotate vertices around anchor\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n theta : angle in radian measure\n anchor : fixed position during rotation\n Output:\n rotated vertices <numpy.ndarray, (8,)>\n '''\n v = vertices.reshape((4, 2)).T\n\n # print(vertices)\n # print(\"v:\",v.shape)\n # take the first vertex as the anchor\n if anchor is None:\n anchor = v[:, :1]\n # print(anchor)\n\n rotate_mat = get_rotate_mat(theta)\n res = np.dot(rotate_mat, v - anchor)\n return (res + anchor).T.reshape(-1)\n\n\ndef get_boundary(vertices):\n '''get the tight boundary around given vertices\n Input:\n vertices: vertices of 
text region <numpy.ndarray, (8,)>\n Output:\n the boundary\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n x_min = min(x1, x2, x3, x4)\n x_max = max(x1, x2, x3, x4)\n y_min = min(y1, y2, y3, y4)\n y_max = max(y1, y2, y3, y4)\n return x_min, x_max, y_min, y_max\n\n\ndef cal_error(vertices):\n '''default orientation is x1y1 : left-top, x2y2 : right-top, x3y3 : right-bot, x4y4 : left-bot\n calculate the difference between the vertices orientation and default orientation\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n err : difference measure\n '''\n x_min, x_max, y_min, y_max = get_boundary(vertices)\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n err = cal_distance(x1, y1, x_min, y_min) + cal_distance(x2, y2, x_max, y_min) + \\\n cal_distance(x3, y3, x_max, y_max) + cal_distance(x4, y4, x_min, y_max)\n return err\n\n\ndef find_min_rect_angle(vertices):\n '''find the best angle to rotate poly and obtain min rectangle\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n the best angle <radian measure>\n '''\n angle_interval = 1\n angle_list = list(range(-90, 90, angle_interval))\n area_list = []\n for theta in angle_list:\n rotated = rotate_vertices(vertices, theta / 180 * math.pi)\n x1, y1, x2, y2, x3, y3, x4, y4 = rotated\n temp_area = (max(x1, x2, x3, x4) - min(x1, x2, x3, x4)) * \\\n (max(y1, y2, y3, y4) - min(y1, y2, y3, y4))\n area_list.append(temp_area)\n\n sorted_area_index = sorted(list(range(len(area_list))), key=lambda k: area_list[k])\n min_error = float('inf')\n best_index = -1\n rank_num = 10\n # find the best angle with correct orientation\n for index in sorted_area_index[:rank_num]:\n rotated = rotate_vertices(vertices, angle_list[index] / 180 * math.pi)\n temp_error = cal_error(rotated)\n if temp_error < min_error:\n min_error = temp_error\n best_index = index\n return angle_list[best_index] / 180 * math.pi\n\n\ndef is_cross_text(start_loc, length, vertices):\n '''check if the crop image crosses text regions\n Input:\n start_loc: left-top position\n length : length of crop image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n Output:\n True if crop image crosses text region\n '''\n if vertices.size == 0:\n return False\n start_w, start_h = start_loc\n a = np.array([start_w, start_h, start_w + length, start_h, \\\n start_w + length, start_h + length, start_w, start_h + length]).reshape((4, 2))\n p1 = Polygon(a).convex_hull\n for vertice in vertices:\n p2 = Polygon(vertice.reshape((4, 2))).convex_hull\n inter = p1.intersection(p2).area\n try:\n if 0.01 <= inter / p2.area <= 0.99:\n return True\n except:\n continue\n return False" ]
[ [ "numpy.zeros", "numpy.dot", "numpy.maximum", "numpy.argsort", "numpy.arange", "numpy.where", "numpy.expand_dims", "numpy.random.rand", "numpy.array", "numpy.around", "numpy.concatenate", "numpy.meshgrid", "torch.Tensor", "numpy.minimum" ] ]
snigdhagit/compare-selection
[ "26f41d06405e9a9be9894878bf604c38049a4729" ]
[ "statistics.py" ]
[ "from __future__ import division\n\nimport numpy as np, pandas as pd, time\nfrom utils import BHfilter\n\ndef interval_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n try:\n active, lower, upper, pvalues = M.generate_intervals()\n except AttributeError:\n return M, None \n\n if len(active) > 0:\n naive_lower, naive_upper = M.naive_intervals(active)[1:]\n naive_pvalues = M.naive_pvalues(active)[1]\n else:\n naive_lower, naive_upper, naive_pvalues = None, None, None\n target = M.get_target(active, beta) # for now limited to Gaussian methods\n full_target = M.full_target(active, beta)\n tic = time.time()\n\n if len(active) > 0:\n alpha = 1 - M.confidence\n fdp = (pvalues[full_target == 0] < alpha).sum() / pvalues.shape[0]\n value = pd.DataFrame({'active_variable':active,\n 'lower_confidence':lower,\n 'upper_confidence':upper,\n 'target':target,\n 'full_target':full_target,\n 'fdp':fdp * np.ones_like(pvalues)})\n if naive_lower is not None:\n value['naive_lower_confidence'] = naive_lower\n value['naive_upper_confidence'] = naive_upper\n value['naive_pvalue'] = naive_pvalues\n if np.isfinite(toc):\n value['Time'] = tic-toc\n value['pvalue'] = pvalues\n return M, value\n else:\n return M, None\n\ndef interval_summary(result):\n\n length = result['upper_confidence'] - result['lower_confidence']\n if 'naive_lower_confidence' in result.columns:\n naive_length = result['naive_upper_confidence'] - result['naive_lower_confidence']\n else:\n naive_length = np.ones_like(length) * np.nan\n\n def coverage_(result):\n return np.mean(np.asarray(result['lower_confidence'] <= result['target']) *\n np.asarray(result['upper_confidence'] >= result['target']))\n \n def naive_coverage_(result):\n return np.mean(np.asarray(result['naive_lower_confidence'] <= result['target']) *\n np.asarray(result['naive_upper_confidence'] >= result['target']))\n \n instances = result.groupby('instance_id')\n len_cover = np.array([(len(g.index), coverage_(g)) for _, g in instances])\n\n instances = result.groupby('instance_id')\n naive_cover = np.array([(len(g.index), naive_coverage_(g)) for _, g in instances])\n naive_coverage = np.mean(naive_cover, 0)[1]\n active_vars, mean_coverage = np.mean(len_cover, 0)\n sd_coverage = np.std(len_cover[:,1])\n\n # XXX we should group by instances before averaging and computing SD\n\n value = pd.DataFrame([[len(np.unique(result['instance_id'])),\n mean_coverage,\n sd_coverage,\n np.median(length),\n np.mean(length),\n np.mean(naive_length),\n np.median(naive_length),\n naive_coverage,\n active_vars,\n np.mean(result['Time']),\n result['model_target'].values[0]]],\n columns=['Replicates',\n 'Coverage',\n 'SD(Coverage)',\n 'Median Length',\n 'Mean Length',\n 'Mean Naive Length',\n 'Median Naive Length',\n 'Naive Coverage',\n 'Active',\n 'Time',\n 'Model'])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\ndef estimator_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n \n try:\n active, point_estimate = M.point_estimator()\n except AttributeError:\n return M, None # cannot make point estimator\n\n if 
len(active) > 0:\n naive_estimate = M.naive_estimator(active)[1]\n else:\n naive_estimate = np.zeros_like(point_estimate)\n\n tic = time.time()\n\n S = instance.feature_cov\n\n full_risk = np.sum((beta - point_estimate) * S.dot(beta - point_estimate)) / beta[active].shape\n naive_full_risk = np.sum((beta - naive_estimate) * S.dot(beta - naive_estimate)) / beta[active].shape\n\n # partial risk -- only active coordinates\n\n target = M.get_target(active, beta) # for now limited to Gaussian methods\n\n S_active = S[active][:,active]\n delta = target - point_estimate[active]\n partial_risk = np.sum(delta * S_active.dot(delta)) / delta.shape[0]\n naive_delta = target - naive_estimate[active]\n naive_partial_risk = np.sum(naive_delta * S_active.dot(naive_delta)) / delta.shape[0]\n\n if np.linalg.norm(target) > 0:\n partial_relative_risk = partial_risk / max(np.sum(target * S_active.dot(target)), 1)\n naive_partial_relative_risk = naive_partial_risk / max(np.sum(target * S_active.dot(target)), 1)\n\n # relative risk\n\n relative_risk = full_risk / (np.sum(beta * S.dot(beta)) * beta.shape[0])\n naive_relative_risk = naive_full_risk / np.sum(beta * S.dot(beta))\n\n bias = np.mean(point_estimate - beta)\n naive_bias = np.mean(naive_estimate - beta)\n\n value = pd.DataFrame({'Full Risk':[full_risk],\n 'Naive Full Risk':[naive_full_risk],\n 'Partial Risk':[partial_risk],\n 'Partial Relative Risk':[partial_relative_risk],\n 'Naive Partial Relative Risk':[naive_partial_relative_risk],\n 'Naive Partial Risk':[naive_partial_risk],\n 'Relative Risk':[relative_risk],\n 'Naive Relative Risk':[naive_relative_risk],\n 'Bias':[bias],\n 'Naive Bias':[naive_bias],\n })\n\n if np.isfinite(toc):\n value['Time'] = tic-toc\n value['Active'] = len(active)\n\n return M, value\n\ndef estimator_summary(result):\n\n nresult = result['Full Risk'].shape[0]\n value = pd.DataFrame([[nresult,\n np.median(result['Full Risk']),\n np.std(result['Full Risk']),\n np.median(result['Naive Full Risk']),\n np.std(result['Naive Full Risk']),\n np.median(result['Partial Risk']),\n np.std(result['Partial Risk']),\n np.median(result['Naive Partial Risk']),\n np.std(result['Naive Partial Risk']),\n np.median(result['Relative Risk']),\n np.std(result['Relative Risk']),\n np.median(result['Naive Relative Risk']),\n np.std(result['Naive Relative Risk']),\n np.median(result['Bias']),\n np.std(result['Bias']),\n np.median(result['Naive Bias']),\n np.std(result['Naive Bias']),\n np.mean(result['Time']),\n np.mean(result['Active']),\n result['model_target'].values[0]]],\n columns=['Replicates',\n 'Median(Full Risk)',\n 'SD(Full Risk)',\n 'Median(Naive Full Risk)',\n 'SD(Naive Full Risk)',\n 'Median(Partial Risk)',\n 'SD(Partial Risk)',\n 'Median(Naive Partial Risk)',\n 'SD(Naive Partial Risk)',\n 'Median(Relative Risk)',\n 'SD(Relative Risk)',\n 'Median(Naive Relative Risk)',\n 'SD(Naive Relative Risk)',\n 'Median(Bias)',\n 'SD(Bias)',\n 'Median(Naive Bias)',\n 'SD(Naive Bias)',\n 'Time', \n 'Active',\n 'Model'\n ])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\ndef BH_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n \n selected, active = M.select()\n try:\n if len(active) > 0:\n naive_pvalues = M.naive_pvalues(active)[1]\n naive_selected = 
[active[j] for j in BHfilter(naive_pvalues, q=M.q)]\n else:\n naive_selected = None\n except AttributeError:\n naive_selected = None\n tic = time.time()\n true_active = np.nonzero(beta)[0]\n\n if active is not None:\n selection_quality = instance.discoveries(active, true_active)\n TD = instance.discoveries(selected, true_active)\n FD = len(selected) - TD\n FDP = FD / max(TD + 1. * FD, 1.)\n\n # naive\n if naive_selected is not None:\n nTD = instance.discoveries(naive_selected, true_active)\n nFD = len(naive_selected) - nTD\n nFDP = nFD / max(nTD + 1. * nFD, 1.)\n else:\n nTD, nFDP, nFD = np.nan, np.nan, np.nan\n\n ntrue_active = max(len(true_active), 1) \n value = pd.DataFrame([[TD / ntrue_active, \n FD, \n FDP, \n np.maximum(nTD / ntrue_active, 1), \n nFD,\n nFDP,\n selection_quality / ntrue_active,\n len(active)]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Selection Quality',\n 'Active'])\n else:\n value = pd.DataFrame([[0, 0, 0, 0, 0, 0, tic-toc, 0, 0]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n if np.isfinite(toc):\n value['Time'] = tic-toc\n\n return M, value\n\ndef BH_summary(result):\n\n nresult = result['Full Model Power'].shape[0]\n value = pd.DataFrame([[nresult,\n np.mean(result['Full Model Power']), \n np.std(result['Full Model Power']) / np.sqrt(nresult),\n np.mean(result['False Discoveries']), \n np.mean(result['Full Model FDP']), \n np.std(result['Full Model FDP']) / np.sqrt(nresult),\n np.mean(result['Naive Full Model FDP']), \n np.mean(result['Naive Full Model Power']), \n np.mean(result['Naive False Discoveries']), \n np.mean(result['Time']),\n np.mean(result['Selection Quality']),\n np.mean(result['Active']),\n result['model_target'].values[0]]],\n columns=['Replicates', \n 'Full Model Power', \n 'SD(Full Model Power)', \n 'False Discoveries', \n 'Full Model FDR', \n 'SD(Full Model FDR)', \n 'Naive Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Time', \n 'Selection Quality',\n 'Active',\n 'Model'\n ])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\n# marginally threshold p-values at 10% by default\n\nmarginal_summary = BH_summary # reporting statistics are the same as with BHfilter\n\ndef marginal_statistic(method, \n instance, \n X, \n Y, \n beta, \n l_theory, \n l_min, \n l_1se, \n sigma_reid):\n\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n try:\n active, pvalues = M.generate_pvalues()\n selected = pvalues < method.level\n except AttributeError: # some methods do not have pvalues (e.g. knockoffs for these we will run their select method\n active, selected = M.select()\n\n try:\n if len(active) > 0:\n naive_pvalues = M.naive_pvalues(active)[1]\n naive_selected = naive_pvalues < method.level\n else:\n naive_selected = None\n except AttributeError:\n naive_selected = None\n\n tic = time.time()\n true_active = np.nonzero(beta)[0]\n\n if active is not None:\n selection_quality = instance.discoveries(active, true_active)\n TD = instance.discoveries(selected, true_active)\n FD = len(selected) - TD\n FDP = FD / max(TD + 1. 
* FD, 1.)\n\n # naive\n if naive_selected is not None:\n nTD = instance.discoveries(naive_selected, true_active)\n nFD = len(naive_selected) - nTD\n nFDP = nFD / max(nTD + 1. * nFD, 1.)\n else:\n nTD, nFDP, nFD = np.nan, np.nan, np.nan\n\n ntrue_active = max(len(true_active), 1) \n return M, pd.DataFrame([[TD / ntrue_active,\n FD, \n FDP, \n np.maximum(nTD / ntrue_active, 1), \n nFD,\n nFDP,\n tic-toc, \n selection_quality / ntrue_active,\n len(active)]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n else:\n return M, pd.DataFrame([[0, 0, 0, 0, 0, 0, tic-toc, 0, 0]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n\n" ]
[ [ "numpy.sqrt", "numpy.zeros_like", "numpy.maximum", "pandas.DataFrame", "numpy.ones_like", "numpy.asarray", "numpy.median", "numpy.nonzero", "numpy.mean", "numpy.std", "numpy.linalg.norm", "numpy.unique", "numpy.isfinite" ] ]
fluiddyn/fluidpythran
[ "e34e9886680e6b8e365d24a77fcb66b67e554043" ]
[ "doc/examples/not_implemented/pythran_class_with_calls.py" ]
[ "\"\"\"\nWith classes, we have a problem with heritage. Note that for standard functions\n(like sum_arrays), we actually also have the problem with monkey patching.\n\nWe can just say that monkey patching of `sum_arrays` is not supported (so that\n`sum_arrays` can be treated as a Pythran function, and potentially inlined) but\nfor class, we really want to support heritage (like in MyClassChild) so we\nwould need to replace `compute` by a Python method calling Pythran functions\nand Python methods (which themselves call Pythran functions).\n\nThe mechanism needed for `compute` is much more complicated than the simple\ncase in `pythran_class.py` and more complicated than what is needed for\n`compute1` (which is actually similar to [issue\n#7](https://bitbucket.org/fluiddyn/fluidpythran/issues/7/support-kernels-with-function-calls)).\n\n\"\"\"\n\nfrom fluidpythran import Type, NDim, Array, boost\n\nimport numpy as np\n\n\nT = Type(int, np.float64)\nN = NDim(1)\n\nA1 = Array[T, N]\nA2 = Array[float, N + 1]\n\n\ndef sum_arrays(arr0, arr1):\n return arr0 + arr1\n\n\nclass MyClass:\n\n arr0: A1\n arr1: A1\n arr2: A2\n\n def __init__(self, n, dtype=int):\n self.arr0 = np.zeros(n, dtype=dtype)\n self.arr1 = np.zeros(n, dtype=dtype)\n self.arr2 = np.zeros(n)\n\n @boost\n def compute(self, alpha: float):\n tmp = self.sum_arrays().mean()\n return tmp ** alpha * self.arr2\n\n def sum_arrays(self):\n return self.arr0 + self.arr1\n\n @boost\n def compute1(self, alpha: float):\n tmp = sum_arrays(self.arr0, self.arr1).mean()\n return tmp ** alpha * self.arr2\n\n\nclass MyClassChild(MyClass):\n def sum_arrays(self):\n return 2 * self.arr0 + self.arr1\n" ]
[ [ "numpy.zeros" ] ]
tangibleai/rasa
[ "d92cda129bbbf4b52151d981535bd02fc7597d6d" ]
[ "tests/core/featurizers/test_single_state_featurizers.py" ]
[ "from typing import Text\nimport numpy as np\nfrom rasa.shared.core.constants import ENTITY_LABEL_SEPARATOR\nimport scipy.sparse\n\nimport pytest\n\nfrom rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer\nfrom rasa.shared.core.domain import Domain\nfrom rasa.shared.nlu.constants import (\n ACTION_TEXT,\n ACTION_NAME,\n ENTITIES,\n TEXT,\n INTENT,\n FEATURE_TYPE_SEQUENCE,\n FEATURE_TYPE_SENTENCE,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_VALUE,\n ENTITY_ATTRIBUTE_START,\n ENTITY_ATTRIBUTE_END,\n ENTITY_TAGS,\n)\nfrom rasa.shared.core.constants import ACTIVE_LOOP, SLOTS\nfrom rasa.shared.nlu.interpreter import RegexInterpreter\nfrom rasa.shared.core.slots import Slot\nfrom rasa.shared.nlu.training_data.features import Features\n\n\ndef test_single_state_featurizer_without_interpreter_state_not_with_action_listen():\n \"\"\"This test are for encoding state without a trained interpreter.\n action_name is not action_listen, so, INTENT, TEXT and ENTITIES should not be\n featurized.\n \"\"\"\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_name\": \"d\", \"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"i\"},\n \"slots\": {\"g\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n # user input is ignored as prev action is not action_listen\n assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])\n ).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0\n\n\ndef test_single_state_featurizer_without_interpreter_state_with_action_listen():\n \"\"\"This test are for encoding state without a trained interpreter.\n action_name is action_listen, so, INTENT and ENTITIES should be featurized\n while text shouldn't because we don't have an interpreter.\n \"\"\"\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_name\": \"action_listen\", \"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n # we featurize all the features except for *_text ones because NLU wasn't trained\n assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])\n ).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n\n\ndef 
test_single_state_featurizer_without_interpreter_state_no_intent_no_action_name():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n # check that no intent / action_name features are added when the interpreter\n # isn't there and\n # intent / action_name not in input\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n assert list(encoded.keys()) == [ACTIVE_LOOP, SLOTS]\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n\n\ndef test_single_state_featurizer_correctly_encodes_non_existing_value():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1}\n\n encoded = f.encode_state(\n {\"user\": {\"intent\": \"e\"}, \"prev_action\": {\"action_name\": \"action_listen\"}},\n interpreter=RegexInterpreter(),\n )\n\n assert list(encoded.keys()) == [INTENT, ACTION_NAME]\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 0]])).nnz == 0\n\n\ndef test_single_state_featurizer_prepare_for_training():\n domain = Domain(\n intents=[\"greet\"],\n entities=[\"name\"],\n slots=[Slot(\"name\")],\n templates={},\n forms=[],\n action_names=[\"utter_greet\", \"action_check_weather\"],\n )\n\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n\n assert len(f._default_feature_states[INTENT]) > 1\n assert \"greet\" in f._default_feature_states[INTENT]\n assert len(f._default_feature_states[ENTITIES]) == 1\n assert f._default_feature_states[ENTITIES][\"name\"] == 0\n assert len(f._default_feature_states[SLOTS]) == 1\n assert f._default_feature_states[SLOTS][\"name_0\"] == 0\n assert len(f._default_feature_states[ACTION_NAME]) > 2\n assert \"utter_greet\" in f._default_feature_states[ACTION_NAME]\n assert \"action_check_weather\" in f._default_feature_states[ACTION_NAME]\n assert len(f._default_feature_states[ACTIVE_LOOP]) == 0\n\n\ndef test_single_state_featurizer_creates_encoded_all_actions():\n domain = Domain(\n intents=[],\n entities=[],\n slots=[],\n templates={},\n forms={},\n action_names=[\"a\", \"b\", \"c\", \"d\"],\n )\n\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n encoded_actions = f.encode_all_actions(domain, RegexInterpreter())\n\n assert len(encoded_actions) == len(domain.action_names_or_texts)\n assert all(\n [\n ACTION_NAME in encoded_action and ACTION_TEXT not in encoded_action\n for encoded_action in encoded_actions\n ]\n )\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_entity_roles_and_groups(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n # TODO roles and groups are not supported in e2e yet\n domain = Domain(\n intents=[],\n entities=[\"city\", f\"city{ENTITY_LABEL_SEPARATOR}to\"],\n slots=[],\n templates={},\n forms={},\n 
action_names=[],\n )\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n encoded = f.encode_entities(\n {\n TEXT: \"I am flying from London to Paris\",\n ENTITIES: [\n {\n ENTITY_ATTRIBUTE_TYPE: \"city\",\n ENTITY_ATTRIBUTE_VALUE: \"London\",\n ENTITY_ATTRIBUTE_START: 17,\n ENTITY_ATTRIBUTE_END: 23,\n },\n {\n ENTITY_ATTRIBUTE_TYPE: f\"city{ENTITY_LABEL_SEPARATOR}to\",\n ENTITY_ATTRIBUTE_VALUE: \"Paris\",\n ENTITY_ATTRIBUTE_START: 27,\n ENTITY_ATTRIBUTE_END: 32,\n },\n ],\n },\n interpreter=interpreter,\n )\n assert sorted(list(encoded.keys())) == sorted([ENTITY_TAGS])\n assert np.all(\n encoded[ENTITY_TAGS][0].features == [[0], [0], [0], [0], [1], [0], [2]]\n )\n\n\ndef test_single_state_featurizer_uses_dtype_float():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_name\": \"d\"},\n },\n interpreter=RegexInterpreter(),\n )\n\n assert encoded[ACTION_NAME][0].features.dtype == np.float32\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_with_action_listen(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"greet\": 0, \"inform\": 1}\n f._default_feature_states[ENTITIES] = {\n \"city\": 0,\n \"name\": 1,\n f\"city{ENTITY_LABEL_SEPARATOR}to\": 2,\n f\"city{ENTITY_LABEL_SEPARATOR}from\": 3,\n }\n f._default_feature_states[ACTION_NAME] = {\n \"utter_ask_where_to\": 0,\n \"utter_greet\": 1,\n \"action_listen\": 2,\n }\n # `_0` in slots represent feature dimension\n f._default_feature_states[SLOTS] = {\"slot_1_0\": 0, \"slot_2_0\": 1, \"slot_3_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\n \"active_loop_1\": 0,\n \"active_loop_2\": 1,\n \"active_loop_3\": 2,\n \"active_loop_4\": 3,\n }\n encoded = f.encode_state(\n {\n \"user\": {\n \"text\": \"I am flying from London to Paris\",\n \"intent\": \"inform\",\n \"entities\": [\"city\", f\"city{ENTITY_LABEL_SEPARATOR}to\"],\n },\n \"prev_action\": {\n \"action_name\": \"action_listen\",\n \"action_text\": \"throw a ball\",\n },\n \"active_loop\": {\"name\": \"active_loop_4\"},\n \"slots\": {\"slot_1\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n # check all the features are encoded and *_text features are encoded by a\n # dense featurizer\n assert sorted(list(encoded.keys())) == sorted(\n [TEXT, ENTITIES, ACTION_NAME, SLOTS, ACTIVE_LOOP, INTENT, ACTION_TEXT]\n )\n assert encoded[TEXT][0].features.shape[-1] == 300\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 1]])).nnz == 0\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])\n ).nnz == 0\n assert encoded[ENTITIES][0].features.shape[-1] == 4\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_not_with_action_listen(\n 
unpacked_trained_moodbot_path: Text,\n):\n # check that user features are ignored when action_name is not action_listen\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"a ball\", \"intent\": \"b\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_name\": \"d\", \"action_text\": \"throw a ball\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n # check user input is ignored when action is not action_listen\n assert list(encoded.keys()) == [ACTION_TEXT, ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_with_no_action_name(\n unpacked_trained_moodbot_path: Text,\n):\n # check that action name features are not added by the featurizer when not\n # present in the state and\n # check user input is ignored when action is not action_listen\n # and action_name is features are not added\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"a ball\", \"intent\": \"b\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_text\": \"throw a ball\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n assert list(encoded.keys()) == [ACTION_TEXT, ACTIVE_LOOP, SLOTS]\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\ndef test_state_features_for_attribute_raises_on_not_supported_attribute():\n f = SingleStateFeaturizer()\n\n with pytest.raises(ValueError):\n f._state_features_for_attribute({}, \"not-supported-attribute\")\n\n\ndef test_to_sparse_sentence_features():\n features = [\n Features(\n scipy.sparse.csr_matrix(np.random.randint(5, size=(5, 10))),\n FEATURE_TYPE_SEQUENCE,\n TEXT,\n \"some-featurizer\",\n )\n ]\n\n sentence_features = SingleStateFeaturizer._to_sparse_sentence_features(features)\n\n assert len(sentence_features) == 1\n assert FEATURE_TYPE_SENTENCE == sentence_features[0].type\n assert features[0].origin == 
sentence_features[0].origin\n assert features[0].attribute == sentence_features[0].attribute\n assert sentence_features[0].features.shape == (1, 10)\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_uses_regex_interpreter(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n domain = Domain(\n intents=[], entities=[], slots=[], templates={}, forms=[], action_names=[],\n )\n f = SingleStateFeaturizer()\n # simulate that core was trained separately by passing\n # RegexInterpreter to prepare_for_training\n f.prepare_for_training(domain, RegexInterpreter())\n # simulate that nlu and core models were manually combined for prediction\n # by passing trained interpreter to encode_all_actions\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n features = f._extract_state_features({TEXT: \"some text\"}, interpreter)\n # RegexInterpreter cannot create features for text, therefore since featurizer\n # was trained without nlu, features for text should be empty\n assert not features\n" ]
[ [ "numpy.all", "numpy.random.randint" ] ]
mohammadzainabbas/tensorflow
[ "049dfd5e070cfa84c82eea71c6c746a70cba4a3f" ]
[ "tensorflow/contrib/eager/python/datasets_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport threading\nimport time\n\nimport numpy as np\n\nfrom tensorflow.contrib import lookup\nfrom tensorflow.contrib.data.python.ops import prefetching_ops\nfrom tensorflow.contrib.data.python.ops import threadpool\nfrom tensorflow.contrib.data.python.ops import unique\nfrom tensorflow.contrib.eager.python import checkpointable_utils\nfrom tensorflow.contrib.eager.python import datasets\nfrom tensorflow.python.data import Dataset\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\n\n\nclass IteratorTest(test.TestCase):\n\n def testBasic(self):\n got = []\n for t in datasets.Iterator(Dataset.range(4)):\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testBasicOneShotIterator(self):\n got = []\n for t in Dataset.range(4).make_one_shot_iterator():\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testBasicImplicitIterator(self):\n got = []\n for t in Dataset.range(4):\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testGetNext(self):\n iterator = datasets.Iterator(Dataset.range(4))\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, iterator.get_next().numpy())\n self.assertEqual(2, iterator.get_next().numpy())\n self.assertEqual(3, iterator.get_next().numpy())\n with self.assertRaises(errors.OutOfRangeError):\n iterator.get_next()\n\n def testGetNextOneShotIterator(self):\n iterator = Dataset.range(4).make_one_shot_iterator()\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, iterator.get_next().numpy())\n self.assertEqual(2, iterator.get_next().numpy())\n self.assertEqual(3, iterator.get_next().numpy())\n with self.assertRaises(errors.OutOfRangeError):\n iterator.get_next()\n\n def testMultipleIteratorsOnTheSameDataset(self):\n ds = Dataset.range(4)\n it1 = datasets.Iterator(ds)\n it2 = datasets.Iterator(ds)\n got = [x.numpy() for x in it1]\n self.assertAllEqual([0, 1, 2, 3], got)\n\n got = [x.numpy() for x in it2]\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testNestedOutputs(self):\n ds = Dataset.zip((Dataset.range(4), Dataset.zip((Dataset.range(4),\n Dataset.range(4)))))\n total = 0\n # The Iterator will return a nested structure of Tensor objects.\n # Some funkiness to compare against simple integers.\n for (i, x) in enumerate(datasets.Iterator(ds)):\n want = (i, (i, i))\n got = (x[0].numpy(), (x[1][0].numpy(), 
x[1][1].numpy()))\n self.assertEqual(got, want)\n total += 1\n self.assertEqual(4, total)\n\n def testMapAndFilter(self):\n def even(x):\n return math_ops.equal(math_ops.mod(x, 2), 0)\n\n it = datasets.Iterator(Dataset.range(8).map(math_ops.square).filter(even))\n got = [x.numpy() for x in it]\n self.assertAllEqual([0, 4, 16, 36], got)\n\n def testMapCaptureLookupTable(self):\n default_val = -1\n keys = constant_op.constant(['brain', 'salad', 'surgery'])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup.HashTable(\n lookup.KeyValueTensorInitializer(keys, values), default_val)\n dataset = Dataset.from_tensor_slices(['brain', 'salad', 'surgery'])\n dataset = dataset.map(table.lookup)\n it = datasets.Iterator(dataset)\n got = [x.numpy() for x in it]\n self.assertAllEqual([0, 1, 2], got)\n\n def testMultipleIteratorsOnADatasetThatUsesFunctions(self):\n ds = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(math_ops.square)\n\n got1 = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual([1, 4, 9, 16, 25, 36], got1)\n got2 = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual(got1, got2)\n\n def assertSparseValuesEqual(self, a, b):\n self.assertAllEqual(a.indices, b.indices)\n self.assertAllEqual(a.values, b.values)\n self.assertAllEqual(a.dense_shape, b.dense_shape)\n\n def testSparseTensorElements(self):\n components = (sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 0], [2, 0]]),\n values=np.array([0, 0, 0]),\n dense_shape=np.array([3, 1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1], [2, 2]]),\n values=np.array([1, 2, 3]),\n dense_shape=np.array([3, 3])))\n\n expected = [\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([1]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[1]]),\n values=np.array([2]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[2]]),\n values=np.array([3]),\n dense_shape=np.array([3]))),\n ]\n\n for i, result in enumerate(\n datasets.Iterator(Dataset.from_tensor_slices(components))):\n self.assertSparseValuesEqual(expected[i][0], result[0])\n self.assertSparseValuesEqual(expected[i][1], result[1])\n\n def testPyFunc(self):\n\n def my_map(inp):\n return [[x + 1 for x in inp]]\n\n ds = Dataset.range(4).map(\n lambda x: script_ops.py_func(my_map, [[x]], dtypes.int64))\n got = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual([[1], [2], [3], [4]], got)\n\n def testTensorsPlacedOnDevice(self):\n ds = Dataset.from_tensors([0., 1.])\n with ops.device(test.gpu_device_name()):\n x = datasets.Iterator(ds).next()\n x = math_ops.add(x, x)\n self.assertAllEqual([0., 2.], x.numpy())\n\n def testTensorsExplicitPrefetchToDevice(self):\n ds = Dataset.from_tensor_slices([0., 1.])\n ds = ds.apply(prefetching_ops.prefetch_to_device(test.gpu_device_name()))\n\n with self.assertRaisesRegexp(TypeError, 'prefetch_to_device'):\n datasets.Iterator(ds)\n\n for i, x in enumerate(ds):\n with ops.device(test.gpu_device_name()):\n x = math_ops.add(x, x)\n self.assertEqual(float(i) + float(i), x.numpy())\n\n def 
testOverrideThreadPool(self):\n\n def get_thread_id(_):\n # Python creates a dummy thread object to represent the current\n # thread when called from an \"alien\" thread (such as a\n # `PrivateThreadPool` thread in this case). It does not include\n # the TensorFlow-given display name, but it has a unique\n # identifier that maps one-to-one with the underlying OS thread.\n return np.array(threading.current_thread().ident).astype(np.int64)\n\n for num_threads in [1, 2, 4, 8, 16]:\n\n dataset = (\n Dataset.range(1000).map(\n lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),\n num_parallel_calls=32).apply(unique.unique()))\n\n dataset = threadpool.override_threadpool(\n dataset,\n threadpool.PrivateThreadPool(\n num_threads, display_name='private_thread_pool_%d' % num_threads))\n\n thread_ids = []\n for next_element in datasets.Iterator(dataset):\n thread_ids.append(next_element)\n self.assertEqual(len(thread_ids), len(set(thread_ids)))\n self.assertGreater(len(thread_ids), 0)\n # NOTE(mrry): We don't control the thread pool scheduling, and\n # so cannot guarantee that all of the threads in the pool will\n # perform work.\n self.assertLessEqual(len(thread_ids), num_threads)\n\n def testSaveRestore(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n dataset = dataset.map(math_ops.square).batch(2)\n iterator = datasets.Iterator(dataset)\n checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)\n self.assertAllEqual([1, 4], iterator.get_next().numpy())\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertAllEqual([9, 16], iterator.get_next().numpy())\n self.assertAllEqual([25, 36], iterator.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertAllEqual([9, 16], iterator.get_next().numpy())\n self.assertAllEqual([25, 36], iterator.get_next().numpy())\n\n def testSaveRestoreMultipleIterator(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n dataset = dataset.map(math_ops.square).batch(2)\n iterator_1 = datasets.Iterator(dataset)\n iterator_2 = datasets.Iterator(dataset)\n dataset_2 = Dataset.range(10)\n iterator_3 = datasets.Iterator(dataset_2)\n\n checkpoint = checkpointable_utils.Checkpoint(\n iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)\n self.assertAllEqual([1, 4], iterator_1.get_next().numpy())\n self.assertEqual(0, iterator_3.get_next().numpy())\n self.assertEqual(1, iterator_3.get_next().numpy())\n self.assertEqual(2, iterator_3.get_next().numpy())\n\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertAllEqual([1, 4], iterator_2.get_next().numpy())\n self.assertAllEqual([9, 16], iterator_2.get_next().numpy())\n self.assertEqual(3, iterator_3.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertAllEqual([9, 16], iterator_1.get_next().numpy())\n self.assertAllEqual([1, 4], iterator_2.get_next().numpy())\n self.assertEqual(3, iterator_3.get_next().numpy())\n\n def testRestoreExhaustedIterator(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.range(3)\n iterator = datasets.Iterator(dataset)\n\n checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, 
iterator.get_next().numpy())\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertEqual(2, iterator.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertEqual(2, iterator.get_next().numpy())\n\n\nclass DatasetConstructorBenchmark(test.Benchmark):\n\n def benchmarkSliceRepeatBatchEager(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n Dataset.from_tensor_slices(input_data).repeat(num_epochs)\n .batch(batch_size))\n iterator = datasets.Iterator(dataset)\n\n ends = [time.time()]\n for _ in iterator:\n ends.append(time.time())\n\n deltas = np.ediff1d(ends)\n median_wall_time = np.median(deltas)\n print(\n 'Slice/repeat/batch eager input size: %d batch size: %d Median wall '\n 'time per element: %f'\n % (input_size, batch_size, median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name='benchmark_slice_repeat_batch_eager_input_%d_batch_%d' %\n (input_size, batch_size))\n\n def benchmarkSliceBatchCacheRepeatCallable(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n Dataset.from_tensor_slices(input_data).batch(batch_size).cache()\n .repeat(num_epochs))\n iterator = datasets.Iterator(dataset)\n\n ends = [time.time()]\n for _ in iterator:\n ends.append(time.time())\n\n deltas = np.ediff1d(ends)\n median_wall_time = np.median(deltas)\n print(\n 'Slice/batch/cache/repeat eager input size: %d batch size: %d Median '\n 'wall time per element: %f'\n % (input_size, batch_size, median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name='benchmark_slice_batch_cache_repeat_eager_input_%d_batch_%d' %\n (input_size, batch_size))\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "numpy.ediff1d", "tensorflow.python.ops.math_ops.mod", "tensorflow.contrib.data.python.ops.unique.unique", "tensorflow.contrib.eager.python.datasets.Iterator", "tensorflow.python.ops.math_ops.add", "tensorflow.python.eager.test.gpu_device_name", "numpy.random.randn", "numpy.median", "tensorflow.contrib.lookup.KeyValueTensorInitializer", "tensorflow.python.data.Dataset.range", "tensorflow.python.eager.test.main", "tensorflow.python.data.Dataset.from_tensors", "tensorflow.python.ops.script_ops.py_func", "tensorflow.contrib.data.python.ops.threadpool.PrivateThreadPool", "numpy.array", "tensorflow.python.data.Dataset.from_tensor_slices", "tensorflow.contrib.eager.python.checkpointable_utils.Checkpoint", "tensorflow.python.framework.constant_op.constant" ] ]
minuJeong/moderngl-window
[ "6386478f1e6b07cefda8f4d9324d972ab88b34ec" ]
[ "examples/advanced/boids.py" ]
[ "from pathlib import Path\r\nimport random\r\nimport numpy\r\nfrom pyrr import matrix44\r\n\r\nimport moderngl\r\nimport moderngl_window\r\nfrom moderngl_window.opengl.vao import VAO\r\n\r\n\r\nclass Boids(moderngl_window.WindowConfig):\r\n \"\"\"\r\n An attempt to make something boid-list with GL3.3.\r\n Not currently working as intended, but still creates\r\n and interesting result.\r\n\r\n For this to properly work we need to split the calculations\r\n into several passes.\r\n\r\n We are doing this the O(n^2) way with the gpu using transform feedback.\r\n To make the data avaialble to the vertex shader (looping through it)\r\n we copy the vertex buffer every frame to a texture.\r\n\r\n A better way in the future is to use compute shader.\r\n \"\"\"\r\n title = \"Boids\"\r\n resource_dir = (Path(__file__) / '../../resources').absolute()\r\n aspect_ratio = 3440 / 1440\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n MAX_TEX_WIDTH = 8192\r\n N = MAX_TEX_WIDTH * 1\r\n\r\n def gen_initial_data(n, x_area=2.0, y_area=2.0):\r\n for n in range(n):\r\n # position\r\n yield (random.random() - 0.5) * x_area\r\n yield (random.random() - 0.5) * y_area\r\n # Velocity\r\n yield (random.random() - 0.5)\r\n yield (random.random() - 0.5)\r\n\r\n # Create geometry data\r\n gen = gen_initial_data(N, x_area=self.aspect_ratio * 2 * 0.9, y_area=2.0 * 0.95)\r\n data = numpy.fromiter(gen, count=N * 4, dtype='f4')\r\n self.boids_buffer_1 = self.ctx.buffer(data.tobytes())\r\n self.boids_buffer_2 = self.ctx.buffer(data=self.boids_buffer_1.read())\r\n\r\n self.boids_vao_1 = VAO(name='boids_1', mode=moderngl.POINTS)\r\n self.boids_vao_1.buffer(self.boids_buffer_1, '2f 2f', ['in_position', 'in_velocity'])\r\n\r\n self.boids_vao_2 = VAO(name='boids_2', mode=moderngl.POINTS)\r\n self.boids_vao_2.buffer(self.boids_buffer_2, '2f 2f', ['in_position', 'in_velocity'])\r\n\r\n self.boids_texture = self.ctx.texture((MAX_TEX_WIDTH, N * 2 // MAX_TEX_WIDTH), components=2, dtype='f4')\r\n\r\n # Programs\r\n self.boids_render_program = self.load_program('programs/boids/boids_render.glsl')\r\n self.boids_transform_program = self.load_program('programs/boids/boids_transform.glsl')\r\n\r\n # Prepare for rendering\r\n self.m_proj = matrix44.create_orthogonal_projection(\r\n -self.aspect_ratio, self.aspect_ratio,\r\n -1.0, 1.0,\r\n -1.0, 1.0,\r\n dtype='f4',\r\n )\r\n self.boids_render_program['m_proj'].write(self.m_proj.tobytes())\r\n self.boids_transform_program['data'].value = 0\r\n self.boids_transform_program['num_boids'].value = N\r\n self.boids_transform_program['tex_width'].value = MAX_TEX_WIDTH\r\n\r\n def render(self, time, frame_time):\r\n\r\n self.boids_texture.use(location=0)\r\n self.boids_transform_program['timedelta'].value = frame_time # max(frame_time, 1.0 / 60.0)\r\n self.boids_vao_1.transform(self.boids_transform_program, self.boids_buffer_2)\r\n self.boids_vao_2.render(self.boids_render_program)\r\n\r\n # Swap around ..\r\n self.boids_vao_1, self.boids_vao_2 = self.boids_vao_2, self.boids_vao_1\r\n self.boids_buffer_1, self.boids_buffer_2 = self.boids_buffer_2, self.boids_buffer_1\r\n\r\n # Write vertex data into texture so we can interate it in shader\r\n self.boids_texture.write(self.boids_buffer_1.read())\r\n\r\n\r\nif __name__ == '__main__':\r\n moderngl_window.run_window_config(Boids)\r\n" ]
[ [ "numpy.fromiter" ] ]
linearlabstech/blox
[ "6a5c8a28fcfcb17731be89939284e7ac13a047d7" ]
[ "BLOX/Modules/EfficientNetBody.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2019, Linear Labs Technologies\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom efficientnet_pytorch import EfficientNet\nimport torch\nfrom torch import nn\n\nclass EfficientNetBody(nn.Module):\n def __init__(self,mtype='efficientnet-b0'): \n super(EfficientNetBody,self).__init__()\n self.model = EfficientNet.from_pretrained(mtype)\n if torch.cuda.is_available():self.model.cuda()\n else:self.model.cpu()\n\n def forward(self,x):\n x = self.model.extract_features(x)\n return x" ]
[ [ "torch.cuda.is_available" ] ]
motokimura/M3D-RPN
[ "7a9be66cb257c349e51a3eac7e67bdea3a6ddd72" ]
[ "scripts/test_rpn_3d.py" ]
[ "# -----------------------------------------\n# python modules\n# -----------------------------------------\nfrom importlib import import_module\nfrom easydict import EasyDict as edict\nimport torch.backends.cudnn as cudnn\nimport sys\nimport numpy as np\nimport os\n\n# stop python from writing so much bytecode\nsys.dont_write_bytecode = True\nsys.path.append(os.getcwd())\nnp.set_printoptions(suppress=True)\n\n# -----------------------------------------\n# custom modules\n# -----------------------------------------\nfrom lib.imdb_util import *\n\n\ndef parse_args(argv):\n from getopt import getopt\n opts, args = getopt(argv, '', ['config=', 'weight=', 'outdir='])\n # defaults (trainval split #1)\n conf_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1_config.pkl'\n weights_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1'\n outdir = None\n # read opts\n for opt, arg in opts:\n if opt in ('--config'):\n conf_path = arg\n if opt in ('--weight'):\n weights_path = arg\n if opt in ('--outdir'):\n outdir = arg\n\n if outdir is None:\n # if --outdir option is not used, give the weight file name to output directory\n outdir = os.path.basename(weights_path)\n\n return conf_path, weights_path, outdir\n\n\nconf_path, weights_path, outdir = parse_args(sys.argv[1:])\nprint()\nprint('CONFIG: {}'.format(conf_path))\nprint('WEIGHT: {}'.format(weights_path))\nprint('OUTDIR: {}'.format(outdir))\nprint()\n\n# load config\nconf = edict(pickle_read(conf_path))\nconf.pretrained = None\n\ndata_path = os.path.join(os.getcwd(), 'data')\nresults_path = os.path.join('output', outdir, 'data')\n\n# make directory\nmkdir_if_missing(results_path, delete_if_exist=True)\n\n# -----------------------------------------\n# torch defaults\n# -----------------------------------------\n\n# defaults\ninit_torch(conf.rng_seed, conf.cuda_seed)\n\n# -----------------------------------------\n# setup network\n# -----------------------------------------\n\n# net\nnet = import_module('models.' + conf.model).build(conf)\n\n# load weights\nload_weights(net, weights_path, remove_module=True)\n\n# switch modes for evaluation\nnet.eval()\n\nprint(pretty_print('conf', conf))\n\n# -----------------------------------------\n# test kitti\n# -----------------------------------------\n\ntest_kitti_3d(conf.dataset_test, net, conf, results_path, data_path, use_log=False)" ]
[ [ "numpy.set_printoptions" ] ]
MuAuan/llightning-pytorch
[ "38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6" ]
[ "Coloring/simple_YCC_resnet.py" ]
[ "import os\nimport time\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport torchvision\nfrom torchvision.datasets import CIFAR10 #MNIST\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\nimport cv2\n\n#from net_encoder_decoder_vgg16 import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet2 import Encoder, Decoder\nfrom net_colarization_resnet import ColorizationNet\n\ndef imshow(img,file='', text_=''):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.text(x = 3, y = 2, s = text_, c = \"red\")\n plt.pause(3)\n if file != '':\n plt.savefig(file+'.png')\n plt.close()\n\nfrom pytorch_lightning.callbacks import Callback \nclass MyPrintingCallback(Callback):\n def on_epoch_end(self, trainer, pl_module):\n print('')\n\nclass rgb2YCrCb(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n CC = np.array(CC).reshape(2,32*8,32*8) #(2,32*2,32*2)\n #print(CC.shape)\n return np.array(CC)\n \n def __repr__(self):\n return self.__class__.__name__\n \nclass rgb2YCrCb_(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n #tensor = self.ts3(self.ts2(self.ts(tensor))) / 4 + 0.5 # unnormalize \n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n Y = np.array(Y).reshape(1,32*8,32*8) #(1,32*2,32*2)\n #print(Y.shape)\n return Y\n\nclass ImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, data_num,train_=True, transform1 = None, transform2 = None,train = True):\n \n self.transform1 = transform1\n self.transform2 = transform2\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Compose([\n transforms.ToTensor(),\n #transforms.Resize((64,64)),\n transforms.Normalize(mean, std),\n ])\n self.train = train_\n \n self.data_dir = './'\n self.data_num = data_num\n self.data = []\n self.label = []\n\n # download\n CIFAR10(self.data_dir, train=True, download=True)\n self.data =CIFAR10(self.data_dir, train=self.train, transform=self.ts3)\n\n def __len__(self):\n return self.data_num\n\n def __getitem__(self, idx):\n out_data = self.data[idx][0]\n out_label_ = self.data[idx][1]\n out_label = torch.from_numpy(np.array(out_label_)).long()\n \n if self.transform1:\n out_data1 = self.transform1(out_data)\n if self.transform2:\n out_data2 = self.transform2(out_data)\n \n return out_data, out_data1, out_data2, out_label\n \nclass LitAutoEncoder(pl.LightningModule):\n\n def __init__(self, data_dir='./'):\n super().__init__()\n self.ts2 = transforms.ToTensor()\n self.ts = transforms.ToPILImage()\n self.data_dir = data_dir\n self.data_num =50000 
#50000\n # Hardcode some dataset specific attributes\n self.num_classes = 10\n self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n self.dims = (32*8, 32*8)\n \n self.encoder_decoder = ColorizationNet()\n #self.encoder = Encoder()\n #self.decoder = Decoder()\n\n def forward(self, x):\n # in lightning, forward defines the prediction/inference actions\n \n x = self.encoder_decoder(x)\n return x\n\n def training_step(self, batch, batch_idx):\n # training_step defined the train loop. It is independent of forward\n _,x,x_ , y = batch\n x_hat = self.encoder_decoder(x) ##resnet\n loss = F.mse_loss(x_hat, x_)\n self.log('train_loss', loss, prog_bar = True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n _,x, x_, y = batch\n x_hat = self.encoder_decoder(x)\n loss = F.mse_loss(x_hat, x_)\n self.log('test_loss', loss, prog_bar = True)\n return loss\n \n def test_step(self, batch, batch_idx):\n # Here we just reuse the validation_step for testing\n return self.validation_step(batch, batch_idx)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) \n return optimizer\n \ndef main():\n ts = transforms.ToPILImage()\n ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n ts3 = transforms.Normalize(mean, std)\n ts4 = transforms.Resize((256,256))\n meang, stdg =[0.5], [0.25]\n ts5 = transforms.Normalize(meang, stdg)\n trans2 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb(), #CrCb\n ])\n trans1 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb_(), #Y\n ])\n dim1 =(256,256)\n dim2 = (1,256,256)\n dim3 = (256,256,2)\n data_num = 50000\n cifar10_full =ImageDataset(data_num, train=True, transform1=trans1, transform2=trans2)\n n_train = int(len(cifar10_full)*0.95)\n n_val = int(len(cifar10_full)*0.04)\n n_test = len(cifar10_full)-n_train -n_val\n cifar10_train, cifar10_val, cifar10_test = torch.utils.data.random_split(cifar10_full, [n_train, n_val, n_test])\n \n trainloader = DataLoader(cifar10_train, shuffle=True, drop_last = True, batch_size=32, num_workers=0)\n valloader = DataLoader(cifar10_val, shuffle=False, batch_size=32, num_workers=0)\n testloader = DataLoader(cifar10_test, shuffle=False, batch_size=32, num_workers=0)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") #for gpu\n # Assuming that we are on a CUDA machine, this should print a CUDA device:\n print(device)\n pl.seed_everything(0)\n\n # model\n autoencoder = LitAutoEncoder()\n #path_ = './simple_coloring/'\n #PATH = path_+'example_cifar4Ln100_9.ckpt'\n #autoencoder = autoencoder.load_from_checkpoint(PATH)\n \n #autoencoder = LitAutoEncoder()\n autoencoder = autoencoder.to(device) #for gpu\n print(autoencoder)\n summary(autoencoder,dim2)\n \n trainer = pl.Trainer(max_epochs=1, gpus=1, callbacks=[MyPrintingCallback()]) ####epoch\n sk = 0\n for i in range(0,10,1):\n trainer.fit(autoencoder, trainloader, valloader) \n print('training_finished')\n \n results = trainer.test(autoencoder, testloader)\n print(results)\n if sk%1==0:\n dataiter = iter(trainloader)\n _,images, images_, labels = dataiter.next()\n print(images.shape, images_.shape)\n\n images0 = []\n for i in range(32):\n print(i, images[i].shape, images_[i].shape)\n YCC_ = cv2.merge((np.array(images[i]).reshape(dim1),np.array(images_[i]).reshape(dim3)))\n images0_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n 
images0.append(ts2(images0_/255.))\n # show images \n imshow(torchvision.utils.make_grid(images0), 'cifar10_results',text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) #3\n # print labels\n print(' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n\n path_ = './simple_coloring/'\n PATH = path_+'example_cifar4Ln100_{}.ckpt'.format(sk)\n trainer.save_checkpoint(PATH)\n\n pretrained_model = autoencoder.load_from_checkpoint(PATH)\n pretrained_model.freeze()\n pretrained_model.eval()\n\n latent_dim,ver = \"Gray2Clolor_resnet\", \"1_{}\".format(sk) #####save condition\n dataiter = iter(testloader)\n images0,images, images1, labels = dataiter.next() #original, Y, CrCb, label\n # show images\n imshow(torchvision.utils.make_grid(images.reshape(32,1,32*8,32*8)/255.),path_+'1_Y_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(images0.reshape(32,3,32,32)),path_+'2_original_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(ts4(images0).reshape(32,3,32*8,32*8)),path_+'3_original_normx2_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n # show images1\n #imshow(torchvision.utils.make_grid(images1.reshape(32,3,32*2,32*2)),'normalized_images1_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n\n decode_img = pretrained_model.encoder_decoder(images[0:32].to('cpu').reshape(32,1,32*8,32*8)) #3\n #decode_img = pretrained_model.decoder(encode_img)\n decode_img_cpu = decode_img.cpu()\n images2 = []\n for i in range(32):\n print(i, images[i].shape, decode_img_cpu[i].shape)\n YCC_ = cv2.merge((np.array(images[i].reshape(dim1)),np.array(decode_img_cpu[i].reshape(dim3))))\n images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n images2.append(ts3(ts2(images2_/255.)))\n #images2.append(ts2(images2_/255.))\n imshow(torchvision.utils.make_grid(images2), path_+'4_preds_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n sk += 1\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n print('elapsed time: {:.3f} [sec]'.format(time.time() - start_time)) \n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.functional.mse_loss", "matplotlib.pyplot.pause", "numpy.transpose", "matplotlib.pyplot.savefig", "torch.cuda.is_available", "torch.utils.data.random_split", "matplotlib.pyplot.text", "matplotlib.pyplot.close", "numpy.array" ] ]
JiaxiangBU/xgboost-LightGBM_demo
[ "ea9b443121c8124340b5906340a0b9d5a098ac1a" ]
[ "Xgboost_prac/UCI_CAD/UCI_test.py" ]
[ "import numpy as np\nimport time\nimport pandas as pd\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import KFold\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\ndata_np=np.array(pd.read_csv('./UCI_CAD.csv'))\n\n\nX=np.array([line[:-1] for line in data_np])\ny=np.array([line[-1] for line in data_np])\n\nxgb_model=XGBClassifier(nthread=4,n_estimators=370,\n silent=False,objective='multi:softmax',\n scale_pos_weight=1,max_depth=4,min_child_weight=2,\n seed=1993,gamma=4.4,colsample_bytree=0.1,subsample=0.1,\n learning_rate=0.1)\n\n# # specify your configurations as a dict\n# param_grid_xgboost={'n_estimators':np.arange(300,400,10)}\n# start_time=time.clock()\n# grid_xgb=GridSearchCV(xgb_model,param_grid_xgboost,cv=5,scoring='accuracy')\n# grid_xgb.fit(X,y)\n# endtime=time.clock()\n# print('score',grid_xgb.grid_scores_)\n# print('Xgboost_best_estimator_',grid_xgb.best_estimator_)\n# print('Xgboost_best_score_',grid_xgb.best_score_)\n# print('Xgboost_best_params_',grid_xgb.best_params_)\n# print(\"run_time\",endtime-start_time)\n\nstart_time=time.clock()\nscore_all=0\nkf=KFold(n_splits=5,shuffle=True)\nfor train,test in kf.split(X):\n print(len(train),len(test))\n X_train=X[train]\n X_test=X[test]\n y_train=y[train]\n y_test=y[test]\n xgb_model.fit(X_train,y_train)\n preds=xgb_model.predict(X_test)\n score=accuracy_score(y_test,preds)\n print(\"score:\",score)\n score_all=score_all+score\nprint(\"score_all\",score_all/5)\nendtime=time.clock()\nprint(\"run_time\",endtime-start_time)\n\n" ]
[ [ "numpy.array", "sklearn.metrics.accuracy_score", "pandas.read_csv", "sklearn.model_selection.KFold" ] ]
iryzhkov/stock-trading-backend
[ "7161026b7b4deb78a934b66550c85a27c6b32933" ]
[ "tests/agent/test_polynomial_model.py" ]
[ "\"\"\"Unit tests for PolynomialModel class\n\"\"\"\nimport os\nimport unittest\n\nimport pandas as pd\n\nfrom stock_trading_backend.agent import PolynomialModel\n\n\nclass TestPolynomialModel(unittest.TestCase):\n \"\"\"Unit tests for PolynomialModel class.\n \"\"\"\n def test_initializes(self):\n \"\"\"Checks if model initializes properly.\n \"\"\"\n model = PolynomialModel(degree=5)\n self.assertEqual(5, model.degree)\n with self.assertRaises(ValueError):\n _ = PolynomialModel(degree=0)\n\n def test_save_and_load(self):\n \"\"\"Checks if saving and loading functin works properly.\n \"\"\"\n file_path = \"data/test/test.pkl\"\n model = PolynomialModel()\n observation = pd.Series([1, 2, 3], [\"balance\", \"net_worth\", \"owned\"])\n predictions_1 = model.predict(observation, [[0, 1]] * 5)\n model.save(file_path)\n model.load(file_path)\n predictions_2 = model.predict(observation, [[0, 1]] * 5)\n self.assertTrue(all(predictions_1 == predictions_2))\n os.remove(file_path)\n\n def test_predict(self):\n \"\"\"Checks if predict function works properly.\n \"\"\"\n model = PolynomialModel()\n observation = pd.Series([1, 2, 3], [\"balance\", \"net_worth\", \"owned\"])\n predictions = model.predict(observation, [[0, 1]] * 5)\n self.assertEqual(5, len(predictions))\n\n def test_train(self):\n \"\"\"Checks if train function works properly.\n \"\"\"\n model = PolynomialModel(degree=2)\n observations = pd.DataFrame([[1, 2, 3]] * 10, columns=[\"balance\", \"net_worth\", \"owned\"])\n actions = [[0]] * 5 + [[1]] * 5\n expected_values = [[0]] * 5 + [[1]] * 5\n losses = [model.train(observations, actions, expected_values) for i in range(10)]\n self.assertTrue(losses[0] > losses[-1])\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
licanisme/Paddle
[ "d11c140e280880b9d031fa38361f3230aef6cf9c" ]
[ "python/paddle/incubate/hapi/vision/transforms/functional.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport collections\nimport random\nimport math\n\nimport cv2\nimport numbers\nimport numpy as np\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n__all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale']\n\n\ndef flip(image, code):\n \"\"\"\n Accordding to the code (the type of flip), flip the input image\n\n Args:\n image: Input image, with (H, W, C) shape\n code: Code that indicates the type of flip.\n -1 : Flip horizontally and vertically\n 0 : Flip vertically\n 1 : Flip horizontally\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from paddle.incubate.hapi.vision.transforms import functional as F\n\n fake_img = np.random.rand(224, 224, 3)\n\n # flip horizontally and vertically\n F.flip(fake_img, -1)\n\n # flip vertically\n F.flip(fake_img, 0)\n\n # flip horizontally\n F.flip(fake_img, 1)\n \"\"\"\n return cv2.flip(image, flipCode=code)\n\n\ndef resize(img, size, interpolation=cv2.INTER_LINEAR):\n \"\"\"\n resize the input data to given size\n\n Args:\n input: Input data, could be image or masks, with (H, W, C) shape\n size: Target size of input data, with (height, width) shape.\n interpolation: Interpolation method.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from paddle.incubate.hapi.vision.transforms import functional as F\n\n fake_img = np.random.rand(256, 256, 3)\n\n F.resize(fake_img, 224)\n\n F.resize(fake_img, (200, 150))\n \"\"\"\n\n if isinstance(interpolation, Sequence):\n interpolation = random.choice(interpolation)\n\n if isinstance(size, int):\n h, w = img.shape[:2]\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return cv2.resize(img, (ow, oh), interpolation=interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return cv2.resize(img, (ow, oh), interpolation=interpolation)\n else:\n return cv2.resize(img, size[::-1], interpolation=interpolation)\n\n\ndef pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):\n \"\"\"Pads the given CV Image on all sides with speficified padding mode and fill value.\n\n Args:\n img (np.ndarray): Image to be padded.\n padding (int|tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n ``constant`` means padding with a constant value, this value is specified with fill. \n ``edge`` means padding with the last value at the edge of the image. \n ``reflect`` means padding with reflection of image (without repeating the last value on the edge) \n padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode \n will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.\n ``symmetric`` menas pads with reflection of image (repeating the last value on the edge)\n padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode \n will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.\n\n Returns:\n numpy ndarray: Padded image.\n\n Examples:\n \n .. code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import pad\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = pad(fake_img, 2)\n print(fake_img.shape)\n\n \"\"\"\n\n if not isinstance(padding, (numbers.Number, list, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, list, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\n \"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Expected padding mode be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode)\n\n PAD_MOD = {\n 'constant': cv2.BORDER_CONSTANT,\n 'edge': cv2.BORDER_REPLICATE,\n 'reflect': cv2.BORDER_DEFAULT,\n 'symmetric': cv2.BORDER_REFLECT\n }\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, collections.Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, collections.Sequence) and len(padding) == 4:\n pad_left, pad_top, pad_right, pad_bottom = padding\n\n if isinstance(fill, numbers.Number):\n fill = (fill, ) * (2 * len(img.shape) - 3)\n\n if padding_mode == 'constant':\n assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \\\n 'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))\n\n img = cv2.copyMakeBorder(\n src=img,\n top=pad_top,\n bottom=pad_bottom,\n left=pad_left,\n right=pad_right,\n borderType=PAD_MOD[padding_mode],\n value=fill)\n\n return img\n\n\ndef rotate(img,\n angle,\n interpolation=cv2.INTER_LINEAR,\n expand=False,\n center=None):\n \"\"\"Rotates the image by angle.\n\n Args:\n img (numpy.ndarray): Image to be rotated.\n angle (float|int): In degrees clockwise order.\n interpolation (int, optional):\n interpolation: Interpolation method.\n expand (bool|optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple|optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n Returns:\n numpy ndarray: Rotated image.\n\n Examples:\n \n .. 
code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import rotate\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = rotate(fake_img, 10)\n print(fake_img.shape)\n \"\"\"\n dtype = img.dtype\n\n h, w, _ = img.shape\n point = center or (w / 2, h / 2)\n M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)\n\n if expand:\n if center is None:\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n M[0, 2] += (nW / 2) - point[0]\n M[1, 2] += (nH / 2) - point[1]\n\n dst = cv2.warpAffine(img, M, (nW, nH))\n else:\n xx = []\n yy = []\n for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]),\n np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])):\n target = np.dot(M, point)\n xx.append(target[0])\n yy.append(target[1])\n nh = int(math.ceil(max(yy)) - math.floor(min(yy)))\n nw = int(math.ceil(max(xx)) - math.floor(min(xx)))\n\n M[0, 2] += (nw - w) / 2\n M[1, 2] += (nh - h) / 2\n dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation)\n else:\n dst = cv2.warpAffine(img, M, (w, h), flags=interpolation)\n return dst.astype(dtype)\n\n\ndef to_grayscale(img, num_output_channels=1):\n \"\"\"Converts image to grayscale version of image.\n\n Args:\n img (numpy.ndarray): Image to be converted to grayscale.\n\n Returns:\n numpy.ndarray: Grayscale version of the image.\n if num_output_channels == 1, returned image is single channel\n if num_output_channels == 3, returned image is 3 channel with r == g == b\n \n Examples:\n \n .. code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import to_grayscale\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = to_grayscale(fake_img)\n print(fake_img.shape)\n \"\"\"\n\n if num_output_channels == 1:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n elif num_output_channels == 3:\n img = cv2.cvtColor(\n cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)\n else:\n raise ValueError('num_output_channels should be either 1 or 3')\n\n return img\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.abs" ] ]
cpelley/improver
[ "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149", "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149" ]
[ "improver/cli/nbhood_land_and_sea.py", "improver/nbhood/recursive_filter.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Script to run neighbourhooding processing over areas of land and sea\nseparately before combining them to return unified fields. Topographic zones\nmay also be employed, with the sea area being treated as a distinct zone.\"\"\"\nfrom improver import cli\n\n\[email protected]\[email protected]_output\ndef process(\n cube: cli.inputcube,\n mask: cli.inputcube,\n weights: cli.inputcube = None,\n *,\n radii: cli.comma_separated_list,\n lead_times: cli.comma_separated_list = None,\n area_sum=False,\n):\n \"\"\" Module to process land and sea separately before combining them.\n\n Neighbourhood the input dataset over two distinct regions of land and sea.\n If performed as a single level neighbourhood, a land-sea mask should be\n provided. If instead topographic_zone neighbourhooding is being employed,\n the mask should be one of topographic zones. In the latter case a weights\n array is also needed to collapse the topographic_zone coordinate. These\n weights are created with the improver generate-topography-bands-weights\n CLI and should be made using a land-sea mask, which will then be employed\n within this code to draw the distinction between the two surface types.\n\n Args:\n cube (iris.cube.Cube):\n A cube to be processed.\n mask (iris.cube.Cube):\n A cube containing either a mask of topographic zones over land or\n a land-sea mask. If this is a land-sea mask, land points should be\n set to one and sea points set to zero.\n weights (iris.cube.Cube):\n A cube containing the weights which are used for collapsing the\n dimension gained through masking. These weights must have been\n created using a land-sea mask. 
(Optional).\n radii (list of float):\n The radius or a list of radii in metres of the neighbourhood to\n apply.\n If it is a list, it must be the same length as lead_times, which\n defines at which lead time to use which nbhood radius. The radius\n will be interpolated for intermediate lead times.\n lead_times (list of int):\n The lead times in hours that correspond to the radii to be used.\n If lead_times are set, radii must be a list the same length as\n lead_times. Lead times must be given as integer values.\n area_sum (bool):\n Return sum rather than fraction over the neighbourhood area.\n\n Returns:\n (tuple): tuple containing:\n **result** (iris.cube.Cube):\n A cube of the processed data.\n\n Raises:\n ValueError:\n If the topographic zone mask has the attribute\n topographic_zones_include_seapoints.\n IOError:\n if a weights cube isn't given and a topographic_zone mask is given.\n ValueError:\n If the weights cube has the attribute\n topographic_zones_include_seapoints.\n RuntimeError:\n If lead times are not None and has a different length to radii.\n TypeError:\n A weights cube has been provided but no topographic zone.\n\n \"\"\"\n import numpy as np\n\n from improver.nbhood.nbhood import NeighbourhoodProcessing\n from improver.nbhood.use_nbhood import ApplyNeighbourhoodProcessingWithAMask\n\n sum_or_fraction = \"sum\" if area_sum else \"fraction\"\n\n masking_coordinate = None\n if any(\n \"topographic_zone\" in coord.name() for coord in mask.coords(dim_coords=True)\n ):\n\n if mask.attributes[\"topographic_zones_include_seapoints\"] == \"True\":\n raise ValueError(\n \"The topographic zones mask cube must have been \"\n \"masked to exclude sea points, but \"\n \"topographic_zones_include_seapoints = True\"\n )\n\n if not weights:\n raise TypeError(\n \"A weights cube must be provided if using a mask \"\n \"of topographic zones to collapse the resulting \"\n \"vertical dimension.\"\n )\n\n if weights.attributes[\"topographic_zones_include_seapoints\"] == \"True\":\n raise ValueError(\n \"The weights cube must be masked to exclude sea \"\n \"points, but topographic_zones_include_seapoints \"\n \"= True\"\n )\n\n masking_coordinate = \"topographic_zone\"\n land_sea_mask = weights[0].copy(data=weights[0].data.mask)\n land_sea_mask.rename(\"land_binary_mask\")\n land_sea_mask.remove_coord(masking_coordinate)\n # Create land and sea masks in IMPROVER format (inverse of\n # numpy standard) 1 - include this region, 0 - exclude this region.\n land_only = land_sea_mask.copy(\n data=np.logical_not(land_sea_mask.data).astype(int)\n )\n sea_only = land_sea_mask.copy(data=land_sea_mask.data.astype(int))\n\n else:\n if weights is not None:\n raise TypeError(\"A weights cube has been provided but will not be \" \"used\")\n land_sea_mask = mask\n # In this case the land is set to 1 and the sea is set to 0 in the\n # input mask.\n sea_only = land_sea_mask.copy(\n data=np.logical_not(land_sea_mask.data).astype(int)\n )\n land_only = land_sea_mask.copy(data=land_sea_mask.data.astype(int))\n\n if lead_times is None:\n radius_or_radii = float(radii[0])\n else:\n if len(radii) != len(lead_times):\n raise RuntimeError(\n \"If leadtimes are supplied, it must be a list\"\n \" of equal length to a list of radii.\"\n )\n radius_or_radii = [float(x) for x in radii]\n lead_times = [int(x) for x in lead_times]\n\n # Section for neighbourhood processing land points.\n if land_only.data.max() > 0.0:\n if masking_coordinate is None:\n result_land = NeighbourhoodProcessing(\n \"square\",\n radius_or_radii,\n 
lead_times=lead_times,\n sum_or_fraction=sum_or_fraction,\n re_mask=True,\n )(cube, land_only)\n else:\n result_land = ApplyNeighbourhoodProcessingWithAMask(\n masking_coordinate,\n radius_or_radii,\n lead_times=lead_times,\n collapse_weights=weights,\n sum_or_fraction=sum_or_fraction,\n re_mask=False,\n )(cube, mask)\n result = result_land\n\n # Section for neighbourhood processing sea points.\n if sea_only.data.max() > 0.0:\n result_sea = NeighbourhoodProcessing(\n \"square\",\n radius_or_radii,\n lead_times=lead_times,\n sum_or_fraction=sum_or_fraction,\n re_mask=True,\n )(cube, sea_only)\n result = result_sea\n\n # Section for combining land and sea points following land and sea points\n # being neighbourhood processed individually.\n if sea_only.data.max() > 0.0 and land_only.data.max() > 0.0:\n # Recombine cubes to be a single output.\n combined_data = result_land.data.filled(0) + result_sea.data.filled(0)\n result = result_land.copy(data=combined_data)\n\n return result\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Module to apply a recursive filter to neighbourhooded data.\"\"\"\nimport warnings\nfrom typing import List, Optional, Tuple\n\nimport iris\nimport numpy as np\nfrom iris.cube import Cube, CubeList\nfrom numpy import ndarray\n\nfrom improver import PostProcessingPlugin\nfrom improver.generate_ancillaries.generate_orographic_smoothing_coefficients import (\n OrographicSmoothingCoefficients,\n)\nfrom improver.metadata.constants.time_types import TIME_COORDS\nfrom improver.utilities.cube_checker import check_cube_coordinates\nfrom improver.utilities.pad_spatial import pad_cube_with_halo, remove_halo_from_cube\n\n\nclass RecursiveFilter(PostProcessingPlugin):\n \"\"\"\n Apply a recursive filter to the input cube.\n \"\"\"\n\n def __init__(self, iterations: Optional[int] = None, edge_width: int = 15) -> None:\n \"\"\"\n Initialise the class.\n\n Args:\n iterations:\n The number of iterations of the recursive filter.\n edge_width:\n Half the width of the padding halo applied before\n recursive filtering.\n Raises:\n ValueError: If number of iterations is not None and is set such\n that iterations is less than 1.\n Warns:\n UserWarning:\n If iterations is higher than 2.\n \"\"\"\n if iterations is not None:\n if iterations < 1:\n raise ValueError(\n \"Invalid number of iterations: must be >= 1: {}\".format(iterations)\n )\n if iterations > 2:\n warnings.warn(\n \"More than two iterations degrades the conservation\"\n \"of probability assumption.\"\n )\n self.iterations = iterations\n self.edge_width = edge_width\n self.smoothing_coefficient_name_format = \"smoothing_coefficient_{}\"\n\n def __repr__(self) -> str:\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = \"<RecursiveFilter: iterations: {}, edge_width: {}\"\n return result.format(self.iterations, self.edge_width)\n\n @staticmethod\n def _recurse_forward(\n grid: ndarray, smoothing_coefficients: ndarray, axis: int\n ) -> ndarray:\n \"\"\"\n Method to run the recursive filter in the forward direction.\n\n In the forward direction:\n Recursive filtering is calculated as:\n\n .. 
math::\n B_i = ((1 - \\\\rm{smoothing\\\\_coefficient_{i-1}}) \\\\times A_i) +\n (\\\\rm{smoothing\\\\_coefficient_{i-1}} \\\\times B_{i-1})\n\n Progressing from gridpoint i-1 to i:\n :math:`B_i` = new value at gridpoint i\n\n :math:`A_i` = Old value at gridpoint i\n\n :math:`B_{i-1}` = New value at gridpoint i - 1\n\n Args:\n grid:\n 2D array containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients:\n Matching 2D array of smoothing_coefficient values that will be\n used when applying the recursive filter along the specified\n axis.\n axis:\n Index of the spatial axis (0 or 1) over which to recurse.\n\n Returns:\n 2D array containing the smoothed field after the recursive\n filter method has been applied to the input array in the\n forward direction along the specified axis.\n \"\"\"\n lim = grid.shape[axis]\n for i in range(1, lim):\n if axis == 0:\n grid[i, :] = (1.0 - smoothing_coefficients[i - 1, :]) * grid[\n i, :\n ] + smoothing_coefficients[i - 1, :] * grid[i - 1, :]\n if axis == 1:\n grid[:, i] = (1.0 - smoothing_coefficients[:, i - 1]) * grid[\n :, i\n ] + smoothing_coefficients[:, i - 1] * grid[:, i - 1]\n return grid\n\n @staticmethod\n def _recurse_backward(\n grid: ndarray, smoothing_coefficients: ndarray, axis: int\n ) -> ndarray:\n \"\"\"\n Method to run the recursive filter in the backwards direction.\n\n In the backwards direction:\n Recursive filtering is calculated as:\n\n .. math::\n B_i = ((1 - \\\\rm{smoothing\\\\_coefficient}) \\\\times A_i) +\n (\\\\rm{smoothing\\\\_coefficient} \\\\times B_{i+1})\n\n Progressing from gridpoint i+1 to i:\n :math:`B_i` = new value at gridpoint i\n\n :math:`A_i` = Old value at gridpoint i\n\n :math:`B_{i+1}` = New value at gridpoint i+1\n\n Args:\n grid:\n 2D array containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients:\n Matching 2D array of smoothing_coefficient values that will be\n used when applying the recursive filter along the specified\n axis.\n axis:\n Index of the spatial axis (0 or 1) over which to recurse.\n\n Returns:\n 2D array containing the smoothed field after the recursive\n filter method has been applied to the input array in the\n backwards direction along the specified axis.\n \"\"\"\n lim = grid.shape[axis]\n for i in range(lim - 2, -1, -1):\n if axis == 0:\n grid[i, :] = (1.0 - smoothing_coefficients[i, :]) * grid[\n i, :\n ] + smoothing_coefficients[i, :] * grid[i + 1, :]\n if axis == 1:\n grid[:, i] = (1.0 - smoothing_coefficients[:, i]) * grid[\n :, i\n ] + smoothing_coefficients[:, i] * grid[:, i + 1]\n return grid\n\n @staticmethod\n def _run_recursion(\n cube: Cube,\n smoothing_coefficients_x: Cube,\n smoothing_coefficients_y: Cube,\n iterations: int,\n ) -> Cube:\n \"\"\"\n Method to run the recursive filter.\n\n Args:\n cube:\n 2D cube containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients_x:\n 2D cube containing array of smoothing_coefficient values that\n will be used when applying the recursive filter along the\n x-axis.\n smoothing_coefficients_y:\n 2D cube containing array of smoothing_coefficient values that\n will be used when applying the recursive filter along the\n y-axis.\n iterations:\n The number of iterations of the recursive filter\n\n Returns:\n Cube containing the smoothed field after the recursive filter\n method has been applied to the input cube.\n \"\"\"\n (x_index,) = cube.coord_dims(cube.coord(axis=\"x\").name())\n (y_index,) = 
cube.coord_dims(cube.coord(axis=\"y\").name())\n output = cube.data\n\n for _ in range(iterations):\n output = RecursiveFilter._recurse_forward(\n output, smoothing_coefficients_x.data, x_index\n )\n output = RecursiveFilter._recurse_backward(\n output, smoothing_coefficients_x.data, x_index\n )\n output = RecursiveFilter._recurse_forward(\n output, smoothing_coefficients_y.data, y_index\n )\n output = RecursiveFilter._recurse_backward(\n output, smoothing_coefficients_y.data, y_index\n )\n cube.data = output\n return cube\n\n def _validate_coefficients(\n self, cube: Cube, smoothing_coefficients: CubeList\n ) -> List[Cube]:\n \"\"\"Validate the smoothing coefficients cubes.\n\n Args:\n cube:\n 2D cube containing the input data to which the recursive\n filter will be applied.\n\n smoothing_coefficients:\n A cubelist containing two cubes of smoothing_coefficient values,\n one corresponding to smoothing in the x-direction, and the other\n to smoothing in the y-direction.\n\n Returns:\n A list of smoothing coefficients cubes ordered: [x-coeffs, y-coeffs].\n\n Raises:\n ValueError: Smoothing coefficient cubes are not named correctly.\n ValueError: If any smoothing_coefficient cube value is over 0.5\n ValueError: The coordinate to be smoothed within the\n smoothing coefficient cube is not of the expected length.\n ValueError: The coordinate to be smoothed within the\n smoothing coefficient cube does not have the expected points.\n \"\"\"\n # Ensure cubes are in x, y order.\n smoothing_coefficients.sort(key=lambda cell: cell.name())\n axes = [\"x\", \"y\"]\n\n for axis, smoothing_coefficient in zip(axes, smoothing_coefficients):\n\n # Check the smoothing coefficient cube name is as expected\n expected_name = self.smoothing_coefficient_name_format.format(axis)\n if smoothing_coefficient.name() != expected_name:\n msg = (\n \"The smoothing coefficient cube name {} does not match the \"\n \"expected name {}\".format(\n smoothing_coefficient.name(), expected_name\n )\n )\n raise ValueError(msg)\n\n # Check the smoothing coefficients do not exceed an empirically determined\n # maximum value; larger values damage conservation significantly.\n if (smoothing_coefficient.data > 0.5).any():\n raise ValueError(\n \"All smoothing_coefficient values must be less than 0.5. 
\"\n \"A large smoothing_coefficient value leads to poor \"\n \"conservation of probabilities\"\n )\n\n for test_axis in axes:\n coefficient_crd = smoothing_coefficient.coord(axis=test_axis)\n if test_axis == axis:\n expected_points = (\n cube.coord(axis=test_axis).points[1:]\n + cube.coord(axis=test_axis).points[:-1]\n ) / 2\n else:\n expected_points = cube.coord(axis=test_axis).points\n\n if len(coefficient_crd.points) != len(\n expected_points\n ) or not np.allclose(coefficient_crd.points, expected_points):\n msg = (\n f\"The smoothing coefficients {test_axis} dimension does not \"\n \"have the expected length or values compared with the cube \"\n \"to which smoothing is being applied.\\n\\nSmoothing \"\n \"coefficient cubes must have coordinates that are:\\n\"\n \"- one element shorter along the dimension being smoothed \"\n f\"({axis}) than in the target cube, with points in that \"\n \"dimension equal to the mean of each pair of points along \"\n \"the dimension in the target cube\\n- equal to the points \"\n \"in the target cube along the dimension not being smoothed\"\n )\n raise ValueError(msg)\n\n return smoothing_coefficients\n\n def _pad_coefficients(self, coeff_x, coeff_y):\n \"\"\"Pad smoothing coefficients\"\"\"\n pad_x, pad_y = [\n pad_cube_with_halo(\n coeff, 2 * self.edge_width, 2 * self.edge_width, pad_method=\"symmetric\",\n )\n for coeff in [coeff_x, coeff_y]\n ]\n return pad_x, pad_y\n\n @staticmethod\n def _update_coefficients_from_mask(\n coeffs_x: Cube, coeffs_y: Cube, mask: Cube\n ) -> Tuple[Cube, Cube]:\n \"\"\"\n Zero all smoothing coefficients for data points that are masked\n\n Args:\n coeffs_x\n coeffs_y\n mask\n\n Returns:\n Updated smoothing coefficients\n \"\"\"\n plugin = OrographicSmoothingCoefficients(\n use_mask_boundary=False, invert_mask=False\n )\n plugin.zero_masked(coeffs_x, coeffs_y, mask)\n return coeffs_x, coeffs_y\n\n def process(self, cube: Cube, smoothing_coefficients: CubeList) -> Cube:\n \"\"\"\n Set up the smoothing_coefficient parameters and run the recursive\n filter. Smoothing coefficients can be generated using\n :class:`~.OrographicSmoothingCoefficients`\n and :func:`~improver.cli.generate_orographic_smoothing_coefficients`.\n The steps undertaken are:\n\n 1. Split the input cube into slices determined by the co-ordinates in\n the x and y directions.\n 2. Construct an array of filter parameters (smoothing_coefficients_x\n and smoothing_coefficients_y) for each cube slice that are used to\n weight the recursive filter in the x- and y-directions.\n 3. Pad each cube slice with a square-neighbourhood halo and apply\n the recursive filter for the required number of iterations.\n 4. Remove the halo from the cube slice and append the recursed cube\n slice to a 'recursed cube'.\n 5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.\n 6. Modify the 'new cube' so that its scalar dimension co-ordinates are\n consistent with those in the original input cube.\n 7. Return the 'new cube' which now contains the recursively filtered\n values for the original input cube.\n\n The smoothing_coefficient determines how much \"value\" of a cell\n undergoing filtering is comprised of the current value at that cell and\n how much comes from the adjacent cell preceding it in the direction in\n which filtering is being applied. 
A larger smoothing_coefficient\n results in a more significant proportion of a cell's new value coming\n from its neighbouring cell.\n\n Args:\n cube:\n Cube containing the input data to which the recursive filter\n will be applied.\n smoothing_coefficients:\n A cubelist containing two cubes of smoothing_coefficient values,\n one corresponding to smoothing in the x-direction, and the other\n to smoothing in the y-direction.\n\n Returns:\n Cube containing the smoothed field after the recursive filter\n method has been applied.\n\n Raises:\n ValueError:\n If the cube contains masked data from multiple cycles or times\n \"\"\"\n cube_format = next(cube.slices([cube.coord(axis=\"y\"), cube.coord(axis=\"x\")]))\n coeffs_x, coeffs_y = self._validate_coefficients(\n cube_format, smoothing_coefficients\n )\n\n mask_cube = None\n if np.ma.is_masked(cube.data):\n # Assumes mask is the same for each x-y slice. This may not be\n # true if there are several time slices in the cube - so throw\n # an error if this is so.\n for coord in TIME_COORDS:\n if cube.coords(coord) and len(cube.coord(coord).points) > 1:\n raise ValueError(\n \"Dealing with masks from multiple time points is unsupported\"\n )\n\n mask_cube = cube_format.copy(data=cube_format.data.mask)\n coeffs_x, coeffs_y = self._update_coefficients_from_mask(\n coeffs_x, coeffs_y, mask_cube,\n )\n\n padded_coefficients_x, padded_coefficients_y = self._pad_coefficients(\n coeffs_x, coeffs_y\n )\n\n recursed_cube = iris.cube.CubeList()\n for output in cube.slices([cube.coord(axis=\"y\"), cube.coord(axis=\"x\")]):\n\n padded_cube = pad_cube_with_halo(\n output, 2 * self.edge_width, 2 * self.edge_width, pad_method=\"symmetric\"\n )\n\n new_cube = self._run_recursion(\n padded_cube,\n padded_coefficients_x,\n padded_coefficients_y,\n self.iterations,\n )\n new_cube = remove_halo_from_cube(\n new_cube, 2 * self.edge_width, 2 * self.edge_width\n )\n\n if mask_cube is not None:\n new_cube.data = np.ma.MaskedArray(new_cube.data, mask=mask_cube.data)\n\n recursed_cube.append(new_cube)\n\n new_cube = recursed_cube.merge_cube()\n new_cube = check_cube_coordinates(cube, new_cube)\n\n return new_cube\n" ]
[ [ "numpy.logical_not" ], [ "numpy.allclose", "numpy.ma.is_masked", "numpy.ma.MaskedArray" ] ]
SimengSun/revisit-nplm
[ "bbe1cdaecf1d7d104d27b1035a591ebbd3b5141e" ]
[ "data/text.py" ]
[ "\"\"\"\n\tImplement torch iterable dataset\n\t\t- build vocab ordered by freq for \n\"\"\"\nfrom tqdm import tqdm\nimport torch\nimport torch.utils.data\nfrom torch.utils.data.dataloader import DataLoader\nimport os\nimport sys\nimport pickle5 as pickle #import pickle\nimport math\nfrom collections import defaultdict\n\nSPLITS = ['train', 'valid', 'test']\nEOS = '<eos>'\nPAD = '<pad>'\n\nclass Dataset(torch.utils.data.IterableDataset):\n\n\tdef __init__(self, data_dir, batch_size, split):\n\n\t\tself.data_dir = data_dir\n\t\tif not self.data_exist():\n\t\t\tself.build_vocab()\n\t\t\tfor s in SPLITS:\n\t\t\t\tself.binarize(s)\n\n\t\tself.load_vocab()\n\t\tself.data = self.load_data(split, batch_size) # bsz x (len(data)/bsz)\n\t\tself.start = 0\n\t\tself.end = self.data.size(1)\n\t\tself.split = split\n\n\tdef __iter__(self):\n\t\tworker_info = torch.utils.data.get_worker_info()\n\t\tif worker_info is None: # single-process data loading, return the full iterator\n\t\t\titer_start = self.start\n\t\t\titer_end = self.end\n\t\telse: \t\t\t\t\t# in a worker process split workload\n\t\t\tper_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))\n\t\t\tworker_id = worker_info.id\n\t\t\titer_start = self.start + worker_id * per_worker\n\t\t\titer_end = min(iter_start + per_worker, self.end)\n\t\treturn iter(self.data.transpose(1,0)[iter_start:iter_end])\n\n\t@property\n\tdef eos_idx(self):\n\t\treturn self.tok2id[EOS]\n\t\n\t@property\n\tdef padding_idx(self):\n\t\treturn self.tok2id[PAD]\n\n\t@property\n\tdef size(self):\n\t\treturn len(self.id2tok)\n\t\n\n\tdef build_vocab(self, min_freq=0, max_freq=sys.maxsize):\n\t\t\"\"\"\n\t\tbuild vocab + add eos\n\t\tencode sentence\n\t\t\"\"\"\n\t\twith open(os.path.join(self.data_dir, 'train.txt'), 'r') as fn:\n\t\t\tdata = fn.readlines()\n\n\t\tif 'lambada' in self.data_dir:\n\t\t\twith open(os.path.join(self.data_dir, 'test.txt'), 'r') as fn:\n\t\t\t\tdata.extend(fn.readlines())\n\n\t\t\twith open(os.path.join(self.data_dir, 'valid.txt'), 'r') as fn:\n\t\t\t\tdata.extend(fn.readlines())\n\n\t\tprint('building vocab ...')\n\t\tself.vocab = defaultdict(int)\n\t\tself.tok2id = {}\n\t\tself.id2tok = []\n\n\t\tfor line in tqdm(data):\n\t\t\tline = line.strip().split()\n\t\t\tfor tok in line:\n\t\t\t\tself.vocab[tok] += 1\n\t\t\n\t\tself.vocab = {a : self.vocab[a] for a in self.vocab if self.vocab[a] >= min_freq and self.vocab[a] <= max_freq}\n\t\t# sort vocab in case of using adaptive softmax\n\t\tself.vocab = list(sorted(self.vocab.items(), key=lambda a: a[1], reverse=True))\n\t\tprint(self.vocab[:10])\n\n\t\tif 'lambada' in self.data_dir:\n\t\t\tself.vocab = self.vocab[:60000]\n\t\t\tself.vocab.append(('<unk>', 0))\n\n\t\tself.id2tok = ['<pad>'] + ['<eos>'] + [a[0] for a in self.vocab] \n\t\tself.tok2id = {a : i for i, a in enumerate(self.id2tok)}\n\t\tself.vocab_size = len(self.id2tok)\n\n\t\tprint('end building vocab ...')\n\t\tprint('vocab size', len(self.tok2id))\n\t\twith open(os.path.join(self.data_dir, 'vocab.pkl'), 'wb') as fn: \n\t\t\tpickle.dump({'id2tok': self.id2tok, 'tok2id': self.tok2id, 'vocab_size':self.vocab_size}, fn)\n\n\tdef encode_line(self, line):\n\n\t\tif 'lambada' not in self.data_dir:\n\t\t\treturn torch.tensor([self.tok2id[tok] for tok in line+['<eos>']])\n\t\telse:\n\t\t\treturn torch.tensor([self.tok2id[tok] if tok in self.tok2id else self.tok2id['<unk>'] for tok in line])\n\n\tdef decode_tokids(self, tensor):\n\t\ttokens = []\n\t\tfor tokid in 
tensor:\n\t\t\ttokens.append(self.id2tok[tokid])\n\t\ttokens = [t if t != '<eos>' else '\\n' for t in tokens]\n\t\treturn ' '.join(tokens)\n\n\tdef binarize(self, split):\n\t\t\"\"\"binarize data to torch.tensor shape (doc_len, )\"\"\"\n\t\twith open(os.path.join(self.data_dir, f\"{split}.txt\"), \"r\") as fn:\n\t\t\tdata = [line.strip().split() for line in fn.readlines()]\n\n\t\tprint('binarizing data ...')\n\t\tdoc = []\n\t\tfor line in tqdm(data):\n\t\t\tif line:  # line is a token list after split(); comparing it to '' was always True\n\t\t\t\tdoc.append(self.encode_line(line))\n\n\t\tdoc = torch.cat(doc)\n\n\t\tprint('end binarizing data ...')\n\t\tprint('doc shape', doc.shape)\n\t\tprint([self.id2tok[i] for i in doc[:100]])\n\t\twith open(os.path.join(self.data_dir, f\"{split}.bin\"), \"wb\") as fout:\n\t\t\tpickle.dump({\"data\": doc}, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n\tdef load_vocab(self):\n\t\t\n\t\twith open(os.path.join(self.data_dir, 'vocab.pkl'), 'rb') as fn: \n\t\t\tdata = pickle.load(fn)\n\t\tprint('loading vocab...')\n\t\tself.id2tok = data['id2tok']\n\t\tself.tok2id = data['tok2id']\n\t\tself.vocab_size = data['vocab_size']\n\t\t# self.id2freq = data['id2freq']\n\t\tprint(f'vocab size {self.vocab_size}')\n\n\tdef data_exist(self):\n\t\treturn all([os.path.exists(os.path.join(self.data_dir, f\"{fn}.bin\")) \\\n\t\t\tfor fn in ['train', 'valid', 'test'] ] + [os.path.exists(os.path.join(self.data_dir, \"vocab.pkl\"))])\n\n\tdef load_data(self, split, bsz):\n\n\t\twith open(os.path.join(self.data_dir, f\"{split}.bin\"), \"rb\") as fin:\n\t\t\tdata = pickle.load(fin)['data']\n\n\t\tnstep = data.size(0) // bsz\n\t\treturn data[ : nstep * bsz].view(bsz, -1)\n\n" ]
[ [ "torch.utils.data.get_worker_info", "torch.cat", "torch.tensor" ] ]
VarunNangalia/ga-learner-dsmp-repo
[ "c7c1485e6745ba62e666cce7e2accf6eee30ed17" ]
[ "-Publish-Superhero-Statistics/code.py" ]
[ "# --------------\n#Header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n#path of the data file- path\r\ndata = pd.read_csv(path)\r\n#Code starts here \r\ndata['Gender'].replace('-','Agender',inplace=True)\r\ngender_count = data['Gender'].value_counts()\r\ngender_count.plot(kind='bar', title =\"Gender\",figsize=(15,10),legend=True, fontsize=12)\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nalignment=data['Alignment'].value_counts()\r\nalignment.plot.pie()\r\nplt.title('Character Alignment')\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\n#strength and combat\r\nsc_df = data[['Strength','Combat']].copy()\r\nsc_covariance= round((sc_df['Strength'].cov(sc_df['Combat'])),2)\r\nsc_strength = round((sc_df['Strength'].std()),2)\r\nsc_combat = round((sc_df['Combat'].std()),2)\r\nsc_pearson = round((sc_covariance/(sc_combat*sc_strength)),2)\r\n#intelligence and combat\r\nic_df = round((data[['Intelligence','Combat']].copy()),2)\r\nic_covariance = round((ic_df['Intelligence'].cov(ic_df['Combat'])),2)\r\nic_intelligence = round((ic_df['Intelligence'].std()),2)\r\nic_combat = round((ic_df['Combat'].std()),2)\r\nic_pearson = round((ic_covariance/(ic_combat*ic_intelligence)),2)\r\n\n\n\n# --------------\n#Code starts here\r\ntotal_high = np.quantile(data['Total'], .99)\r\n#print(total_high)\r\nsuper_best = data[data['Total']>total_high]\r\n\r\nsuper_best_names = super_best['Name'].tolist()\r\nprint(super_best_names)\n\n\n# --------------\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfig,(ax_1,ax_2,ax_3) = plt.subplots(3,1)\r\nax_1.plot(data['Intelligence'])\r\nax_1.set_title('Intelligence')\r\nax_1.set_xlabel('Intelligence')\r\nax_1.legend()\r\n\r\nax_2.plot(data['Speed'])\r\nax_2.set_title('Speed')\r\nax_2.set_xlabel('Speed')\r\nax_2.legend()\r\n\r\nax_3.plot(data['Power'])\r\nax_3.set_title('Power')\r\nax_3.set_xlabel('Power')\r\nax_3.legend()\r\nplt.tight_layout()\r\nplt.show()\n\n\n" ]
[ [ "numpy.quantile", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "matplotlib.pyplot.show" ] ]
ParikhKadam/cycloid
[ "c5e64e8379f801417a38755eb6b2fde881dabd8c" ]
[ "design/trackplan/trackplan.py" ]
[ "# racetrack route planner\n# based on apex cone locations and track widths, get a \nimport numpy as np\n\n\nmaxv = 10\nlaterala = 8\nmaxk = 1.5\nbw_v = np.pi*2*0.7\nbw_w = np.pi*2*1.5\n\n# track is a set of points and radii (positive or negative if track goes CCW/CW\n# around each point)\n# so first we determine the nearest point\n\n# T is [5, NUM_PTS]; [(x, y, r), i]\n# TODO: precompute handy normals\n\n\ndef gettargetv(k):\n kmin = laterala / (maxv**2)\n targetv = maxv\n if np.abs(k) > kmin:\n targetv = np.sqrt(laterala / np.abs(k))\n return targetv\n\n\n# get nearest point on track, its direction normal and curvature\ndef gettrack(xy, T):\n Tn = np.hstack([T[:, 1:], T[:, :1]])\n Nn = Tn[:2] - T[:2]\n L = np.linalg.norm(Nn, axis=0)\n S = (Tn[2] - T[2]) / L\n C = np.sqrt(1 - S**2)\n Nn /= L\n Nn = np.vstack([-Nn[0]*S - Nn[1]*C, Nn[0]*C - Nn[1]*S])\n\n # we need to rotate Nn / Np based on the radii of the current/next/previous points\n # ...unless they have the same radius, in which case this doesn't work.\n # ln = np.linalg.norm(Tn[:2] - T[:2], axis=0) * T[2] / (Tn[2] - T[2])\n # print 'ln', ln\n # crap. for now let's just ignore the problem\n\n Pn = (Tn[:2] + Nn*Tn[2]).T\n P = (T[:2] + Nn*T[2]).T\n\n tnum = np.sum((Pn - P)*(xy - P), axis=1)\n tden = np.sum((Pn - P)**2, axis=1)\n t = np.clip(tnum / tden, 0, 1)\n # closest point on each edge in the polygon to xy\n pxy = (P.T*(1-t) + Pn.T*t).T\n dists = np.sqrt(np.sum((pxy-xy)**2, axis=1))\n i = np.argmin(dists)\n if t[i] == 0 or t[i] == 1:\n if t[i] == 1:\n i = (i+1) % T.shape[1]\n # closest point is one of the circles\n dp = xy - T[:2, i].T\n dp /= np.linalg.norm(dp)\n p = T[:2, i].T + dp * np.abs(T[2, i])\n n = np.array([dp[1], -dp[0]]) * np.sign(T[2, i])\n return p, n, 1.0/T[2, i], gettargetv(1.0/T[2, i])\n else:\n # closest point is on the linear sections\n n = Pn[i] - P[i]\n n /= np.linalg.norm(n)\n finalv = gettargetv(1.0/Tn[2, i])\n # need to compute deceleration\n tt = t[i]**2\n return pxy[i], n, 0, maxv*(1-tt) + tt*finalv\n\n return None\n\n\ndef step(X, u, targetv, dt):\n # X = [x y theta v w]\n # velocity control\n if targetv > X[3]:\n ebw = np.exp(-bw_v * dt)\n else:\n ebw = np.exp(-bw_v * 2 * dt)\n vnew = (1 - ebw) * targetv + ebw * X[3]\n v = (X[3] + vnew) * 0.5\n\n # yaw rate control\n targetw = v * u\n ebw = np.exp(-bw_w * dt)\n wnew = (1 - ebw) * targetw + ebw * X[4]\n thetanew = X[2] + wnew * dt\n theta = X[2] + wnew * dt * 0.5\n\n X[0] += np.cos(theta)*v*dt\n X[1] += np.sin(theta)*v*dt\n X[2] = thetanew\n X[3] = vnew\n X[4] = wnew\n\n return X\n\n\ndef drive(X, dt):\n p, n, k, v = gettrack(X[:2], T)\n nx = np.array([n[1], -n[0]])\n ye = np.dot(X[:2] - p, nx)\n C, S = np.cos(X[2]), np.sin(X[2])\n R = np.array([[C, S], [-S, C]])\n Rn = np.dot(R, n)\n # not sure if psie is backwards or not\n psie = np.arctan2(Rn[1], Rn[0])\n # print n, C, S, ye, psie, k\n Cp = np.cos(psie)\n Sp = np.sin(psie)\n # print psie, Cp, Sp\n Cpy = Cp / (1 - k * ye)\n Kpy = 1.0\n Kvy = 5.0\n ds = X[3]*Cpy*dt\n return -Cpy*(ye*Cpy*(-Kpy*Cp) + Sp*(k*Sp - Kvy*Cp) + k), v, ds\n\n\ndef trackexport(T):\n ''' compute various positions and normals for export '''\n output = np.zeros((9, T.shape[1]))\n\n output[:3] = T[:3] # first three dimensions of output are unchanged\n\n Tn = np.hstack([T[:, 1:], T[:, :1]])\n Nn = Tn[:2] - T[:2]\n L = np.linalg.norm(Nn, axis=0)\n S = (Tn[2] - T[2]) / L\n C = np.sqrt(1 - S**2)\n Nn /= L\n Nn = np.vstack([-Nn[0]*S - Nn[1]*C, Nn[0]*C - Nn[1]*S])\n Nn /= np.linalg.norm(Nn, axis=0)\n\n # we need to rotate Nn / Np based on 
the radii of the current/next/previous points\n # ...unless they have the same radius, in which case this doesn't work.\n # ln = np.linalg.norm(Tn[:2] - T[:2], axis=0) * T[2] / (Tn[2] - T[2])\n # print 'ln', ln\n # crap. for now let's just ignore the problem\n\n Pn = (Tn[:2] + Nn*Tn[2])\n P = (T[:2] + Nn*T[2])\n output[3:5] = P\n output[5:7] = Pn\n output[7:9] = Nn\n\n print(output.shape[1])\n for i in range(output.shape[1]):\n print(' '.join(map(str, output[:, i])))\n\n return output\n\n\nif __name__ == '__main__':\n from matplotlib import pyplot as plt\n T = np.array([\n [0, 0, 1],\n [9, -1, 2],\n [10, -4, 1],\n [5, -3, -1],\n [0, -5, 1],\n ], np.float32).T\n\n T = np.array([\n [208, -181, 147],\n [488, -170, 110],\n [304, -306, -118],\n [126, -198, 88],\n ], np.float32).T*1.2\n T[1] -= 20\n T[0] -= 408\n T[1] += 102\n T *= 0.02\n\n print(trackexport(T))\n\n if False:\n plt.plot(T[0], T[1], 'o')\n t = np.linspace(0, 2*np.pi, 100)\n for x in range(T.shape[1]):\n plt.plot(np.cos(t)*T[2, x] + T[0, x], np.sin(t)*T[2, x] + T[1, x])\n plt.axis('equal')\n\n xy = np.array([7.0, -3.0])\n pp, n, k, _ = gettrack(xy, T)\n plt.plot(xy[0], xy[1], 'o')\n\n plt.plot([pp[0], pp[0]+n[0]], [pp[1], pp[1] + n[1]], '-x')\n\n plt.show()\n\n if False:\n X = np.zeros(5)\n v = np.zeros(100)\n w = np.zeros(100)\n for i in range(100):\n X = step(X, 2, 5, 1.0/30)\n v[i] = X[3]\n w[i] = X[4]\n plt.plot(v)\n plt.plot(w)\n plt.plot(w/v)\n plt.show()\n\n if True:\n totalS = 0\n X = np.array([1, 1, 0.8, 0, 0], np.float32)\n Nsteps = 222*5\n xy = np.zeros((2, Nsteps))\n dt = 1.0 / 30\n y = 0\n for i in range(Nsteps):\n u, v, ds = drive(X, dt)\n u = np.clip(u, -maxk, maxk)\n X = step(X, u, v, dt)\n xy[:, i] = X[:2]\n totalS += ds\n\n print('distance around track', totalS)\n plt.plot(T[0], T[1], 'o')\n t = np.linspace(0, 2*np.pi, 100)\n for x in range(T.shape[1]):\n plt.plot(np.cos(t)*T[2, x] + T[0, x], np.sin(t)*T[2, x] + T[1, x])\n plt.axis('equal')\n\n plt.plot(xy[0], xy[1])\n plt.plot(xy[0, -1], xy[1, -1], 'x')\n plt.show()\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.plot", "numpy.vstack", "numpy.argmin", "numpy.abs", "numpy.cos", "numpy.linspace", "numpy.sqrt", "numpy.zeros", "matplotlib.pyplot.axis", "numpy.hstack", "numpy.linalg.norm", "numpy.arctan2", "numpy.sign", "numpy.exp", "numpy.clip", "matplotlib.pyplot.show", "numpy.array", "numpy.sin", "numpy.dot" ] ]
GaoxiangLuo/flame
[ "16bd1715a545421d45ea0fc32544e448389de49c" ]
[ "lib/python/flame/examples/mnist/aggregator/pytorch/main.py" ]
[ "# Copyright 2022 Cisco Systems, Inc. and its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"MNIST horizontal FL aggregator for PyTorch.\n\nThe example below is implemented based on the following example from pytorch:\nhttps://github.com/pytorch/examples/blob/master/mnist/main.py.\n\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom flame.config import Config\nfrom flame.dataset import Dataset\nfrom flame.mode.horizontal.top_aggregator import TopAggregator\nfrom torchvision import datasets, transforms\n\nlogger = logging.getLogger(__name__)\n\n\nclass Net(nn.Module):\n \"\"\"Net class.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize.\"\"\"\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout(0.25)\n self.dropout2 = nn.Dropout(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\nclass PyTorchMnistAggregator(TopAggregator):\n \"\"\"PyTorch Mnist Aggregator.\"\"\"\n\n def __init__(self, config: Config) -> None:\n \"\"\"Initialize a class instance.\"\"\"\n self.config = config\n self.model = None\n self.dataset: Dataset = None\n\n self.device = None\n self.test_loader = None\n\n def initialize(self):\n \"\"\"Initialize role.\"\"\"\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.model = Net().to(self.device)\n\n def load_data(self) -> None:\n \"\"\"Load a test dataset.\"\"\"\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])\n\n dataset = datasets.MNIST('./data',\n train=False,\n download=True,\n transform=transform)\n\n self.test_loader = torch.utils.data.DataLoader(dataset)\n\n # store data into dataset for analysis (e.g., bias)\n self.dataset = Dataset(dataloader=self.test_loader)\n\n def train(self) -> None:\n \"\"\"Train a model.\"\"\"\n # Implement this if testing is needed in aggregator\n pass\n\n def evaluate(self) -> None:\n \"\"\"Evaluate (test) a model.\"\"\"\n self.model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.test_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n test_loss += F.nll_loss(\n output, target,\n reduction='sum').item() # sum up batch loss\n pred = output.argmax(\n dim=1,\n keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n total = len(self.test_loader.dataset)\n test_loss /= total\n test_accuray = correct / total\n\n logger.info(f\"Test loss: {test_loss}\")\n 
logger.info(f\"Test accuracy: {correct}/{total} ({test_accuray})\")\n\n # update metrics after each evaluation so that the metrics can be\n # logged in a model registry.\n self.update_metrics({\n 'test-loss': test_loss,\n 'test-accuracy': test_accuray\n })\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('config', nargs='?', default=\"./config.json\")\n\n args = parser.parse_args()\n\n config = Config(args.config)\n\n a = PyTorchMnistAggregator(config)\n a.compose()\n a.run()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.functional.log_softmax", "torch.nn.Linear", "torch.nn.functional.max_pool2d", "torch.flatten", "torch.nn.functional.nll_loss", "torch.no_grad", "torch.nn.functional.relu", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.nn.Dropout" ] ]
Ravie403/chainerrl-visualizer
[ "302bcd574d435ab68652b084764d4bb777300494" ]
[ "examples/a3c_breakout/main.py" ]
[ "import chainer\nimport numpy as np\nfrom chainerrl.agents import a3c\nfrom chainerrl import links\nfrom chainerrl import misc\nfrom chainerrl.optimizers import rmsprop_async\nfrom chainerrl import policy\nfrom chainerrl import v_function\n\nfrom chainerrl.wrappers import atari_wrappers\n\nfrom chainerrl_visualizer import launch_visualizer\n\n\nclass A3CFF(chainer.ChainList, a3c.A3CModel):\n\n def __init__(self, n_actions):\n self.head = links.NIPSDQNHead()\n self.pi = policy.FCSoftmaxPolicy(\n self.head.n_output_channels, n_actions)\n self.v = v_function.FCVFunction(self.head.n_output_channels)\n super().__init__(self.head, self.pi, self.v)\n\n def pi_and_v(self, state):\n out = self.head(state)\n return self.pi(out), self.v(out)\n\n\ndef phi(x):\n # Feature extractor\n return np.asarray(x, dtype=np.float32) / 255\n\n\ndef make_env():\n env = atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(env_name),\n episode_life=False,\n clip_rewards=False)\n env.seed(seed)\n return env\n\n\nseed = 0\nenv_name = 'BreakoutNoFrameskip-v4'\n\nmisc.set_random_seed(seed)\n\nenv = make_env()\nn_actions = env.action_space.n\n\nmodel = A3CFF(n_actions)\nopt = rmsprop_async.RMSpropAsync(lr=7e-4, eps=1e-1, alpha=0.99)\nopt.setup(model)\nopt.add_hook(chainer.optimizer.GradientClipping(40))\n\nagent = a3c.A3C(model, opt, t_max=5, gamma=0.99,\n beta=1e-2, phi=phi)\n\nagent.load('parameters')\n\nACTION_MEANINGS = {\n 0: 'NOOP',\n 1: 'FIRE',\n 2: 'RIGHT',\n 3: 'LEFT',\n}\n\nlaunch_visualizer(agent, env, ACTION_MEANINGS, raw_image_input=True)\n" ]
[ [ "numpy.asarray" ] ]
mbed92/rl-physnet
[ "62b6e8a84a6704a50855434933a147f507f94263" ]
[ "nn/train/log.py" ]
[ "import os\n\nimport tensorflow as tf\nfrom tqdm import tqdm\n\n\nclass ExperimentHandler:\n\n def __init__(self, working_path, out_name, max_to_keep=3, **objects_to_save) -> None:\n super().__init__()\n\n # prepare log writers\n train_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'train')\n val_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'val')\n\n self.train_writer = tf.summary.create_file_writer(train_log_path)\n self.val_writer = tf.summary.create_file_writer(val_log_path)\n\n # prepare checkpoints\n self.last_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'last')\n self.best_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'best')\n\n self.checkpoint_last, self.checkpoint_manager_last = _prepare_checkpoint_manager(\n self.last_path, max_to_keep,\n **objects_to_save\n )\n\n self.checkpoint_best, self.checkpoint_manager_best = _prepare_checkpoint_manager(\n self.best_path, max_to_keep,\n **objects_to_save\n )\n\n def log_training(self):\n self.train_writer.set_as_default()\n\n def log_validation(self):\n self.val_writer.set_as_default()\n\n def flush(self):\n self.train_writer.flush()\n self.val_writer.flush()\n\n def save_last(self):\n self.checkpoint_manager_last.save()\n\n def save_best(self):\n self.checkpoint_manager_best.save()\n\n def restore_best(self):\n self.checkpoint_best.restore(self.checkpoint_manager_best.latest_checkpoint)\n\n def restore(self, path):\n self.checkpoint_last.restore(tf.train.latest_checkpoint(path)).assert_consumed()\n\n\ndef restore_from_checkpoint(path, **kwargs):\n checkpoint = tf.train.Checkpoint(**kwargs)\n return checkpoint.restore(path)\n\n\ndef restore_from_checkpoint_latest(path, **kwargs):\n return restore_from_checkpoint(tf.train.latest_checkpoint(path), **kwargs)\n\n\ndef as_progressbar(label, epoch, total):\n bar = '%s epoch %d | {l_bar}{bar} | Elapsed: {elapsed} | Remaining: {remaining} | Inverted Rate: {rate_inv_fmt}' \\\n % (label, epoch)\n return tqdm(ncols=120, bar_format=bar, total=total)\n\n\ndef _prepare_checkpoint_manager(path, max_to_keep, **kwargs):\n checkpoint = tf.train.Checkpoint(**kwargs)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint=checkpoint,\n directory=path,\n max_to_keep=max_to_keep\n )\n return checkpoint, checkpoint_manager\n\n\ndef _get_or_create_dir(*paths):\n join_path = os.path.join(*paths)\n os.makedirs(join_path, exist_ok=True)\n return join_path\n" ]
[ [ "tensorflow.train.Checkpoint", "tensorflow.train.latest_checkpoint", "tensorflow.summary.create_file_writer", "tensorflow.train.CheckpointManager" ] ]
pirovc/grimer
[ "169f8d3009004d6d2f4ca4d3e7dfec819078cb34" ]
[ "scripts/ehomd_download.py" ]
[ "#!/usr/bin/env python3\nimport pandas as pd\nimport sys\nimport urllib.request\nimport re\n\n\ndef get_taxid(url):\n try:\n sys.stderr.write(url+\"\\n\")\n assembly_stats = url + \"/\" + url.split(\"/\")[-1] + \"_assembly_stats.txt\"\n filedata = urllib.request.urlopen(assembly_stats).read().decode()\n x = re.search(\"# Taxid:[\\s0-9]*\\\\r\\\\n\", filedata)\n if x:\n return re.findall(\"\\d+\", x.group())[0]\n else:\n return None\n except:\n return None\n\n# Can be Oral, Nasal or both (\"Nasal,Oral\")\nhabitats = [\"Oral\", \"Nasal\"]\ndata = \"http://www.ehomd.org/ftp/genomes/PROKKA/current/SEQID_info.csv\"\n\ndf = pd.read_table(data, sep=\",\", usecols=[\"Habitat\", \"Sequence_Source\"])\ndf = df[df[\"Habitat\"].isin(habitats + [\"Nasal,Oral\"])].drop_duplicates()\ndf[\"taxid\"] = df[\"Sequence_Source\"].map(get_taxid)\n\nprint('\"Human Oral Microbiome Database (eHOMD)\":')\nfor h in habitats:\n print(' \"' + h + '\":')\n parsed_ids = set(df.taxid[df.Habitat.str.contains(h)])\n print(' url: \"http://www.ehomd.org/?name=HOMD\"')\n print(\" ids: [\" + \", \".join(parsed_ids) + \"]\")\n\nsys.stderr.write(\"Could not retrieve taxid for: \" + \"\\n\".join(df[df.taxid.isna()][\"Sequence_Source\"].to_list()) + \"\\n\")\n" ]
[ [ "pandas.read_table" ] ]
Hiroshiba/hifi-gan
[ "17601a07573309ee305c58bf87a041f267b1c0c8" ]
[ "hifi_gan/meldataset.py" ]
[ "import math\nimport os\nimport random\nimport torch\nimport torch.utils.data\nimport numpy as np\nfrom librosa.core import load\nfrom librosa.util import normalize\nfrom librosa.filters import mel as librosa_mel_fn\n\nMAX_WAV_VALUE = 32768.0\n\n\ndef load_wav(full_path, sampling_rate=None):\n if os.path.splitext(full_path)[1] != '.npy':\n data, sampling_rate = load(full_path, sr=sampling_rate)\n else:\n a = np.load(full_path, allow_pickle=True).item()\n assert sampling_rate == a['rate']\n data = a['array']\n return data, sampling_rate\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n return np.exp(x) / C\n\n\ndef dynamic_range_compression_torch(x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression_torch(x, C=1):\n return torch.exp(x) / C\n\n\ndef spectral_normalize_torch(magnitudes):\n output = dynamic_range_compression_torch(magnitudes)\n return output\n\n\ndef spectral_de_normalize_torch(magnitudes):\n output = dynamic_range_decompression_torch(magnitudes)\n return output\n\n\nmel_basis = {}\nhann_window = {}\n\n\ndef mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True)\n\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec\n\n\ndef get_dataset_filelist(a):\n ext = '.wav' if not a.input_wavs_npy else '.npy'\n with open(a.input_training_file, 'r', encoding='utf-8') as fi:\n training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)\n for x in fi.read().split('\\n') if len(x) > 0]\n\n with open(a.input_validation_file, 'r', encoding='utf-8') as fi:\n validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)\n for x in fi.read().split('\\n') if len(x) > 0]\n return training_files, validation_files\n\n\nclass MelDataset(torch.utils.data.Dataset):\n def __init__(self, training_files, segment_size, n_fft, num_mels,\n hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,\n device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):\n self.audio_files = training_files\n random.seed(1234)\n if shuffle:\n random.shuffle(self.audio_files)\n self.segment_size = segment_size\n self.sampling_rate = sampling_rate\n self.split = split\n self.n_fft = n_fft\n self.num_mels = num_mels\n self.hop_size = hop_size\n self.win_size = win_size\n self.fmin = fmin\n self.fmax = fmax\n self.fmax_loss = fmax_loss\n self.cached_wav = None\n self.n_cache_reuse = n_cache_reuse\n self._cache_ref_count = 0\n self.device = device\n self.fine_tuning = fine_tuning\n self.base_mels_path = 
base_mels_path\n\n def __getitem__(self, index):\n filename = self.audio_files[index]\n if self._cache_ref_count == 0:\n audio, sampling_rate = load_wav(filename, self.sampling_rate)\n if not self.fine_tuning:\n audio = normalize(audio) * 0.95\n self.cached_wav = audio\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n self._cache_ref_count = self.n_cache_reuse\n else:\n audio = self.cached_wav\n self._cache_ref_count -= 1\n\n audio = torch.FloatTensor(audio)\n audio = audio.unsqueeze(0)\n\n if not self.fine_tuning:\n if self.split:\n if audio.size(1) >= self.segment_size:\n max_audio_start = audio.size(1) - self.segment_size\n audio_start = random.randint(0, max_audio_start)\n audio = audio[:, audio_start:audio_start+self.segment_size]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,\n center=False)\n else:\n mel = np.load(\n os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))\n mel = torch.from_numpy(mel)\n\n if len(mel.shape) < 3:\n mel = mel.unsqueeze(0)\n\n if self.split:\n frames_per_seg = math.ceil(self.segment_size / self.hop_size)\n\n if audio.size(1) >= self.segment_size:\n mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)\n mel = mel[:, :, mel_start:mel_start + frames_per_seg]\n audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]\n else:\n mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,\n center=False)\n\n return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())\n\n def __len__(self):\n return len(self.audio_files)\n" ]
[ [ "numpy.load", "torch.FloatTensor", "torch.min", "torch.exp", "numpy.exp", "numpy.clip", "torch.from_numpy", "torch.max", "torch.hann_window", "torch.clamp" ] ]
pbevan1/Skin-Deep-Unlearning
[ "b8802db8bd61bbf3fdeb10c9899a4117ae38e89c" ]
[ "misc_code/marking_detection.py" ]
[ "import cv2\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport shutil\nfrom zipfile import ZipFile\n\n# Detecting Gentian Violet Markers\n\n# defining numpy arrays for HSV threshold values to look for in images\nlower_violet = np.array([125, 100, 60], dtype=np.uint8)\nupper_violet = np.array([145, 255, 255], dtype=np.uint8)\n\nfolder = '/Data/train'\n# Looping through images to identify those with gentian violet pixels\nfor im in os.listdir(folder):\n src = f'/Data/train/{im}'\n img = cv2.imread(src) # Reading image\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # Converting image to HSV for more effective trhesholding\n array = cv2.inRange(img, lower_violet, upper_violet) # Creating array of pixels that fit within the threshold\n if 255 in array: # Checking if the array contains any values of 255 (within the HSV values above)\n shutil.copy(src, '/Data/Train_Marked_OpenCV2') # Copying to new directory for inspection\n\n# Manually weed out anomolies by looking through 'marked' images\n\n# Making list of the remaining images with gentian violet markers\nmarked_list = []\nfor i in os.listdir('/Data/Train_Marked_OpenCV2'):\n marked_list.append(str(i)[:12])\ntrain = pd.read_csv(r'/Data/train.csv') # Opening metadata/labels\ntrain['marked'] = 0 # Creating 'marked' column and setting the default to 0 (False)\ntrain.loc[train.image_name.isin(marked_list), 'marked'] = 1 # Setting images identified as marked to 1 (True)\n\n# Manually labeled scale data\n\n# Making list of the images with scales\nscale_list = []\nscale_images_path = '/content/drive/MyDrive/MSc Project/Data/train_512/train_scale'\nfor i in os.listdir(scale_images_path):\n scale_list.append(str(i)[:12])\ntrain['scale'] = 0 # Creating 'scale' column and setting the default to 0 (False)\ntrain.loc[train.image_name.isin(scale_list), 'scale'] = 1 # Setting images identified as having a scale to 1 (True)\ntrain.to_csv('/content/drive/MyDrive/MSc Project/Data/train.csv', index=False) # Saving the metadata/labels file with new columns\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
ncilfone/fairscale
[ "b434b7354898febf718f23c7ff21368a6e0bbe1a" ]
[ "fairscale/optim/adascale.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright 2020 Petuum, Inc. All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of Petuum, Inc. nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport functools\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Type\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass AdaScale(Optimizer):\n \"\"\"\n Implements the AdaScale_ algorithm for scaling the learning rate for\n distributed and large batch size training. Can be used in combination with\n ``torch.nn.parallel.DistributedDataParallel`` and ``torch.optim.SGD``.\n\n .. _AdaScale: https://proceedings.icml.cc/static/paper_files/icml/2020/4682-Supplemental.pdf\n\n This class subclasses `Optimizer` so that `torch.optim.lr_scheduler` can\n work with it. In other words, AdaScale is intended to be a complete wrapper of an\n torch Optimizer.\n\n Note that, AdaScale does *not* help increase per-GPU batch size.\n\n There are several ways to integrate AdaScale with your training loop.\n We show two examples below.\n\n Example 1: using PyTorch's `lr_scheduler` classes.\n\n .. code-block:: python\n\n optim = AdaScale(SGD(model.parameters(), lr=0.001))\n model = DistributedDataParallel(model)\n scheduler = LambdaLR(optim, lr_lambda=...)\n\n last_epoch = 0\n done = False\n step = 0\n while not done:\n for batch in dataset:\n optim.zero_grad()\n logits = model()\n loss = criterion(logits, ...)\n loss.backward()\n step += optim.gain()\n optim.step()\n epoch = step // len(dataset)\n if epoch > last_epoch:\n scheduler.step()\n last_epoch = epoch\n if epoch >= MAX_EPOCHS:\n done = True\n\n Example 2: using a custom `update_lr()` function that update the learning\n rate based on the current step count per epoch.\n\n .. 
code-block:: python\n\n optim = AdaScale(SGD(model.parameters(), lr=0.001))\n model = DistributedDataParallel(model)\n\n step = 0\n while step < max_steps:\n for batch in ...:\n optim.zero_grad()\n logits = model()\n loss = criterion()\n loss.backward()\n step += optim.gain()\n optim.step()\n update_lr(step)\n\n Args:\n optimizer (torch.optim.Optimizer):\n Optimizer to apply AdaScale to.\n world_size (int):\n Number of world_size for distributed training.\n If None, defaults to ``dist.get_world_size()``.\n scale (float):\n Scaling factor of the batch size from scale equals 1, e.g. using a 10x\n larger batch size (summed across all ranks with gradient accumulation)\n means a scale of 10.\n If None, defaults to ``world_size * num_gradients_to_accumulate``.\n smoothing (float):\n Smoothing factor for moving average.\n If None, it defaults to ``max(1 - (world_size * num_gradients_to_accumulate)/1000, 0)``.\n Note, for very high scale training, higher smoothing value might be needed,\n esp at the begining of the training. Therefore, if your scale is close to or larger\n than 1000, try experimenting with smoothing value > 0 if the final accuracy is poor.\n num_gradients_to_accumulate (int):\n Number of passes that we accumulate gradients locally\n between each optimizer step. This can be changed during\n training as long as the train loop changes gradient accumulation\n accordingly.\n Default to 1, which does not accumulate gradients.\n debias_ewma (bool):\n (experimental) Use debias exponential moving average\n for smoothing and mu and sigma variables. False will\n use the method in the paper's Appendix B.3.\n Default: True, which is what have been validated so far.\n \"\"\"\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n world_size: Optional[int] = None,\n scale: Optional[float] = None,\n smoothing: float = None,\n num_gradients_to_accumulate: int = 1,\n debias_ewma: bool = True,\n ):\n self._optimizer = optimizer\n self._local_grad_sqr: Optional[torch.Tensor] = None\n self._world_size: int = (\n world_size if world_size is not None else dist.get_world_size() if dist.is_initialized() else 1\n )\n self._num_backward_calls = 0\n self._last_final_backward_call = 0\n self._num_grads_to_accum = num_gradients_to_accumulate\n self._debias_ewma = debias_ewma\n\n # Proxy the param_groups so that `torch.optim.lr_scheduler` can work.\n self.param_groups = self._optimizer.param_groups\n\n self.set_num_gradients_to_accumulate(num_gradients_to_accumulate, update_smoothing=True)\n\n # The previous function call sets smoothing to its default value.\n # Override that here if smoothing was passed as an argument.\n if smoothing is not None:\n self._smoothing = smoothing\n\n if self._world_size * self._num_grads_to_accum <= 1:\n # gain will be NaN since we will be dividing by zero in paper's B.3 where (S-1) == 0.\n raise RuntimeError(\"AdaScale does not support a single worker without grad accumulation.\")\n\n # Per-param-group sqr & var states (sigma^2 & mu^2 in the paper).\n self._optimizer.state.setdefault(\n \"adascale\",\n {\n \"grad_sqr_avg\": np.ones(len(optimizer.param_groups)),\n \"grad_var_avg\": np.zeros(len(optimizer.param_groups)),\n },\n )\n\n self._scale = 1.0 # Assign to inform mypy about the typing of this variable.\n self.set_scale(self._world_size * self._num_grads_to_accum if scale is None else scale)\n\n self._hook_handles: List[Any] = []\n self._hook()\n\n def _hook(self) -> None:\n \"\"\" Internal function to register the gradient hooks.\n\n Note, don't assume every 
parameter will generate a gradient (i.e. triggering the hook)\n in every backward pass, which is the reason that we have ``find_unused_params`` flag\n in the DDP class in ``torch.nn.parallel``.\n \"\"\"\n assert self._hook_handles == [], \"Must run unhook first\"\n for idx, param_group in enumerate(self._optimizer.param_groups):\n for param in param_group[\"params\"]:\n h = param.register_hook(functools.partial(self._backward_hook, idx))\n self._hook_handles.append(h)\n\n def __del__(self) -> None:\n \"\"\" Unhook in case caller forgets to call unhook.\n\n This however may not \"work\" since there would be circular reference\n between the hook objects and this objects. In that case, neither will\n get GC'ed. Calling unhook explicitly if you really want to delete\n AdaScale from memory.\n \"\"\"\n self.unhook()\n\n def unhook(self) -> None:\n \"\"\" Unregister hook handles.\n\n This is public because caller may need to call this to ensure all GPU\n memory are released. Otherwise, the hook may prevent parameters from being\n released from the GPU memory pool.\n\n Internally, we use this to support ``add_param_group()`` API.\n \"\"\"\n for h in self._hook_handles:\n h.remove()\n self._hook_handles = []\n\n @property\n def _state(self) -> Dict[str, np.ndarray]:\n \"\"\"\n Return the states of AdaScale.\n \"\"\"\n return self._optimizer.state[\"adascale\"]\n\n @property\n def scale(self) -> float:\n \"\"\"\n The scaling factor of the current batch size, relative to the baseline\n batch size, which could be a DDP training. For example, if the\n baseline batch size is 32 on 2 GPUs, but using a scaled-up batch size\n of 80 on 4 GPUs, then then the scaling factor is 80 * 4 / 32 / 2 = 5.\n\n This is exposed API mainly for logging purpose. Note, this is different\n from ``self.gain()``.\n\n Returns:\n (float):\n The current scaling factor.\n \"\"\"\n return self._scale\n\n @property\n def smoothing(self) -> float:\n \"\"\"\n The smoothing constant used in exponentially-weighted moving average\n tracking the gradient norm mean and variance within AdaScale.\n\n This is exposed API since the value is computed and caller may\n want to obtain this value and log it.\n\n Returns:\n (float):\n The current smoothing value.\n \"\"\"\n return self._smoothing\n\n def set_scale(self, scale: float, update_estimate: bool = True) -> None:\n \"\"\"\n Set the scaling factor of the current batch size. It is up to the\n application to invoke this function to make sure that AdaScale's\n scaling factor matches the actual batch size used during training.\n\n Args:\n scale (float):\n New scaling factor to be applied to AdaScale.\n update_estimate (bool):\n Whether to update the scale-depenent estimate of gradient\n variance; this is highly recommended. 
(default: True)\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't change scale in backward phase\"\n assert scale >= 1, \"Scale must be at least 1\"\n if update_estimate and hasattr(self, \"_scale\"):\n assert self._scale >= 1, \"bug: old scale isn't valid\"\n # Rescale grad_var_avg to account for the change in scale\n if self._debias_ewma and \"grad_var_avg_biased\" in self._state:\n self._state[\"grad_var_avg_biased\"] *= self._scale / scale\n elif \"grad_var_avg_total\" in self._state: # _debias_ewma==False\n self._state[\"grad_var_avg_total\"] *= self._scale / scale\n self._state[\"grad_var_avg\"] *= self._scale / scale\n self._scale = scale\n\n def _grad_sqr_avg(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the squared l2-norm of the true gradient\n (sigma squared in the AdaScale paper).\n\n Args:\n pg_idx (Optional[int]):\n Optional index for a parameter group.\n\n Returns:\n (float):\n Estimate of squared l2-norm.\n \"\"\"\n if pg_idx is not None:\n return self._state[\"grad_sqr_avg\"][pg_idx]\n else:\n return float(np.sum(self._state[\"grad_sqr_avg\"]))\n\n def _grad_var_avg(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the trace of the covariance of the true gradient\n (mu squared in the AdaScale paper).\n\n Args:\n pg_idx (Optional[int]):\n Optional index for a parameter group.\n\n Returns:\n (float):\n Estimate of trace of the covariance.\n \"\"\"\n if pg_idx is not None:\n return self._state[\"grad_var_avg\"][pg_idx]\n else:\n return float(np.sum(self._state[\"grad_var_avg\"]))\n\n def gain(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the AdaScale gain ratio (r_t in the paper).\n\n Args:\n pg_idx (int):\n Optional index of a parameter group.\n Default None: returns \"averaged\" gain for all groups.\n\n Returns:\n (float):\n Estimate of gain ratio.\n \"\"\"\n var = self._grad_var_avg(pg_idx)\n sqr = self._grad_sqr_avg(pg_idx)\n gain = (var + sqr) / (var / self.scale + sqr)\n return gain\n\n def _update_avg(self, name: str, value: np.ndarray, factor: float) -> None:\n if self._debias_ewma:\n # This function computes and stores the moving average of a vector\n # using a smoothing factor.\n biased = self._state.get(name + \"_biased\", np.zeros(value.shape[0]))\n unbias = self._state.get(name + \"_unbias\", np.zeros(value.shape[0]))\n biased = factor * biased + (1.0 - factor) * value\n unbias = factor * unbias + (1.0 - factor)\n self._state[name + \"_biased\"] = biased\n self._state[name + \"_unbias\"] = unbias\n self._state[name] = biased / unbias\n else:\n # Moving average procedure described in Appendix B.3\n # For iterations t < 1 / (1 - smoothing) define grad_var_avg\n # and grad_sqr_avg as mean of the past samples. After that\n # start using running average.\n #\n # Note: we only keep a single _count for all parameter groups.\n # Ideally, it should be a vector and in case a PG is added\n # after some iterations are done. But, then the if condition\n # below will need to be a np.where. 
I leave this corner\n # case to a future exercise.\n count = self._state.get(name + \"_count\", np.zeros(1))\n count[0] += 1\n self._state[name + \"_count\"] = count\n if count < 1 / (1 - self._smoothing):\n total = self._state.get(name + \"_total\", None)\n if total is None:\n total = value\n else:\n total += value\n self._state[name + \"_total\"] = total\n self._state[name] = total / count\n else:\n self._state[name] = factor * self._state[name] + (1.0 - factor) * value\n\n def _backward_hook(self, pg_idx: int, grad: torch.Tensor) -> None:\n # This method should be invoked once for each parameter during the\n # backward pass, before gradients are synchronized between world_size.\n\n # Store the local gradient square sums in a vector.\n # This vector is also used for error checking. Whenever it is not None,\n # it means that we are in backward pass.\n if self._local_grad_sqr is None:\n self._local_grad_sqr = torch.zeros(\n len(self._optimizer.param_groups), device=grad.device, requires_grad=False,\n )\n self._local_grad_sqr[pg_idx] += grad.pow(2).sum()\n\n # Now, ensure we queue a callback at the end of the callback queue.\n # This will fire after all gradient callbacks are done (esp. those\n # queued by DDP.\n self._final_callback_queued = False\n Variable._execution_engine.queue_callback(self._queue_callback)\n\n def _queue_callback(self) -> None:\n # This method should be invoked after the entire backward pass. We want\n # to make sure self._final_callback is invoked once, only after all\n # gradients have been synchronized between each worker. However, the\n # synchronization code in DistributedDataParallel is also done in a\n # callback, which might not yet be executed. Therefore, we enqueue\n # self._final_callback from this method, which should ensure it is\n # invoked after the gradient synchronization callback.\n if self._final_callback_queued:\n return\n self._final_callback_queued = True\n Variable._execution_engine.queue_callback(self._final_callback)\n\n def _final_callback(self) -> None:\n # This method should be invoked once for each backward pass, after\n # gradients have been synchronized between each worker, unless we\n # are in gradient accumulation mode, where grads are not all_reduced\n # between the GPUs.\n self._final_callback_queued = False\n assert isinstance(self._local_grad_sqr, torch.Tensor)\n\n # Keep track of number of backward calls for gradient accumulation.\n # TODO (min): this may not work with activation checkpointing when\n # multiple backward calls happen in a big backward.\n self._num_backward_calls += 1\n\n # TODO (min, mike): We need to have a way to check that training loop & DDP\n # is doing the right thing where the gradient is reduced\n # in this backward pass.\n # Longer term, we may compute the gain and then inform\n # the training loop when it is a good time to step().\n assert (\n self._num_backward_calls - self._last_final_backward_call\n ) <= self._num_grads_to_accum, (\n f\"bug: {self._num_backward_calls} - {self._last_final_backward_call} should <= {self._num_grads_to_accum}\"\n )\n if (self._num_backward_calls - self._last_final_backward_call) % self._num_grads_to_accum != 0:\n assert self._local_grad_sqr is not None, \"We should still be in backward phase\"\n return\n\n # Since self._local_grad_sqr is FP32, sum shouldn't overflow.\n # This vector has length of # of param_groups, so it is small, but we\n # use async to hide the all_reduce latency, esp when # of nodes is large.\n work = None\n if self._world_size > 1:\n work = 
dist.all_reduce(self._local_grad_sqr, async_op=True) # SUM\n\n # Compute the sums of squares for reduced gradients.\n # Divide by _num_grads_to_accum since the gradients are accumulated.\n total_grad_sqr = np.array(\n [sum(param.grad.pow(2).sum().item() for param in group[\"params\"]) for group in self._optimizer.param_groups]\n )\n # Divide by (_num_grads_to_accum ** 2) to account for gradient\n # accumulation.\n if self._num_grads_to_accum > 1:\n # np array doesn't support /=.\n total_grad_sqr = total_grad_sqr / (self._num_grads_to_accum ** 2)\n\n # Wait for all_reduce to be done and move it to cpu & np.\n if work:\n work.wait()\n local_grad_sqr = self._local_grad_sqr.cpu().numpy()\n\n # See appendix B.3 of the paper.\n # Modified to handle cases where scale != world_size\n #\n # local_grad_sqr is \\sum_{i=1}^{c N} \\norm{g_t_i}^2\n # where N is world size and c is num_grads_to_accum\n # total_grad_sqr is \\norm{\\bar{g}_t}^2\n S = self._scale\n cN = self._world_size * self._num_grads_to_accum\n grad_var = local_grad_sqr * (S / cN) / (cN - 1) - total_grad_sqr * S / (cN - 1)\n grad_sqr = total_grad_sqr - grad_var / S\n grad_var = np.maximum(grad_var, 1e-6)\n grad_sqr = np.maximum(grad_sqr, 0.0)\n self._update_avg(\"grad_sqr_avg\", grad_sqr, self.smoothing)\n self._update_avg(\"grad_var_avg\", grad_var, self.smoothing)\n self._last_final_backward_call = self._num_backward_calls\n # Indicating backward is done.\n self._local_grad_sqr = None\n\n def step(self, *args: Any, **kwargs: Any) -> Optional[float]:\n \"\"\"\n Run one optimizer step using Adascale. Essentially just invokes\n ``optimizer.step(*args, **kwargs)`` with a scaled learning rate.\n\n .. note::\n\n It is possible that this function becames a performance\n bottleneck if you have frequent updates. 
To avoid that,\n making bigger steps and reducing update frequency is generally\n better for performance.\n\n Args:\n args (Any):\n Positional arguments passed to ``optimizer.step``.\n kwargs (Any):\n Keyword arguments passed to ``optimizer.step``.\n\n Returns:\n (Tensor):\n The loss tensor if a closure if used to re-evaluate the model.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't step without finishing backward phase\"\n # Set original LR and set new LR.\n original_lr = []\n for idx, param_group in enumerate(self._optimizer.param_groups):\n original_lr.append(param_group[\"lr\"])\n param_group[\"lr\"] = self.gain(pg_idx=idx) * param_group[\"lr\"]\n\n # Step it.\n res = self._optimizer.step(*args, **kwargs)\n\n # Restore the original LR.\n for lr, param_group in zip(original_lr, self._optimizer.param_groups):\n param_group[\"lr\"] = lr\n\n return res\n\n def add_param_group(self, pg: Dict) -> None:\n \"\"\" Support adding parameter groups\n\n We need to re-size some of the state and re-register the backward hooks.\n \"\"\"\n assert self._local_grad_sqr is None, \"Can't add parameter group during backward\"\n self._optimizer.add_param_group(pg)\n # Update the hooks.\n self.unhook()\n self._hook()\n # Extend the states.\n for name in self._state.keys():\n assert name.startswith(\"grad_sqr_avg\") or name.startswith(\"grad_var_avg\"), name\n if name.endswith(\"_count\"):\n # This is the \"_count\" variable, should be a 1D int.\n assert self._state[name].shape == (1,), self._state[name].shape\n continue\n # must be a np array, extend it with the right value and check the shape.\n val = 1 if name == \"grad_sqr_avg\" else 0\n self._state[name] = np.append(self._state[name], val)\n assert self._state[name].shape == (len(self._optimizer.param_groups),)\n\n def zero_grad(self) -> None:\n \"\"\"Proxy function to optimizer, because some training loops need this.\"\"\"\n assert self._local_grad_sqr is None, \"Don't zero_grad in backward\"\n return self._optimizer.zero_grad()\n\n def state_dict(self) -> Dict:\n \"\"\" Proxy function to optimizer, checkpointing needs this.\n\n .. note::\n\n Do NOT checkpoint in the middle of gradient accumulation since\n associated AdaScale internal states are not saved in the checkpoint.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't checkpoint in backward\"\n return self._optimizer.state_dict()\n\n def load_state_dict(self, data: Dict) -> None:\n \"\"\" Proxy function to optimizer, checkpointing needs this.\n\n .. note::\n\n Do NOT checkpoint in the middle of gradient accumulation since\n associated AdaScale internal states are not saved in the checkpoint.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't load checkpoint in backward\"\n return self._optimizer.load_state_dict(data)\n\n def set_num_gradients_to_accumulate(self, num_gradients_to_accumulate: int, update_smoothing: bool = True,) -> None:\n \"\"\"Set the number of gradients to accumulate to a new value.\n\n This is experimental. This could be called while training so that\n we can gradually increasing the steps between updates. Almost always,\n `set_scale` needs to be called to update the scale as well.\n\n TODO (min): need a way of determine how much to increase the step size?\n\n TODO (min): have both `set_scale` and `set_num_gradients_to_accumulate`\n is hard to use and easy to make mistake. I think it is better\n to specific a specify a `base_scale`. 
But more discussion is\n needed here.\n\n Args:\n num_gradients_to_accumulate (int):\n Number of gradients to accumulate (calls to backward) between\n each optimizer step\n update_smoothing (bool):\n Whether to update smoothing factor or not. Default: True.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't change num_grad_to_accum in backward\"\n assert num_gradients_to_accumulate >= 1, f\"Invalid value {num_gradients_to_accumulate}\"\n self._num_grads_to_accum = num_gradients_to_accumulate\n if update_smoothing:\n # Set smoothing based on effective world_size rather than scale here,\n # since world_size determines the number of samples being averaged over\n # at every update.\n #\n # When effective world size is large enough, smoothing is probably\n # not needed, so the smoothing factor is 0.\n self._smoothing = max(1 - self._world_size * self._num_grads_to_accum / 1000, 0)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"Forward missing attributes to wrapped optimizer.\"\"\"\n try:\n return super().__getattr__(name) # defer to Optimizer logic\n except AttributeError:\n return getattr(self._optimizer, name) # fallback to wrapped optim\n\n\nclass AdaScaleWrapper(AdaScale):\n \"\"\"\n A thin wrapper for AdaScale so that the constructor resembles a\n standard optimizer. This allows it to work with other Optimizer\n Wrappers, like `OSS`.\n\n .. warn::\n OSS(AdaScaleWrapper) (i.e. OSS wrapping AdaScale) resulting in each\n rank's AdaScale operates on different set of parameters. They\n will get different gain values and it is unclear how to adjust\n effective step size in that case. We have not validated effectiveness\n or benefit in this case.\n\n OTOH, AdaScale(OSS) (i.e. AdaScale wrapping OSS) is recommended\n and is numerically identical to AdaScale without OSS. Since\n AdaScale doesn't incur per-parameter state, the memory benefit\n of OSS is still the same.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n optim (class subtyping torch.optim.Optimizer):\n a optimizer class to be wrapped.\n additional_optim_args (argument dict):\n keyward arguments to the `optim` class above.\n\n The rest params are in-sync with the `AdaScale` class above.\n \"\"\"\n\n def __init__(\n self,\n params: _params_t,\n world_size: Optional[int] = None,\n scale: Optional[float] = None,\n smoothing: float = None,\n num_gradients_to_accumulate: int = 1,\n debias_ewma: bool = True,\n optim_cls: Type[Optimizer] = SGD,\n **additional_optim_args: Any,\n ):\n optim_obj = optim_cls(params, **additional_optim_args)\n super().__init__(optim_obj, world_size, scale, smoothing, num_gradients_to_accumulate, debias_ewma)\n" ]
[ [ "numpy.sum", "torch.autograd.Variable._execution_engine.queue_callback", "numpy.append", "torch.distributed.get_world_size", "numpy.zeros", "torch.distributed.is_initialized", "torch.distributed.all_reduce", "numpy.maximum" ] ]
lianghongzhuo/ycb-tools
[ "e5d99a23a2b8b345c571acf9c8ea10a648d7fb03" ]
[ "create_ycb_sdf.py" ]
[ "import os\nimport trimesh\nimport numpy as np\n\n\"\"\"\nCreates Gazebo compatible SDF files from downloaded YCB data.\n\nThis looks through all the YCB objects you have downloaded in a particular \nfolder, and creates Gazebo compatible SDF files from a set of templates.\n\nIf the object has google_16k meshes downloaded, it will use those; else, it\nwill use the tsdf meshes which are of lower quality. \n\nWe recommend ensuring that you've enabled `google_16k` as one of the file \ntypes to download in the `download_ycb_dataset.py` script.\n\nSebastian Castro 2020\n\"\"\"\n\n# Define downsample ratio for mesh. This makes Gazebo run much faster.\ndownsample_ratio = 0.33\n\n# Define folders\nycb_folder = os.path.join(\"models\", \"ycb\")\ntemplate_folder = os.path.join(\"templates\", \"ycb\")\n\nif __name__ == \"__main__\":\n\n print(\"Creating files to use YCB objects in Gazebo...\")\n\n # Get the list of all downloaded mesh folders\n folder_names = os.listdir(ycb_folder)\n\n # Get the template files to copy over\n config_template_file = os.path.join(template_folder, \"model.config\")\n model_template_file = os.path.join(template_folder, \"template.sdf\")\n material_template_file = os.path.join(template_folder, \"template.material\")\n with open(config_template_file, \"r\") as f:\n config_template_text = f.read()\n with open(model_template_file, \"r\") as f:\n model_template_text = f.read()\n with open(material_template_file, \"r\") as f:\n material_template_text = f.read()\n\n # Now loop through all the folders\n for folder in folder_names:\n if folder != \"template\":\n try:\n print(\"Creating Gazebo files for {} ...\".format(folder))\n\n # Extract model name and folder\n model_long = folder\n model_short = folder[4:]\n model_folder = os.path.join(ycb_folder, model_long)\n\n # Check if there are Google meshes; else use the TSDF folder\n if \"google_16k\" in os.listdir(model_folder):\n mesh_type = \"google_16k\"\n else:\n mesh_type = \"tsdf\"\n\n # Extract key data from the mesh\n if mesh_type == \"google_16k\":\n mesh_file = os.path.join(model_folder, \"google_16k\", \"textured.obj\")\n elif mesh_type == \"tsdf\":\n mesh_file = os.path.join(model_folder, \"tsdf\", \"textured.obj\")\n else:\n raise NotImplementedError\n mesh = trimesh.load(mesh_file)\n # Mass and moments of inertia\n mass_text = str(mesh.mass)\n tf = mesh.principal_inertia_transform\n inertia = trimesh.inertia.transform_inertia(tf, mesh.moment_inertia)\n # Center of mass\n com_vec = mesh.center_mass.tolist()\n eul = trimesh.transformations.euler_from_matrix(np.linalg.inv(tf), axes=\"sxyz\")\n com_vec.extend(list(eul))\n com_text = str(com_vec)\n com_text = com_text.replace(\"[\", \"\")\n com_text = com_text.replace(\"]\", \"\")\n com_text = com_text.replace(\",\", \"\")\n\n # Create a downsampled mesh file with a subset of vertices and faces\n if downsample_ratio < 1:\n mesh_pts = mesh.vertices.shape[0]\n num_pts = int(mesh_pts * downsample_ratio)\n (_, face_idx) = mesh.sample(num_pts, True)\n downsampled_mesh = mesh.submesh((face_idx,), append=True)\n with open(os.path.join(model_folder, \"downsampled.obj\"), \"w\") as f:\n downsampled_mesh.export(f, \"obj\")\n collision_mesh_text = model_long + \"/downsampled.obj\"\n else:\n collision_mesh_text = model_long + \"/\" + mesh_type + \"/textured.obj\"\n\n # Copy and modify the model configuration file template\n config_text = config_template_text.replace(\"$MODEL_SHORT\", model_short)\n with open(os.path.join(model_folder, \"model.config\"), \"w\") as f:\n 
f.write(config_text)\n\n # Copy and modify the model file template\n model_text = model_template_text.replace(\"$MODEL_SHORT\", model_short)\n model_text = model_text.replace(\"$MODEL_LONG\", model_long)\n model_text = model_text.replace(\"$MESH_TYPE\", mesh_type)\n model_text = model_text.replace(\"$COLLISION_MESH\", collision_mesh_text)\n model_text = model_text.replace(\"$MASS\", mass_text)\n model_text = model_text.replace(\"$COM_POSE\", com_text)\n model_text = model_text.replace(\"$IXX\", str(inertia[0][0]))\n model_text = model_text.replace(\"$IYY\", str(inertia[1][1]))\n model_text = model_text.replace(\"$IZZ\", str(inertia[2][2]))\n model_text = model_text.replace(\"$IXY\", str(inertia[0][1]))\n model_text = model_text.replace(\"$IXZ\", str(inertia[0][2]))\n model_text = model_text.replace(\"$IYZ\", str(inertia[1][2]))\n with open(os.path.join(model_folder, model_short + \".sdf\"), \"w\") as f:\n f.write(model_text)\n\n # Copy and modify the material file template\n if mesh_type == \"google_16k\":\n texture_file = \"texture_map.png\"\n elif mesh_type == \"tsdf\":\n texture_file = \"textured.png\"\n else:\n raise NotImplementedError\n material_text = material_template_text.replace(\"$MODEL_SHORT\", model_short)\n material_text = material_text.replace(\"$MODEL_LONG\", model_long)\n material_text = material_text.replace(\"$MESH_TYPE\", mesh_type)\n material_text = material_text.replace(\"$TEXTURE_FILE\", texture_file)\n with open(os.path.join(model_folder, model_short + \".material\"), \"w\") as f:\n f.write(material_text)\n except:\n print(\"Error processing {}. Textured mesh likely does not exist for this object.\".format(folder))\n\n print(\"Done.\")\n\n" ]
[ [ "numpy.linalg.inv" ] ]
neilzhang-ucsb/ABRS
[ "03acc766f87e58870fe39d5403570c44be69f235" ]
[ "real_time_ABRS.py" ]
[ "#real_time_ABRS\r\n\r\n# Copyright (c) 2019 Primoz Ravbar UCSB\r\n# Licensed under BSD 2-Clause [see LICENSE for details]\r\n# Written by Primoz Ravbar\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport pickle\r\nimport msvcrt\r\n\r\n\r\nfrom scipy import misc #pip install pillow\r\nimport scipy\r\nfrom scipy import ndimage\r\n\r\nfrom PIL import Image\r\n\r\n\r\nfrom ABRS_modules import getting_frame_record\r\nfrom ABRS_modules import center_of_gravity\r\nfrom ABRS_modules import subtract_average\r\nfrom ABRS_modules import smooth_2d\r\nfrom ABRS_modules import smooth_1d\r\nfrom ABRS_modules import discrete_radon_transform\r\nfrom ABRS_modules import computeSpeedFromPosXY\r\nfrom ABRS_modules import create_3C_image\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\n\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\r\n\r\n\r\ncap = cv2.VideoCapture('Empty_Chrimson_dusted_1_2.avi');fb=4 #insert path to the raw movie; see README for the format\r\n\r\n\r\nnewSize = (400,400);\r\nstartFrame = 0;\r\nendFrame = 50012;\r\n\r\nkernelSize = 100\r\nsmoothingWindow = 89\r\n\r\nwindowSize = 10006 #size of window for training -- ignore in this version\r\n\r\nwinST = 16;\r\n\r\nhalfWindowSpeed = 15\r\n\r\nind = 0;\r\n\r\nprevFrame = np.zeros((400,400))\r\nfrRec = np.zeros((16+1,newSize[0]*newSize[1]))\r\n\r\ntrainImRec = np.zeros((80*80,1000))\r\ntrainLabelRec = np.zeros((1,1000))\r\n\r\npredictionsProbRec = np.zeros((10,endFrame))\r\n\r\netho = np.zeros((1,endFrame))\r\n\r\npathToABRSfolder = 'INSERT PATH TO ABRS MAIN FOLDER HERE'\r\n \r\n\r\nmodel = keras.models.load_model('modelConv2ABRS_3C')\r\nmodel.summary()\r\n\r\nfeatureCol = np.zeros((30,1));\r\nfeatureColAP = np.zeros((30,1));\r\nposCol = np.zeros((2,1));\r\nimCol = np.zeros((80*80,1));\r\nbehCol = np.zeros((1,1));\r\n\r\nfeatureMat = np.zeros((30,kernelSize))\r\nposMat = np.zeros((2,kernelSize))\r\nimMat = np.zeros((80*80,windowSize))\r\nbehMat = np.zeros((1,windowSize))\r\n\r\nim3Crec = np.zeros((1000,80,80,3))\r\n\r\nkernelInd = 0\r\ntrainInd = windowSize\r\nkeyInd = 0\r\nframeInd = 0\r\n\r\nwhile(cap.isOpened()): \r\n ret, frame = cap.read() #\r\n\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #\r\n\r\n rs = cv2.resize(gray,(newSize[0],newSize[1]));\r\n\r\n currentFrame = rs.astype(float)/1;\r\n diffFrame = currentFrame - prevFrame;\r\n prevFrame = currentFrame;\r\n\r\n diffFrameAbs = np.absolute(diffFrame)\r\n\r\n frameVect = currentFrame.reshape(1,newSize[0]*newSize[1]);\r\n frameVectFloat = frameVect.astype(float);\r\n\r\n frRecShort = np.delete(frRec, 0, 0);\r\n frRec = np.vstack((frRecShort,frameVectFloat));\r\n\r\n sumFrRec = np.sum(frRec,0);\r\n \r\n posDic, maxMovement, cfrVectRec, frameVectFloatRec = getting_frame_record(frRec, 0, winST,fb);\r\n \r\n im3CRaw = create_3C_image (cfrVectRec)\r\n \r\n if np.count_nonzero(im3CRaw[:,:,0])>6400: \r\n im3CRaw[:,:,0] = np.zeros((80,80))\r\n \r\n if np.count_nonzero(im3CRaw[:,:,1])>800: \r\n im3CRaw[:,:,1] = np.zeros((80,80))\r\n \r\n rgbArray = np.zeros((80,80,3), 'uint8')\r\n rgbArray[..., 0] = im3CRaw[:,:,0]\r\n 
rgbArray[..., 1] = im3CRaw[:,:,1]\r\n rgbArray[..., 2] = im3CRaw[:,:,2]\r\n im3C = Image.fromarray(rgbArray)\r\n\r\n X_rs = np.zeros((1,80,80,3))\r\n \r\n X_rs[0,:,:,:]=im3C\r\n\r\n storeFrameRec = 0\r\n if storeFrameRec == 1:\r\n im3Crec[frameInd,:,:,:]=im3C\r\n\r\n X = X_rs/256 # normalize\r\n\r\n\r\n predictionsProb = model.predict(X)\r\n\r\n predictionsProbRec[:,ind] = predictionsProb\r\n\r\n predictionLabel = np.zeros((1,np.shape(predictionsProb)[0]))\r\n predictionLabel[0,:] = np.argmax(predictionsProb,axis=1)\r\n \r\n\r\n beh = predictionLabel\r\n\r\n if maxMovement < 200: #this is to \r\n beh=7\r\n \r\n etho[0,ind]=beh\r\n \r\n print(beh)\r\n\r\n ###### this part is being developed for online training and for semi-automatic ethogram production \r\n \r\n trainKey = 'n'\r\n if keyInd == windowSize: \r\n trainKey = input('train?')\r\n \r\n\r\n if trainKey == 't':\r\n\r\n trainLabelRec[0,trainInd-windowSize:trainInd] = behMat\r\n trainImRec[:,trainInd-windowSize:trainInd] = imMat\r\n \r\n trainInd = trainInd +windowSize\r\n keyInd=0\r\n print(trainKey)\r\n\r\n if trainKey == 'f':\r\n beh = input('behavior?')\r\n trainLabelRec[0,trainInd-windowSize:trainInd] = beh\r\n trainImRec[:,trainInd-windowSize:trainInd] = imMat\r\n \r\n trainInd = trainInd +1\r\n keyInd=0\r\n print(trainKey) \r\n\r\n if trainKey != 't' and keyInd>windowSize:\r\n keyInd=0\r\n print(trainKey)\r\n\r\n keyInd = keyInd + 1\r\n\r\n frameInd = frameInd + 1\r\n\r\n ##################################################################\r\n\r\n \r\n cv2.imshow('im3CRaw',im3CRaw)\r\n cv2.imshow('frame',gray)\r\n\r\n\r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n if ind > endFrame-1:\r\n break\r\n\r\n ind=ind+1\r\n \r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n" ]
[ [ "numpy.vstack", "numpy.sum", "tensorflow.keras.models.load_model", "numpy.zeros", "numpy.argmax", "numpy.count_nonzero", "numpy.shape", "numpy.delete", "numpy.absolute" ] ]
DT021/GamestonkTerminal
[ "10d231ec2f86a19e69fdb65a2f4d37f33f723f6a" ]
[ "gamestonk_terminal/etf/etf_controller.py" ]
[ "\"\"\"ETF Controller\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nimport os\nfrom typing import List\nimport matplotlib.pyplot as plt\nfrom prompt_toolkit.completion import NestedCompleter\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.helper_funcs import get_flair\nfrom gamestonk_terminal.menu import session\nfrom gamestonk_terminal.etf.stockanalysis_model import (\n name_search,\n open_web,\n etf_overview,\n compare_etfs,\n etf_holdings,\n)\nfrom gamestonk_terminal.etf.screener_model import etf_screener\nfrom gamestonk_terminal.etf import wsj_view\n\n\nclass ETFController:\n CHOICES = [\n \"cls\",\n \"?\",\n \"help\",\n \"q\",\n \"quit\",\n \"web\",\n \"search\",\n \"overview\",\n \"compare\",\n \"holdings\",\n \"screener\",\n \"gainers\",\n \"decliners\",\n \"active\",\n ]\n\n def __init__(self):\n \"\"\"CONSTRUCTOR\"\"\"\n\n self.etf_parser = argparse.ArgumentParser(add_help=False, prog=\"etf\")\n self.etf_parser.add_argument(\"cmd\", choices=self.CHOICES)\n\n def print_help(self):\n \"\"\"Print help\"\"\"\n print(\n \"https://github.com/GamestonkTerminal/GamestonkTerminal/tree/main/gamestonk_terminal/etf\"\n )\n print(\"\\nETF:\")\n print(\" cls clear screen\")\n print(\" ?/help show this menu again\")\n print(\" q quit this menu, and shows back to main menu\")\n print(\" quit quit to abandon program\")\n print(\"\\nStockAnalysis.com\")\n print(\" web open StockAnalysis.com/etf\")\n print(\" search search ETFs matching name (i.e. BlackRock or Invesco)\")\n print(\" overview get overview of ETF symbol\")\n print(\" holdings get top holdings for ETF\")\n print(\" compare compare overview of multiple ETF\")\n print(\" screener screen etfs based on overview data\")\n print(\"\\n Wall St. Journal\")\n print(\" gainers show top gainers\")\n print(\" decliners show top decliners\")\n print(\" active show most active\")\n print(\"\")\n\n def switch(self, an_input: str):\n \"\"\"Process and dispatch input\n\n Returns\n -------\n True, False or None\n False - quit the menu\n True - quit the program\n None - continue in the menu\n \"\"\"\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.etf_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n os.system(\"cls||clear\")\n return None\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"Command not recognized!\"\n )(other_args)\n\n def call_help(self, _):\n \"\"\"Process Help command\"\"\"\n self.print_help()\n\n def call_q(self, _):\n \"\"\"Process Q command - quit the menu\"\"\"\n return False\n\n def call_quit(self, _):\n \"\"\"Process Quit command - quit the program\"\"\"\n return True\n\n def call_web(self, other_args: List[str]):\n \"\"\"Process web command\"\"\"\n open_web(other_args)\n\n def call_search(self, other_args: List[str]):\n \"\"\"Process search command\"\"\"\n name_search(other_args)\n\n def call_overview(self, other_args: List[str]):\n \"\"\"Process overview command\"\"\"\n etf_overview(other_args)\n\n def call_holdings(self, other_args: List[str]):\n \"\"\"Process holdings command\"\"\"\n etf_holdings(other_args)\n\n def call_compare(self, other_args):\n \"\"\"Process compare command\"\"\"\n compare_etfs(other_args)\n\n def call_screener(self, other_args):\n \"\"\"Process screener command\"\"\"\n etf_screener(other_args)\n\n def call_gainers(self, other_args):\n \"\"\"Process gainers 
command\"\"\"\n wsj_view.show_top_mover(\"gainers\", other_args)\n\n def call_decliners(self, other_args):\n \"\"\"Process decliners command\"\"\"\n wsj_view.show_top_mover(\"decliners\", other_args)\n\n def call_active(self, other_args):\n \"\"\"Process gainers command\"\"\"\n wsj_view.show_top_mover(\"active\", other_args)\n\n\ndef menu():\n etf_controller = ETFController()\n etf_controller.print_help()\n plt.close(\"all\")\n while True:\n # Get input command from user\n if session and gtff.USE_PROMPT_TOOLKIT:\n completer = NestedCompleter.from_nested_dict(\n {c: None for c in etf_controller.CHOICES}\n )\n an_input = session.prompt(\n f\"{get_flair()} (etf)> \",\n completer=completer,\n )\n else:\n an_input = input(f\"{get_flair()} (etf)> \")\n\n try:\n process_input = etf_controller.switch(an_input)\n\n if process_input is not None:\n return process_input\n\n except SystemExit:\n print(\"The command selected doesn't exist\\n\")\n continue\n" ]
[ [ "matplotlib.pyplot.close" ] ]
ding-ma/applied-ml
[ "91f5ade1984e84fd252fbc76d72f0ee8bd5c96d0" ]
[ "mini-project-2/twenty_news_run.py" ]
[ "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport itertools\r\nfrom random import randrange\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom datetime import datetime\r\nfrom backports.zoneinfo import ZoneInfo\r\nfrom model.CrossValidation import CrossVal\r\nfrom model.Helpers import evaluate_acc, print_acc_err, DATASET_PATH, NAIVE_BAYES_REPEAT_DICT, LOGISITC_REPEAT_DICT\r\nfrom model.NaiveBayes import BernoulliBayes, MultiNomialBayes\r\nimport sys\r\nfrom statistics import mean\r\nimport logging\r\n\r\n\r\nMODEL = MultinomialNB\r\n\r\n# only needed for kCV\r\nVECTORIZER = TfidfVectorizer()\r\n\r\nexperiment_description = f\"\"\"\r\nOnly word tokenization, no other cleaning\r\nMultiNomialBayes(), CountVectorizer()\r\n\"\"\"\r\n\r\nlogging.basicConfig(\r\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\r\n level=logging.INFO,\r\n datefmt=\"%Y-%m-%d %H:%M:%S\",\r\n handlers=[\r\n logging.FileHandler(filename=\"logs/News-{}.log\".format(datetime.now().strftime(\"%Y-%m-%d_%H%M%S\"))),\r\n logging.StreamHandler(sys.stdout),\r\n ],\r\n)\r\n\r\nlogging.info(experiment_description)\r\n\r\ntwenty_news_df = pd.read_csv(DATASET_PATH.joinpath(\"twenty_news_row_array_token_lower.csv\"))\r\ntwenty_news_df = shuffle(twenty_news_df, random_state=1)\r\ntwenty_news_df[\"sentence\"] = twenty_news_df[\"sentence\"].apply(lambda x: \" \".join(eval(x)))\r\n\r\ntwenty_news_df_X = twenty_news_df[\"sentence\"]\r\ntwenty_news_df_y = twenty_news_df[\"target\"]\r\n\r\ntwenty_CV = CrossVal(twenty_news_df_X, twenty_news_df_y)\r\nres = twenty_CV.kfoldCV(MultiNomialBayes(), CountVectorizer())\r\nprint_acc_err(res)\r\n" ]
[ [ "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.utils.shuffle" ] ]
pjordan/deep-reinforcement-learning
[ "4784635b67a30aadb5a7d93a0945781f55b6dccf" ]
[ "p3_collab-compet/view-actors.py" ]
[ "from model import Actor, Critic\nfrom config import Config\nimport torch\nfrom rlcc.act import NetworkActor, StackedActor\nfrom unityagents import UnityEnvironment\nimport numpy as np\nimport imageio\nimport os\nfrom itertools import cycle\n\nconfigs = []\nnames = []\n\n# \nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-01s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-001s\")\n\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-0001s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-10p-01s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-256bs-100p-001s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 100\nconfigs[-1].fcs1_units = 100\nconfigs[-1].fc2_units = 75\nnames.append(\"ddpg-100-75-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 100\nconfigs[-1].fcs1_units = 100\nconfigs[-1].fc2_units = 75\nnames.append(\"ddpg-100-75-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 50\nconfigs[-1].fcs1_units = 50\nconfigs[-1].fc2_units = 35\nnames.append(\"ddpg-50-35-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 50\nconfigs[-1].fcs1_units = 50\nconfigs[-1].fc2_units = 35\nnames.append(\"ddpg-50-35-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-256bs-100p-001s-3t\")\n\n\nactors = []\nfor c,n in zip(configs, names):\n model_path = 'saved-models/{}/checkpoint_actor.pth'.format(n)\n actor_model = Actor(c)\n actor_model.load_state_dict(torch.load(model_path, map_location='cpu'))\n actor_model.to(c.device)\n base_actor = NetworkActor(actor_model, c.device)\n actor = StackedActor([base_actor, base_actor])\n actors.append(actor)\n\nenv = UnityEnvironment(file_name=\"Tennis.app\")\n\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\nactor_iter = cycle(actors)\n\nwhile True:\n env_info = env.reset(train_mode=False)[brain_name]\n states = env_info.vector_observations \n frames = []\n actor = next(actor_iter)\n while True:\n actions = actor.act(states) \n env_info = env.step(actions)[brain_name]\n # print(env_info.visual_observations)\n # frames.append(env_info.visual_observations[0])\n states = env_info.vector_observations \n dones = env_info.local_done \n if np.any(dones): \n break\n\n#imageio.mimsave(os.path.join('episode-gifs', 'ddpg-200-150-128bs-100p-001s-3t.gif'), frames, duration=.04)\n \n#env.close()\n" ]
[ [ "numpy.any", "torch.load" ] ]
hytsang/cs-ranking
[ "241626a6a100a27b96990b4f199087a6dc50dcc0" ]
[ "csrank/dataset_reader/labelranking/survey_dataset_reader.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.preprocessing import Imputer, StandardScaler\nfrom sklearn.utils import check_random_state\n\nfrom csrank.constants import LABEL_RANKING\nfrom csrank.util import ranking_ordering_conversion\nfrom ..dataset_reader import DatasetReader\n\n\nclass SurveyDatasetReader(DatasetReader):\n def __init__(self, random_state=None, **kwargs):\n super(SurveyDatasetReader, self).__init__(learning_problem=LABEL_RANKING, dataset_folder='survey_data',\n **kwargs)\n self.train_file = os.path.join(self.dirname, 'rawdata_all.dta')\n self.random_state = check_random_state(random_state)\n self.__load_dataset__()\n\n def __load_dataset__(self):\n df = pd.io.stata.read_stata(self.train_file)\n orderings = []\n features = []\n for row in df.itertuples():\n orderings.append(row[4:8])\n context_feature = [float(i) if i != '.' else np.NAN for i in row[13:33]]\n features.append(context_feature)\n X = np.array(features)\n X = Imputer().fit_transform(X)\n X = np.array([np.log(np.array(X[:, i]) + 1) for i in range(len(features[0]))])\n X = np.array(X.T)\n self.X = StandardScaler().fit_transform(X)\n orderings = np.array(orderings) - 1\n self.rankings = ranking_ordering_conversion(orderings)\n\n def get_train_test_dataset(self):\n cv_iter = ShuffleSplit(n_splits=1, test_size=0.3, random_state=self.random_state)\n (train_idx, test_idx) = list(cv_iter.split(self.X))[0]\n return self.X[train_idx], self.rankings[train_idx], self.X[test_idx], self.rankings[test_idx]\n\n def get_complete_dataset(self):\n return self.X, self.rankings\n" ]
[ [ "sklearn.model_selection.ShuffleSplit", "sklearn.utils.check_random_state", "pandas.io.stata.read_stata", "sklearn.preprocessing.Imputer", "sklearn.preprocessing.StandardScaler", "numpy.array" ] ]
jbwang1997/BboxToolk
[ "a1e6b9dfcf1f533b630d656dc114ff62a5b37ba9" ]
[ "BboxToolkit/datasets/HRSCio.py" ]
[ "import os\nimport time\nimport os.path as osp\nimport xml.etree.ElementTree as ET\nimport numpy as np\n\nfrom PIL import Image\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom .misc import img_exts, get_classes, _ConstMapper\n\n\ndef load_hrsc(img_dir, ann_dir, classes=None, img_keys=None, obj_keys=None, nproc=10):\n assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'\n assert ann_dir is None or osp.isdir(ann_dir), f'The {ann_dir} is not an existing dir!'\n\n classes = get_classes('HRSC' if classes is None else classes)\n if (len(classes) == 1) and (classes[0] == 'ship'):\n cls2lbl = _ConstMapper(0)\n else:\n cls2lbl = dict()\n for i, cls in enumerate(classes):\n if len(cls) < 9:\n cls = '1' + '0' * (8 - len(cls)) + cls\n cls2lbl[cls] = i\n\n img_keys = dict() if img_keys is None else img_keys\n obj_keys = dict() if obj_keys is None else obj_keys\n\n contents = []\n print('Starting loading HRSC dataset information.')\n start_time = time.time()\n _load_func = partial(_load_hrsc_single,\n img_dir=img_dir,\n ann_dir=ann_dir,\n img_keys=img_keys,\n obj_keys=obj_keys,\n cls2lbl=cls2lbl)\n if nproc > 1:\n pool = Pool(nproc)\n contents = pool.map(_load_func, os.listdir(img_dir))\n pool.close()\n else:\n contents = list(map(_load_func, os.listdir(img_dir)))\n contents = [c for c in contents if c is not None]\n end_time = time.time()\n print(f'Finishing loading HRSC, get {len(contents)} images,',\n f'using {end_time-start_time:.3f}s.')\n return contents, ['ship']\n\n\ndef _load_hrsc_single(imgfile, img_dir, ann_dir, img_keys, obj_keys, cls2lbl):\n img_id, ext = osp.splitext(imgfile)\n if ext not in img_exts:\n return None\n\n xmlfile = None if ann_dir is None else osp.join(ann_dir, img_id+'.xml')\n content = _load_hrsc_xml(xmlfile, img_keys, obj_keys, cls2lbl)\n\n if not ('width' in content and 'height' in content):\n imgpath = osp.join(img_dir, imgfile)\n size = Image.open(imgpath).size\n content.update(dict(width=size[0], height=size[1]))\n content.update(dict(filename=imgfile, id=img_id))\n return content\n\n\ndef _load_hrsc_xml(xmlfile, img_keys, obj_keys, cls2lbl):\n hbboxes, bboxes, labels, diffs = list(), list(), list(), list()\n content = {k: None for k in img_keys}\n ann = {k: [] for k in obj_keys}\n if xmlfile is None:\n pass\n elif not osp.isfile(xmlfile):\n print(f\"Can't find {xmlfile}, treated as empty xmlfile\")\n else:\n tree = ET.parse(xmlfile)\n root = tree.getroot()\n\n content['width'] = int(root.find('Img_SizeWidth').text)\n content['height'] = int(root.find('Img_SizeHeight').text)\n for k, xml_k in img_keys.items():\n node = root.find(xml_k)\n value = None if node is None else node.text\n content[k] = value\n\n objects = root.find('HRSC_Objects')\n for obj in objects.findall('HRSC_Object'):\n cls = obj.find('Class_ID').text\n if cls not in cls2lbl:\n continue\n\n labels.append(cls2lbl[cls])\n hbboxes.append([\n float(obj.find('box_xmin').text),\n float(obj.find('box_ymin').text),\n float(obj.find('box_xmax').text),\n float(obj.find('box_ymax').text)\n ])\n bboxes.append([\n float(obj.find('mbox_cx').text),\n float(obj.find('mbox_cy').text),\n float(obj.find('mbox_w').text),\n float(obj.find('mbox_h').text),\n -float(obj.find('mbox_ang').text)\n ])\n diffs.append(\n int(obj.find('difficult').text))\n\n for k, xml_k in obj_keys.items():\n node = obj.find(xml_k)\n value = None if node is None else node.text\n ann[k].append(value)\n\n hbboxes = np.array(hbboxes, dtype=np.float32) if hbboxes \\\n else np.zeros((0, 4), dtype=np.float32)\n 
bboxes = np.array(bboxes, dtype=np.float32) if bboxes \\\n else np.zeros((0, 5), dtype=np.float32)\n labels = np.array(labels, dtype=np.int64) if labels \\\n else np.zeros((0, ), dtype=np.int64)\n diffs = np.array(diffs, dtype=np.int64) if diffs \\\n else np.zeros((0, ), dtype=np.int64)\n\n ann['hbboxes'] = hbboxes\n ann['bboxes'] = bboxes\n ann['labels'] = labels\n ann['diffs'] = diffs\n content['ann'] = ann\n return content\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
glauberrleite/ventilator-microcontroller
[ "3b0b489a71e841bf152059585de1d54f4e95e4cc" ]
[ "test/collect_data.py" ]
[ "import serial\nimport numpy as np\nimport time\nimport signal\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef signal_handler(sig, frame):\n ser.close()\n \n df = pd.DataFrame({'time':l_time, 'state':l_state, 'fl_int':l_fl_int, 'pres_int':l_pres_int, 'pres_pac':l_pres_pac, 'pres_exp':l_pres_exp, 'fl_pac':l_fl_pac})\n df.to_csv('list.csv', index=False)\n print(\"XAU\")\n\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n#plt.ion()\n#fig=plt.figure()\n\nk = 0\nTs = 0.01\nl_time = list()\nl_state = list()\nl_fl_int = list()\nl_fl_pac = list()\nl_pres_exp = list()\nl_pres_pac = list()\nl_pres_int = list()\nk = 0\nser = serial.Serial('/dev/ttyACM0', 9600)\nser.close()\nser.open()\n#ser.write(\"START\".encode())\n\nstate = \"\"\nfl_int = \"\"\nfl_pac = \"\"\npres_pac = \"\"\npres_int = \"\"\npres_exp = \"\"\n\nwhile True:\n data = ser.readline().decode('utf-8')\n\n #print(data)\n state, fl_int, pres_int, pres_pac, pres_exp, fl_pac = data.split('\\t')\n \n l_time.append(k * Ts)\n l_state.append(state)\n l_fl_int.append(float(fl_int))\n l_fl_pac.append(float(fl_pac))\n l_pres_int.append(float(pres_int))\n l_pres_pac.append(float(pres_pac))\n l_pres_exp.append(float(pres_exp))\n\n #plt.scatter(k * Ts, float(pres_pac), c='blue')\n\n #plt.cla()\n #plt.plot(l_time[len(l_time)-100:len(l_time)-1], l_state[len(l_state)-100:len(l_state)-1], linewidth=2, c='blue')\n\n #print(state + '\\t' + fl_int + '\\t' + fl_pac_ins + '\\t' + fl_pac_exp + '\\t' + pres_pac + '\\t' + pres_int)\n #print(y[:,0])\n\n k += 1\n\n time.sleep(Ts)\n #plt.show()\n #plt.pause(Ts) # Note this correction\n" ]
[ [ "pandas.DataFrame" ] ]
yieldsfalsehood/rng_fdw
[ "31a181a6a912f0b1072c7ed9f09c5bd0afa052b8" ]
[ "rng_fdw/__init__.py" ]
[ "#!/usr/bin/env python\n\nfrom multicorn import ForeignDataWrapper\n\nimport numpy as np\nimport scipy.stats\n\nclass RNGWrapper(ForeignDataWrapper):\n\n def __init__(self, options, columns):\n\n super(RNGWrapper, self).__init__(options, columns)\n self.columns = columns\n\n # default to the normal distribution if none was specified\n distribution = options.get(\"distribution\", \"norm\")\n\n # this should be made to fail indicating that the distribution\n # given doesn't exist\n try:\n self.func = getattr(scipy.stats, distribution)\n except:\n pass\n\n def execute(self, quals, columns):\n\n has_size = False\n size = 20\n params = dict()\n\n for qual in quals:\n\n # right now we only handle simple equality\n # constraints. any other predicates will cause no results\n # to be generated (because they won't be satisfied).\n if qual.is_list_operator or qual.operator != \"=\":\n pass\n\n # if a constraint on \"size\" is given, use that to override\n # the default value (20). otherwise, keep a record of the\n # parameters provided and their values\n if qual.field_name == \"size\":\n has_size = True\n size = qual.value\n else:\n params[qual.field_name] = np.float(qual.value)\n\n # instantiate a distribution object from the parameters and\n # generate some variates!\n F = self.func(**params)\n for x in F.rvs(size=size):\n # this is a messy way of saying:\n # 1. set the column \"val\" to the value of this variate\n # 2. include all the equality predicates that were passed\n # in as extracted above\n # 3. set the column \"size\" to the provided value if one\n # was given (otherwise leave it null)\n d = dict([(\"val\", x)] + params.items() + ([(\"size\", size)] if has_size else []))\n yield d\n" ]
[ [ "numpy.float" ] ]
Grazziela/ChiralLigands
[ "8a786212c464bdef8141643d29122c18fd73bc5f" ]
[ "Dragon_signature desciptors/Step 2 - Regression Models/RandomForestRegression_SHAP_Analysis.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 4 13:52:25 2019\r\n\r\nSHAP importance calculation for Random Forests with standard parameters\r\nusing training/test split\r\n\r\nObs: Run the code in the same folder as your data files\r\n\r\n@author: Grazziela Figueredo\r\n\"\"\"\r\nimport pandas as pd #for manipulating data\r\nimport numpy as np #for manipulating data\r\n\r\nimport sklearn #for building models\r\nimport sklearn.ensemble #for building models\r\nfrom sklearn.model_selection import train_test_split #for creating a hold-out sample\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nimport shap #SHAP package for model interpretability\r\nimport matplotlib.pyplot as plt \r\nfrom matplotlib import cm\r\n\r\ndef plot_regression(y, y_hat, figure_title):\r\n fig, ax = plt.subplots()\r\n ax.scatter(y, y_hat)\r\n ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)\r\n ax.set_xlabel('Measured ' + dependent_variable, fontsize = 13)\r\n ax.set_ylabel('Predicted ' + dependent_variable, fontsize = 13)\r\n plt.title(figure_title, fontsize = 13)\r\n coefficient_of_dermination = r2_score(y, y_hat)\r\n legend = 'R2: '+str(float(\"{0:.2f}\".format(coefficient_of_dermination)))\r\n plt.legend(['Best fit',legend],loc = 'upper left', fontsize = 13)\r\n plt.show()\r\n \r\n rmse = np.sqrt(mean_squared_error(y, y_hat))\r\n print(\"\\n\\n RMSE train RF: %f\" % (rmse)) \r\n print(\"\\n R2 train RF: %f\" % (coefficient_of_dermination))\r\n\r\n\r\n# Random Forest Regression using standard parameters\r\ndef random_forest_regression(X_train, y_train, X_test, y_test): \r\n rf = sklearn.ensemble.RandomForestRegressor()\r\n rf.fit(X_train, y_train)\r\n y_hat = rf.predict(X_train)\r\n \r\n plot_regression(y_train, y_hat, \"Results for the Training Set\")\r\n y_hat = rf.predict(X_test)\r\n plot_regression(y_test, y_hat, \"Results for the Test Set\")\r\n \r\n return rf\r\n\r\n\r\n# Reading input data\r\ndata = pd.read_excel('LigandSubstrateBoronDragonDescriptors_LASSO.xlsx')\r\n\r\n# Determining X and y arrays. 
Y is supposed to be the last column of the input file\r\nindex = len(data.columns)\r\nX = data.iloc[:,0:index-1]\r\ny = data.iloc[:,index-1]\r\n\r\n# Variable used to plot the y axis name in the regression graphs\r\ndependent_variable = y.name\r\n\r\n# Training and test sets split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size = 0.3)\r\n\r\n##############################################################################\r\n\r\nrf = random_forest_regression(X_train, y_train, X_test, y_test)\r\n\r\n# Random Forest explainer\r\nexplainerRF = shap.TreeExplainer(rf)\r\nshap_values_RF_test = explainerRF.shap_values(X_test)\r\nshap_values_RF_train = explainerRF.shap_values(X_train)\r\n\r\ndf_shap_RF_test = pd.DataFrame(shap_values_RF_test, columns=X_test.columns.values)\r\ndf_shap_RF_train = pd.DataFrame(shap_values_RF_train, columns=X_train.columns.values)\r\n\r\n# if a feature has 10 or less unique values then treat it as categorical\r\ncategorical_features = np.argwhere(np.array([len(set(X_train.values[:,x]))\r\nfor x in range(X_train.values.shape[1])]) <= 10).flatten()\r\n\r\n# Printing SHAP results\r\nprint('Shap for RF:\\n\\n')\r\nplt.figure()\r\nshap.summary_plot(shap_values_RF_train, X_train, plot_type=\"bar\", max_display = 8) \r\n\r\nplt.figure()\r\nshap.summary_plot(shap_values_RF_train, X_train, max_display = 8, color_bar_label = 'Descriptor value', show = False, plot_size= (4.5,3))\r\nplt.grid()\r\n#Changing plot colours\r\nfor fc in plt.gcf().get_children():\r\n for fcc in fc.get_children():\r\n if hasattr(fcc, \"set_cmap\"):\r\n fcc.set_cmap(cm.get_cmap('coolwarm'))\r\n" ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "pandas.DataFrame", "pandas.read_excel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.show", "matplotlib.cm.get_cmap", "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split" ] ]
BEL-Public/mffpy
[ "8515824d89a77cf10f7c36bb405f61d338b6f5fe" ]
[ "mffpy/header_block/header_block.py" ]
[ "\"\"\"Management of header blocks in .mff binary files\n\n.mff binary files have a blocked structure. Consecutive blocks can be\nseparated by a header, which brings us to the topic of this module.\n\nThe header consists of either a single flag (`flag=0`) or a block describing\nthe following bytes of signal data (`flag=1`). Regardless, the flag is 32-bit\nwide.\n\nThis module adds functionality to read and write these header blocks.\n\n**Header block structure**\n\n+-------------+-------------+---------------------------------------+\n| start byte | end byte | description |\n+-------------+-------------+---------------------------------------+\n| 0 | 4 | header flag, if 1, header present |\n| 4 | 8 | bytes in header := `hb` |\n| 8 | 12 | bytes in data blob w/out header |\n| 12 | 16 | channel count := `nc` |\n| 16 | 16 + 4 * nc | per-channel byte offset |\n| 16 + 4 * nc | 16 + 8 * nc | per-channel frequency and byte depths |\n| 16 + 8 * nc | hb | optional header bytes |\n+-------------+-------------+---------------------------------------+\n\nOptional header bytes are described in \"./optional_header_block.py\"\n\"\"\"\n\nfrom typing import Optional, Tuple\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom .helpers import FileLike, read, skip, write\nfrom . import optional_header_block as opt\n\nHEADER_BLOCK_PRESENT = 1\n\n_HeaderBlock = namedtuple('_HeaderBlock', [\n 'header_size',\n 'block_size',\n 'num_channels',\n 'num_samples',\n 'sampling_rate',\n 'optional_header'\n])\n\n\nclass HeaderBlock(_HeaderBlock):\n\n def __new__(cls,\n block_size: int,\n num_channels: int,\n num_samples: int,\n sampling_rate: int,\n header_size: Optional[int] = None,\n optional_header: opt.BlockTypes = opt.NoOptHeaderBlock()):\n \"\"\"create new HeaderBlock instance\n\n Parameters\n ----------\n block_size : byte size of the block\n num_channels : channel count in the block\n num_samples : sample count per channel in the block\n sampling_rate : sampling_rate per channel in the block\n header_size : byte size of the header (computed if None)\n optional_header : optional header with additional fields\n \"\"\"\n computed_size = cls.compute_byte_size(num_channels, optional_header)\n if header_size and header_size != computed_size:\n raise ValueError(f\"\"\"header of inconsistent size:\n {header_size} != {computed_size}\"\"\")\n\n header_size = computed_size\n return super().__new__(cls, header_size, block_size, num_channels,\n num_samples, sampling_rate, optional_header)\n\n @classmethod\n def from_file(cls, fp: FileLike):\n \"\"\"return HeaderBlock, read from fp\"\"\"\n\n # Each block starts with a 4-byte-long header flag which is\n # * `0`: there is no header\n # * `1`: it follows a header\n if read(fp, 'i') == 0:\n return None\n # Read general information\n header_size, block_size, num_channels = read(fp, '3i')\n # number of 4-byte samples per channel in the data block\n num_samples = (block_size//num_channels) // 4\n # Read channel-specific information\n # Skip byte offsets\n skip(fp, 4 * num_channels)\n # Sample rate/depth: Read one skip, over the rest\n # We also check that depth is always 4-byte floats (32 bit)\n sampling_rate, depth = cls.decode_rate_depth(read(fp, 'i'))\n skip(fp, 4 * (num_channels - 1))\n assert depth == 32, f\"\"\"\n Unable to read MFF with `depth != 32` [`depth={depth}`]\"\"\"\n optional_header = opt.from_file(fp)\n return cls(\n block_size=block_size,\n header_size=header_size,\n num_samples=num_samples,\n num_channels=num_channels,\n sampling_rate=sampling_rate,\n 
optional_header=optional_header,\n )\n\n def write(self, fp: FileLike):\n \"\"\"write HeaderBlock to file pointer `fp`\"\"\"\n write(fp, '4i', (\n HEADER_BLOCK_PRESENT,\n self.header_size,\n self.block_size,\n self.num_channels\n ))\n num_samples = (self.block_size//self.num_channels) // 4\n # Write channel offset into the data block\n arr = 4 * num_samples * np.arange(self.num_channels).astype(np.int32)\n fp.write(arr.tobytes())\n # write sampling-rate/depth word\n sr_d = self.encode_rate_depth(self.sampling_rate, 32)\n arr = sr_d * np.ones(self.num_channels, dtype=np.int32)\n fp.write(arr.tobytes())\n self.optional_header.write(fp)\n\n @staticmethod\n def decode_rate_depth(x: int) -> Tuple[int, int]:\n \"\"\"return rate and depth from encoded representation\"\"\"\n rate = x >> 8\n depth = x & 0xff\n return rate, depth\n\n @staticmethod\n def encode_rate_depth(rate: int, depth: int) -> int:\n \"\"\"return joined rate and byte depth of samples\n\n Sampling rate and sample depth are encoded in a single 4-byte integer.\n The first byte is the depth the last 3 bytes give the sampling rate.\n \"\"\"\n assert depth < (\n 1 << 8), f\"depth must be smaller than 256 (got {depth})\"\n assert rate < (\n 1 << 24), f\"depth must be smaller than {1<<24} (got {rate})\"\n return (rate << 8) + depth\n\n @staticmethod\n def compute_byte_size(num_channels: int,\n optional_header: opt.BlockTypes) -> int:\n \"\"\"returns sum of header byte size and optional header size\n\n `(5 + ..)`: The 4-byte int of the optional header byte size constitutes\n the \"5\", not in `optional_header.byte_size`. See the file description\n for detailed infos on all bytes.\n \"\"\"\n return 4 * (5 + 2 * num_channels) + optional_header.byte_size\n" ]
[ [ "numpy.arange", "numpy.ones" ] ]
ashao/SmartSim
[ "54ca7a72e4e19a167b67b8d16daf113e81f75817" ]
[ "smartsim/experiment.py" ]
[ "# BSD 2-Clause License\n#\n# Copyright (c) 2021, Hewlett Packard Enterprise\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os.path as osp\nimport time\nfrom os import getcwd\nfrom pprint import pformat\n\nimport pandas as pd\nfrom tqdm import trange\n\nfrom .control import Controller\nfrom .entity import Ensemble, EntityList, Model, SmartSimEntity\nfrom .error import SmartSimError\nfrom .generation import Generator\nfrom .utils import get_logger\nfrom .utils.entityutils import separate_entities\nfrom .utils.helpers import colorize, init_default\n\nlogger = get_logger(__name__)\n\n\nclass Experiment:\n \"\"\"Experiments are the main user interface in SmartSim.\n\n Experiments can create instances to launch called ``Model``\n and ``Ensemble``. Through the ``Experiment`` interface, users\n can programmatically create, configure, start, stop, and\n query the instances they create.\n \"\"\"\n\n def __init__(self, name, exp_path=None, launcher=\"local\"):\n \"\"\"Initialize an ``Experiment``\n\n :param name: name for the ``Experiment``\n :type name: str\n :param exp_path: path to location of ``Experiment`` directory if generated\n :type exp_path: str\n :param launcher: type of launcher, options are \"slurm\", \"pbs\",\n \"cobalt\", or \"local\". 
Defaults to \"local\"\n :type launcher: str\n \"\"\"\n self.name = name\n if exp_path:\n if not isinstance(exp_path, str):\n raise TypeError(\"exp_path argument was not of type str\")\n if not osp.isdir(osp.abspath(exp_path)):\n raise NotADirectoryError(\"Experiment path provided does not exist\")\n exp_path = osp.abspath(exp_path)\n self.exp_path = init_default(osp.join(getcwd(), name), exp_path, str)\n self._control = Controller(launcher=launcher)\n\n def start(self, *args, block=True, summary=False):\n \"\"\"Launch instances passed as arguments\n\n Start the ``Experiment`` by turning specified instances into jobs\n for the underlying launcher and launching them.\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the start method.\n\n :param block: block execution until all non-database\n jobs are finished, defaults to True\n :type block: bool, optional\n :param summary: print a launch summary prior to launch,\n defaults to False\n :type summary: bool, optional\n \"\"\"\n try:\n if summary:\n self._launch_summary(*args)\n self._control.start(*args, block=block)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def stop(self, *args):\n \"\"\"Stop specific instances launched by this ``Experiment``\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the start method.\n\n :raises TypeError: if wrong type\n :raises SmartSimError: if stop request fails\n \"\"\"\n try:\n for entity in args:\n if isinstance(entity, SmartSimEntity):\n self._control.stop_entity(entity)\n elif isinstance(entity, EntityList):\n self._control.stop_entity_list(entity)\n else:\n raise TypeError(\n f\"Argument was of type {type(entity)} not SmartSimEntity or EntityList\"\n )\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def generate(self, *args, tag=None, overwrite=False):\n \"\"\"Generate the file structure for an ``Experiment``\n\n ``Experiment.generate`` creates directories for each instance\n passed to organize Experiments that launch many instances\n\n If files or directories are attached to ``Model`` objects\n using ``Model.attach_generator_files()``, those files or\n directories will be symlinked, copied, or configured and\n written into the created directory for that instance.\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the generate method.\n\n :param tag: tag used in `to_configure` generator files\n :type tag: str, optional\n :param overwrite: overwrite existing folders and contents\n :type overwrite: bool, optional\n \"\"\"\n try:\n generator = Generator(self.exp_path, overwrite=overwrite)\n if tag:\n generator.set_tag(tag)\n generator.generate_experiment(*args)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def poll(self, interval=10, verbose=True):\n \"\"\"Monitor jobs through logging to stdout.\n\n This method should only be used if jobs were launched\n with ``Experiment.start(block=False)``\n\n :param interval: frequency of logging to stdout\n :type interval: int\n :param verbose: set verbosity\n :type verbose: bool\n :raises SmartSimError:\n \"\"\"\n try:\n self._control.poll(interval, verbose)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def finished(self, entity):\n \"\"\"Query if a job as completed\n\n A instance of ``Model``, ``Ensemble`` can be passed\n as an argument.\n\n :param entity: object launched by this ``Experiment``\n :type entity: SmartSimEntity | EntityList\n :returns: True if job has completed\n 
:rtype: bool\n \"\"\"\n try:\n return self._control.finished(entity)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def get_status(self, *args):\n \"\"\"Query the status of a job\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to ``Experiment.get_status()``\n\n :returns: status of the job\n :rtype: list[str]\n :raises SmartSimError: if status retrieval fails\n :raises TypeError:\n \"\"\"\n try:\n statuses = []\n for entity in args:\n if isinstance(entity, SmartSimEntity):\n statuses.append(self._control.get_entity_status(entity))\n elif isinstance(entity, EntityList):\n statuses.extend(self._control.get_entity_list_status(entity))\n else:\n raise TypeError(\n f\"Argument was of type {type(entity)} not SmartSimEntity or EntityList\"\n )\n return statuses\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def create_ensemble(\n self,\n name,\n params=None,\n batch_settings=None,\n run_settings=None,\n replicas=None,\n perm_strategy=\"all_perm\",\n **kwargs,\n ):\n \"\"\"Create an ``Ensemble`` of ``Model`` instances\n\n Ensembles can be launched sequentially or as a batch\n if using a non-local launcher. e.g. slurm\n\n Ensembles require one of the following combinations\n of arguments\n - ``run_settings`` and ``params``\n - ``run_settings`` and ``replicas``\n - ``batch_settings``\n - ``batch_settings``, ``run_settings``, and ``params``\n - ``batch_settings``, ``run_settings``, and ``replicas``\n\n If given solely batch settings, an empty ensemble\n will be created that models can be added to manually\n through ``Ensemble.add_model()``.\n The entire ensemble will launch as one batch.\n\n Provided batch and run settings, either ``params``\n or ``replicas`` must be passed and the entire ensemble\n will launch as a single batch.\n\n Provided solely run settings, either ``params``\n or ``replicas`` must be passed and the ensemble members\n will each launch sequentially.\n\n :param name: name of the ensemble\n :type name: str\n :param params: parameters to expand into ``Model`` members\n :type params: dict[str, Any]\n :param batch_settings: describes settings for ``Ensemble`` as batch workload\n :type batch_settings: BatchSettings\n :param run_settings: describes how each ``Model`` should be executed\n :type run_settings: RunSettings\n :param replicas: number of replicas to create\n :type replicas: int\n :param perm_strategy: strategy for expanding ``params`` into\n ``Model`` instances from params argument\n options are \"all_perm\", \"stepped\", \"random\"\n or a callable function\n :type perm_strategy: str\n :raises SmartSimError: if initialization fails\n :return: ``Ensemble`` instance\n :rtype: Ensemble\n \"\"\"\n try:\n new_ensemble = Ensemble(\n name,\n params,\n batch_settings=batch_settings,\n run_settings=run_settings,\n perm_strat=perm_strategy,\n replicas=replicas,\n **kwargs,\n )\n return new_ensemble\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def create_model(\n self, name, run_settings, params=None, path=None, enable_key_prefixing=False\n ):\n \"\"\"Create a ``Model``\n\n By default, all ``Model`` instances start with the cwd\n as their path unless specified. 
If specified or not, upon\n user passing the instance to ``Experiment.generate()``,\n the ``Model`` path will be overwritten and replaced\n with the created directory for the ``Model``\n\n :param name: name of the model\n :type name: str\n :param run_settings: defines how ``Model`` should be run,\n :type run_settings: RunSettings\n :param params: model parameters for writing into configuration files\n :type params: dict, optional\n :param path: path to where the model should be executed at runtime\n :type path: str, optional\n :param enable_key_prefixing: If true, data sent to the Orchestrator\n using SmartRedis from this ``Model`` will\n be prefixed with the ``Model`` name.\n :type enable_key_prefixing: bool\n :raises SmartSimError: if initialization fails\n :return: the created ``Model``\n :rtype: Model\n \"\"\"\n path = init_default(getcwd(), path, str)\n params = init_default({}, params, dict)\n try:\n new_model = Model(name, params, path, run_settings)\n if enable_key_prefixing:\n new_model.enable_key_prefixing()\n return new_model\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def reconnect_orchestrator(self, checkpoint):\n \"\"\"Reconnect to a running ``Orchestrator``\n\n This method can be used to connect to a Redis deployment\n that was launched by a previous ``Experiment``. This way\n users can run many experiments utilizing the same Redis\n deployment\n\n :param checkpoint: the `smartsim_db.dat` file created\n when an ``Orchestrator`` is launched\n :type checkpoint: str\n \"\"\"\n try:\n orc = self._control.reload_saved_db(checkpoint)\n return orc\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def summary(self):\n \"\"\"Return a summary of the ``Experiment``\n\n The summary will show each instance that has been\n launched and completed in this ``Experiment``\n\n :return: Dataframe of ``Experiment`` history\n :rtype: pd.DataFrame\n \"\"\"\n index = 0\n df = pd.DataFrame(\n columns=[\n \"Name\",\n \"Entity-Type\",\n \"JobID\",\n \"RunID\",\n \"Time\",\n \"Status\",\n \"Returncode\",\n ]\n )\n # TODO should this include running jobs?\n for job in self._control._jobs.completed.values():\n for run in range(job.history.runs + 1):\n df.loc[index] = [\n job.entity.name,\n job.entity.type,\n job.history.jids[run],\n run,\n job.history.job_times[run],\n job.history.statuses[run],\n job.history.returns[run],\n ]\n index += 1\n return df\n\n def _launch_summary(self, *args):\n \"\"\"Experiment pre-launch summary of entities that will be launched\"\"\"\n\n def sprint(p):\n print(p, flush=True)\n\n sprint(\"\\n\")\n models, ensembles, orchestrator = separate_entities(args)\n\n header = colorize(\"=== LAUNCH SUMMARY ===\", color=\"cyan\", bold=True)\n exname = colorize(\"Experiment: \" + self.name, color=\"green\", bold=True)\n expath = colorize(\"Experiment Path: \" + self.exp_path, color=\"green\")\n launch = colorize(\n \"Launching with: \" + str(self._control._launcher), color=\"green\"\n )\n numens = colorize(\"# of Ensembles: \" + str(len(ensembles)), color=\"green\")\n numods = colorize(\"# of Models: \" + str(len(models)), color=\"green\")\n has_orc = \"yes\" if orchestrator else \"no\"\n orches = colorize(\"Database: \" + has_orc, color=\"green\")\n\n sprint(f\"{header}\")\n sprint(f\"{exname}\\n{expath}\\n{launch}\\n{numens}\\n{numods}\\n{orches}\\n\")\n\n if ensembles:\n sprint(colorize(\"=== ENSEMBLES ===\", color=\"cyan\", bold=True))\n for ens in ensembles:\n name = colorize(ens.name, color=\"green\", bold=True)\n num_models = colorize(\n \"# of models in 
ensemble: \" + str(len(ens)), color=\"green\"\n )\n batch_settings = colorize(\n \"Batch Settings: \\n\" + str(ens.batch_settings),\n color=\"green\",\n )\n run_settng = colorize(\n \"Run Settings: \\n\" + str(ens.run_settings),\n color=\"green\",\n )\n batch = colorize(f\"Launching as batch: {ens.batch}\", color=\"green\")\n\n sprint(f\"{name}\")\n sprint(f\"{num_models}\")\n sprint(f\"{batch}\")\n if ens.batch:\n print(f\"{batch_settings}\")\n else:\n sprint(f\"{run_settng}\")\n sprint(\"\\n\")\n if models:\n sprint(colorize(\"=== MODELS ===\", color=\"cyan\", bold=True))\n for model in models:\n model_name = colorize(model.name, color=\"green\", bold=True)\n parameters = colorize(\n \"Model Parameters: \\n\" + pformat(model.params), color=\"green\"\n )\n run_settng = colorize(\n \"Model Run Settings: \\n\" + str(model.run_settings),\n color=\"green\",\n )\n sprint(f\"{model_name}\")\n sprint(f\"{parameters}\")\n sprint(f\"{run_settng}\")\n sprint(\"\\n\")\n if orchestrator:\n sprint(colorize(\"=== DATABASE ===\", color=\"cyan\", bold=True))\n size = colorize(\n \"# of database nodes: \" + str(len(orchestrator)), color=\"green\"\n )\n batch = colorize(f\"Launching as batch: {orchestrator.batch}\", color=\"green\")\n sprint(f\"{batch}\")\n sprint(f\"{size}\")\n\n sprint(\"\\n\")\n\n wait, steps = 10, 100\n prog_bar = trange(\n steps,\n desc=\"Launching in...\",\n leave=False,\n ncols=80,\n mininterval=0.25,\n bar_format=\"{desc}: {bar}| {remaining} {elapsed}\",\n )\n for _ in prog_bar:\n time.sleep(wait / steps)\n\n def __str__(self):\n return self.name\n" ]
[ [ "pandas.DataFrame" ] ]
WerzSoft/PElyzer
[ "18ab87bb4aafaf7a9f02749545a4ba110c1036fe" ]
[ "tests/estadisticas.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.manifold import TSNE\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport seaborn as sns\r\n\r\n\r\ndatos = pd.read_csv(\"pelyzer/recursos/dataset.csv\")\r\n\r\n#visualizacion valores nulos\r\nsns.heatmap(datos.isnull(), cbar=True, cmap=\"OrRd_r\")\r\nplt.title(\"Nulos heatmap\")\r\nplt.savefig(\"imagenes/nulos_heatmap.png\")\r\nplt.clf()\r\n\r\nsns.countplot('is_exe', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - exe\")\r\nplt.savefig(\"imagenes/tipo_exe.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_exe', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (exe)\")\r\nplt.savefig(\"imagenes/indice_tipo_exe.png\")\r\nplt.clf()\r\n\r\nsns.countplot('is_dll', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - dll\")\r\nplt.savefig(\"imagenes/tipo_dll.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_dll', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (dll)\")\r\nplt.savefig(\"imagenes/indice_tipo_dll.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('is_driver', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - driver\")\r\nplt.savefig(\"imagenes/tipo_driver.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_driver', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (driver)\")\r\nplt.savefig(\"imagenes/indice_tipo_driver.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('unk_opcodes', hue='malware', data=datos)\r\nplt.title(\"Malware por opdcodes desconocidos\")\r\nplt.savefig(\"imagenes/unk_opcodes.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='unk_opcodes', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por opcodes desconocidos\")\r\nplt.savefig(\"imagenes/indice_unk_opcodes.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('n_std_sec', hue='malware', data=datos)\r\nplt.title(\"Malware por secciones estandar\")\r\nplt.savefig(\"imagenes/secciones_estandar.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='n_std_sec', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por secciones estandar\")\r\nplt.savefig(\"imagenes/indice_secciones_estandar.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('n_susp_sec', hue='malware', data=datos)\r\nplt.title(\"Malware por secciones sospechosas\")\r\nplt.savefig(\"imagenes/secciones_sospechosas.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='n_susp_sec', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por secciones sospechosas\")\r\nplt.savefig(\"imagenes/indice_secciones_sospechosas.png\")\r\nplt.clf()\r\n\r\nsns.countplot('checksum_invalido', hue='malware', data=datos)\r\nplt.title(\"Malware por checksum invalido\")\r\nplt.savefig(\"imagenes/checksum.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='checksum_invalido', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por checksum invalido\")\r\nplt.savefig(\"imagenes/indice_checksum.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('firmado', hue='malware', data=datos)\r\nplt.title(\"Malware por firma\")\r\nplt.savefig(\"imagenes/firmado.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='firmado', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por 
firma\")\r\nplt.savefig(\"imagenes/indice_firma.png\")\r\nplt.clf()\r\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel" ] ]
pawangeek/CovidHack
[ "3a57106eeb443550bb801a8120243b992bc960fb" ]
[ "app.py" ]
[ "import requests, json, populartimes\nimport numpy as np\nimport pandas as pd\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\nfrom form import DetailForm, UserForm, UserLogin, NGOForm\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom datetime import datetime, timedelta\nfrom flask import Flask, redirect, url_for, request, render_template, session, flash\nfrom flask_mail import Mail, Message\nfrom models import get_coords\nfrom flask_login import LoginManager, login_required\nfrom flask_googlemaps import GoogleMaps, Map\nfrom haversine import haversine\n\n# Create Flask App\napp = Flask(__name__)\napp.secret_key = \"xb1\\x058\\xb8o\\x82\\xaf\\xdb\\xd5I\"\napp.config.from_pyfile('config.cfg')\n\n# Get Google Places API: https://developers.google.com/places/web-service/get-api-key and replace\nMyAPI_key = \"Put your key\"\n\nGoogleMaps(app,key=MyAPI_key)\nmail = Mail(app)\n\nkey = 'xb1\\x058\\xb8o\\x82\\xaf\\xdb\\xd5I'\nengine = create_engine(\"mysql+pymysql://root:pawan@localhost/covid\")\ndb = scoped_session(sessionmaker(bind=engine))\ns = URLSafeTimedSerializer(key)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"userlogin\"\n\n# URL for request to google place text search to find user address based on what is typed in\nurl = \"https://maps.googleapis.com/maps/api/place/textsearch/json?\"\n\n# constant factor used later to calculate the area (longitude, latitude) to scan for stores\nalpha = 180/(np.pi*6371000)\n\[email protected]('/')\ndef home():\n return render_template('homes.html')\n\n\[email protected](\"/userregister\", methods=[\"GET\", \"POST\"])\ndef register():\n form = UserForm(request.form)\n if request.method == 'POST' and form.validate():\n fname = form.fname.data\n lname = form.lname.data\n email = form.email.data\n password = form.password.data\n confirm = form.confirm_password.data\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n\n ipaddr = '157.37.154.227' # hard coded till we deploy it\n loc = get_coords(ipaddr)\n lat, lon = loc[0], loc[1]\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is not None:\n flash(\"Email taken\", \"danger\")\n return render_template(\"userregister.html\", form=form)\n\n if password == confirm:\n db.execute(\"INSERT INTO users (first_name, last_name, email, pass,lon, lat) VALUES (:fname, :lname, :email, :password, :lon,:lan)\", {\n \"fname\": fname, \"lname\": lname, \"email\": email, \"password\": password, \"lon\":lon, \"lat\":lat})\n db.commit()\n\n email = request.form['email']\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Confirm Email', sender='[email protected]', recipients=[email])\n link = url_for('confirm_email', token=token, _external=True)\n\n msg.body = link\n mail.send(msg)\n flash(\"A confirmation email has been sent. 
Please confirm your email.\", \"success\")\n return render_template(\"userregister.html\", form=form)\n\n else:\n flash(\"Passwords do not match\", \"danger\")\n return render_template(\"userregister.html\",form=form)\n\n return render_template(\"userregister.html\",form=form)\n\n\[email protected](\"/ngoregister\", methods=[\"GET\",\"POST\"])\ndef ngoregister():\n form = NGOForm(request.form)\n if request.method == 'POST' and form.validate():\n name = form.name.data\n email = form.email.data\n password = form.password.data\n confirm = form.confirm_password.data\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n\n ipaddr = '157.37.154.227' # hard coded till we deploy it\n loc = get_coords(ipaddr)\n lat, lon = loc[0], loc[1]\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is not None:\n flash(\"Email taken\", \"danger\")\n return render_template(\"ngoregister.html\", form=form)\n\n\n if password == confirm:\n db.execute(\"INSERT INTO users (first_name, email, pass, usertype ,lon, lat) VALUES (:fname, :email, :password, :usertype, :lon,:lat)\",\n { \"fname\": name, \"email\": email, \"password\": password, \"usertype\":3, \"lon\":lon, \"lat\":lat})\n db.commit()\n\n email = request.form['email']\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Confirm Email', sender='[email protected]', recipients=[email])\n link = url_for('confirm_email', token=token, _external=True)\n\n msg.body = link\n mail.send(msg)\n flash(\"A confirmation email has been sent. Please confirm your email.\", \"success\")\n return render_template(\"ngoregister.html\", form=form)\n\n else:\n flash(\"Passwords do not match\", \"danger\")\n return render_template(\"ngoregister.html\",form=form)\n\n return render_template(\"ngoregister.html\",form=form)\n\[email protected]('/confirm_email/<token>')\ndef confirm_email(token):\n try:\n email = s.loads(token, salt='email-confirm', max_age=36000)\n flash(\"You are registered. Please login\", \"success\")\n return redirect(url_for('userlogin'))\n except SignatureExpired:\n flash(\"The link has expired. Please login\", \"danger\")\n return render_template(\"userregister.html\")\n\n\[email protected](\"/userlogin\", methods=[\"GET\", \"POST\"])\ndef userlogin():\n form = UserLogin(request.form)\n if request.method == 'POST' and form.validate():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n passwordData = db.execute(\"SELECT pass FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n userTypeData = db.execute(\"SELECT userType FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is None:\n flash(\"Email not found. 
Please try again.\", \"danger\")\n return render_template(\"userlogin.html\", form=form)\n else:\n\n for password_data in passwordData:\n if password==password_data:\n session[\"log\"] = True\n flash(\"You are logged in.\")\n session[\"USER\"] = email\n\n print(userTypeData.userType)\n\n if (int(userTypeData.userType)==2):\n print(\"yes\")\n return redirect(url_for('userhome'))\n elif (int(userTypeData.userType)==3):\n return redirect(url_for('ngohome'))\n else:\n flash(\"Incorrect password\", \"danger\")\n return render_template(\"userlogin.html\",form=form)\n return render_template(\"userlogin.html\",form=form)\n\n\[email protected](\"/forgetpassword\", methods=[\"GET\", \"POST\"])\ndef forget_password():\n if request.form.get(\"email\"):\n email = request.form['email']\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is None:\n flash(\"Email not found. Please try again.\", \"danger\")\n return render_template(\"forgetpassword.html\")\n else:\n new_password = (str('new_password'))\n db.execute(\"UPDATE users SET pass=:password WHERE email=:email\", {\"password\": new_password, \"email\": email})\n db.commit()\n msg = Message('Forget Password',\n sender='[email protected]', recipients=[email])\n msg.body = 'Your new password is: new_password'\n\n mail.send(msg)\n flash(\"Your password was sent to your email.\", \"success\")\n return redirect(url_for('userlogin'))\n return render_template(\"forgetpassword.html\")\n\n\[email protected](\"/logout\")\n@login_required\ndef logout():\n session.clear()\n flash(\"You are logged out\", \"success\")\n return redirect(url_for('userlogin'))\n\[email protected](401)\ndef page_not_found(e):\n return render_template('error.html')\n\[email protected]('/error')\ndef error():\n return render_template('error.html')\n\n\[email protected]('/stop')\ndef stop():\n return render_template('stop.html')\n\n\[email protected](\"/admin\")\ndef admin():\n return render_template(\"adminlogin.html\")\n\n\ndef get_map(loc):\n mymap = Map(identifier=\"view-side\", lat=loc[0], lng=loc[1],\n markers=[{\n 'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png','zoom': 16,\n 'lat': loc[0], 'lng': loc[1], 'infobox': \"<b>Your current location</b>\",\n 'style':'width:500px'}])\n\n return mymap\n\n\[email protected](\"/ngohome\", methods=['GET','POST'])\ndef ngohome():\n # Hardcoded till we deploy it so that we can get user ip addr\n ipaddr = '157.37.154.227'\n loc = get_coords(ipaddr)\n\n # hardcoded can be taken by nearest requesters\n loc2 = [26.9363461, 75.9213346]\n mymap = get_map(loc)\n\n calc_dist = haversine(loc,loc2)\n if calc_dist<20:\n print(\"yes there is a request\")\n\n return render_template(\"ngohome.html\", mymap=mymap)\n\n\[email protected](\"/userhome\", methods=['GET','POST'])\ndef userhome():\n\n # Hardcoded till we deploy it so that we can get user ip addr\n ipaddr = '157.37.154.227'\n loc = get_coords(ipaddr)\n mymap = get_map(loc)\n\n if request.form.get(\"packets\"):\n packets = request.form['packets']\n email = session[\"USER\"]\n current = db.execute(\"SELECT quantity FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if int(packets) < 1:\n flash(\"Invalid Quantity\", \"danger\")\n return redirect(url_for('userhome'))\n if int(packets) > 6:\n flash(\"Maximum 6 from one signup\", \"danger\")\n return redirect(url_for('userhome'))\n if int(current.quantity) != 0:\n flash(\"You already submitted a request\",\"danger\")\n return 
redirect(url_for('userhome'))\n\n else:\n db.execute(\"UPDATE users SET quantity=:quantity WHERE email=:email\", {\"quantity\": packets, \"email\": email})\n db.commit()\n\n flash(\"Your Request has been submitted\", \"success\")\n return redirect(url_for('userhome'))\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n return render_template(\"userhome.html\", mymap= mymap)\n\n\n# Run Store Search and Selection\[email protected]('/details', methods=['GET', 'POST'])\ndef detail():\n form = DetailForm(request.form)\n if request.method == 'POST' and form.validate():\n # Import User Input\n user_address = form.user_address.data\n store_type = form.store_type.data\n radius = form.radius.data \n\n radius = int(radius)\n \n # Find the google place from the user_address input\n user_address_res = requests.get(url + 'query=' + user_address + '&key=' + MyAPI_key)\n x = user_address_res.json()\n\n user_location = x[\"results\"][0][\"geometry\"][\"location\"]\n\n user_latitude = user_location[\"lat\"]\n user_longitude = user_location[\"lng\"]\n \n # Define search area around the user location\n delta = radius*alpha\n\n p1 = (user_latitude-delta, user_longitude-delta)\n p2 = (user_latitude+delta, user_longitude+delta)\n\n if store_type == 'supermarket':\n results = populartimes.get(MyAPI_key, [\"grocery_or_supermarket\"], p1, p2, radius=radius, all_places=False, n_threads=1)\n \n if store_type == 'pharmacy':\n results = populartimes.get(MyAPI_key, [\"pharmacy\"], p1, p2, radius=radius, all_places=False, n_threads=10)\n\n # Find out the current time at the user's location (can only be found by a place details request)\n user_location_id = x[\"results\"][0][\"reference\"]\n\n url_details = \"https://maps.googleapis.com/maps/api/place/details/json?\"\n user_location_details_res = requests.get(url_details+\"key=\"+MyAPI_key+\"&place_id=\" + user_location_id)\n y = user_location_details_res.json()\n\n utc_offset = y[\"result\"][\"utc_offset\"]\n time_now = datetime.utcnow()+timedelta(hours=utc_offset/60)\n \n # Create a list of stores with their activity data (current if available, otherwise average at current time)\n # Closed stores (activity=0) are omitted\n store_list = []\n\n for item in results:\n if \"current_popularity\" in item:\n store_list.append([item[\"current_popularity\"], item[\"name\"], item[\"id\"]])\n else:\n temp = item[\"populartimes\"][time_now.weekday()][\"data\"][time_now.hour]\n if temp != 0:\n store_list.append([temp, item[\"name\"], item[\"id\"]])\n \n # If no Stores are found give out an error\n if len(store_list) == 0:\n # return 'there has been an error: No data available for this choice'\n return redirect(url_for('error'))\n \n # Select the store with the least activity and get its ID and name\n df = pd.DataFrame(store_list)\n store_place_id = df.iloc[df[0].idxmin(), 2]\n store_name = df.iloc[df[0].idxmin(), 1]\n \n # Create google maps link based of store_place_id\n store_gmap_url = \"https://www.google.com/maps/place/?q=place_id:\" + store_place_id\n\n return render_template('stop.html', value=store_name, key=store_gmap_url)\n\n else:\n return render_template('details.html', form=form)\n\n\n@login_manager.user_loader\ndef load_user(userid):\n return db.query.get(userid)\n\n\nif __name__ == '__main__':\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n app.run(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
jmichellehu/vmap
[ "0f631a0e2d625215ea419cd7ea537f6d5a2ccd57" ]
[ "vmap/vmap.py" ]
[ "#! /usr/bin/env python\n\n#David Shean\n#[email protected]\n\n#This script uses ASP correlator to produce disparity maps from two inputs\n#Input data should be orthorectified/mapped in the same projected coordinate system\n#Run disp2v.py to convert to surface velocities\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nfrom datetime import datetime, timedelta\nfrom distutils.spawn import find_executable\n\nfrom osgeo import gdal\nimport numpy as np\n\nfrom pygeotools.lib import warplib, geolib, iolib\nfrom pygeotools.lib.malib import calcperc \nfrom pygeotools.lib.timelib import get_t_factor_fn\n\n#Generate and execute stereo commands\ndef run_cmd(bin, args, **kw):\n #Note, need to add full executable\n binpath = find_executable(bin)\n if binpath is None:\n msg = (\"Unable to find executable %s\\n\" \n \"Install ASP and ensure it is in your PATH env variable\\n\" \n \"https://ti.arc.nasa.gov/tech/asr/intelligent-robotics/ngt/stereo/\" % bin)\n sys.exit(msg)\n call = [binpath,]\n call.extend(args)\n print(' '.join(call))\n try:\n code = subprocess.call(call, shell=False)\n except OSError as e:\n raise Exception('%s: %s' % (binpath, e))\n if code != 0:\n raise Exception('Stereo step ' + kw['msg'] + ' failed')\n\ndef get_stereo_opt(threads=28, kernel=(35,35), nlevels=5, spr=1, timeout=360, erode=0, align='None'):\n stereo_opt = []\n #This is irrelevant\n stereo_opt.extend(['-t', 'pinhole'])\n #Set number of threads/cores to use (will use all CPUs if possible)\n stereo_opt.extend(['--threads', str(threads)])\n #This assumes input images are already mapped \n stereo_opt.extend(['--alignment-method', align])\n #This will attempt to remove most of the offset between two images, for relative offsets\n #stereo_opt.extend(['--alignment-method', 'Homography'])\n #stereo_opt.append('--ip-debug-images')\n #This should be explored further\n stereo_opt.append('--individually-normalize')\n #Integer correlator kernel size\n stereo_opt.extend(['--corr-kernel', str(kernel[0]), str(kernel[1])])\n stereo_opt.extend(['--corr-max-levels', str(nlevels)])\n if timeout > 0:\n stereo_opt.extend(['--corr-timeout', str(timeout)])\n #Define the search area\n #Useful if you know your orhotorectification is good to, say 100 pixels in any direction\n #stereo_opt.extend(['--corr-search', '-100', '-100', '100', '100'])\n stereo_opt.extend(['--subpixel-mode', str(spr)])\n #If using Semi-global matching (spr 0):\n if spr > 3:\n #Use SGM\n stereo_opt.extend(['--stereo-algorithm', '1'])\n #Use MGM\n #stereo_opt.extend(['--stereo-algorithm', '2'])\n #bro nodes had 128 GB of RAM, 28 threads, ~4.7 GB/thread\n stereo_opt.extend(['--corr-tile-size', '3600'])\n stereo_opt.extend(['--xcorr-threshold', '-1'])\n stereo_opt.extend(['--median-filter-size', '5'])\n stereo_opt.extend(['--texture-smooth-size', '11'])\n else:\n #Sub-pixel kernel size (ASP default is 35)\n #Set to same as integer correlator kernel\n stereo_opt.extend(['--subpixel-kernel', str(kernel[0]), str(kernel[1])])\n #Note: stereo_fltr throws out a lot of good data when noisy\n #Want to play with the following options\n #--rm-half-kernel 5 5\n #--rm_min_matches 60\n #--rm-threshold 3\n if erode > 0:\n stereo_opt.extend(['--erode-max-size', str(erode)])\n return stereo_opt\n\ndef make_ln(outdir, outprefix, ext):\n #Create symbolic links with appropriate names \n ln_fn = os.path.join(outdir, outdir+ext)\n if os.path.lexists(ln_fn):\n os.remove(ln_fn)\n os.symlink(os.path.split(outprefix)[1]+ext, ln_fn)\n return ln_fn\n\ndef gen_d_sub(d_sub_fn, dx, dy, 
pad_perc=0.1, ndv=-9999):\n nl = dx.shape[0]\n ns = dx.shape[1]\n #Use GDT_Byte or GDT_Int16 to save space?\n dtype = gdal.GDT_Int32\n opt = iolib.gdal_opt\n d_sub_ds = iolib.gtif_drv.Create(d_sub_fn, ns, nl, 3, dtype, opt)\n d_sub_ds.GetRasterBand(1).WriteArray(np.rint(dx.filled(ndv)).astype(np.int32))\n d_sub_ds.GetRasterBand(2).WriteArray(np.rint(dy.filled(ndv)).astype(np.int32))\n d_sub_ds.GetRasterBand(3).WriteArray((~dx.mask).astype(np.int32))\n for n in range(1, d_sub_ds.RasterCount+1):\n band = d_sub_ds.GetRasterBand(n)\n band.SetNoDataValue(float(ndv))\n d_sub_ds = None\n\n #Now write D_sub_spread.tif - defines spread around D_sub values\n d_sub_ds = iolib.fn_getds(d_sub_fn)\n d_sub_spread_fn = os.path.splitext(d_sub_fn)[0]+'_spread.tif'\n d_sub_spread_ds = iolib.gtif_drv.CreateCopy(d_sub_spread_fn, d_sub_ds, 0)\n dx_spread = np.ma.abs(dx * pad_perc)\n dy_spread = np.ma.abs(dy * pad_perc)\n d_sub_spread_ds.GetRasterBand(1).WriteArray(np.rint(dx_spread.filled(ndv)).astype(np.int32))\n d_sub_spread_ds.GetRasterBand(2).WriteArray(np.rint(dy_spread.filled(ndv)).astype(np.int32))\n d_sub_spread_ds.GetRasterBand(3).WriteArray((~dx_spread.mask).astype(np.int32))\n for n in range(1, d_sub_spread_ds.RasterCount+1):\n band = d_sub_spread_ds.GetRasterBand(n)\n band.SetNoDataValue(float(ndv))\n d_sub_spread_ds = None\n #Copy proj/gt to D_sub and D_sub_spread?\n\n#Return ndarray with h, v, m\ndef get_vel(fn, fill=True):\n ds = gdal.Open(fn)\n if fill:\n import dem_downsample_fill\n ds = dem_downsample_fill.gdalfill_ds(ds)\n u_b = ds.GetRasterBand(1)\n v_b = ds.GetRasterBand(2)\n u = iolib.b_getma(u_b)\n v = iolib.b_getma(v_b)\n m = np.ma.sqrt(u*u + v*v)\n return u, v, m\n\ndef getparser():\n parser = argparse.ArgumentParser(description=\"Generate velocity map via feature-tracking\")\n parser.add_argument('-outdir', default=None, help='Output directory')\n parser.add_argument('-threads', type=int, default=iolib.cpu_count(), help='Number of threads to use(default: %(default)s)')\n parser.add_argument('-tr', default='min', help='Output resolution (default: %(default)s)')\n #Set correlator kernel size\n parser.add_argument('-kernel', type=int, default=35, help='Correlator kernel size. Smaller kernels offer more detail but are prone to more noise. Odd integers required (~9-51 px recommended). (default: %(default)s)')\n align_choices = ['AffineEpipolar', 'Homography', 'Epipolar', 'None']\n parser.add_argument('-align', default='None', choices=align_choices, help='Alignment method to warp second image to match first image, if not already orthorectified. Provides flexibility for L1B inputs')\n #Integer correlator seeding\n #D_sub is low-resolution correlation (default), which works well for most situations\n #sparse_disp will use sparse seeding from full-res chips, useful for ice sheets with limited low-frequency texture\n #existing_velocity will accept existing vx and vy rasters. Useful for limiting search range and limiting blunders. 
Measures products are useful for ice sheets.\n    seedmode_choices = ['D_sub', 'sparse_disp', 'existing_velocity']\n    parser.add_argument('-seedmode', type=str, choices=seedmode_choices, default='D_sub', help='Seeding option (default: %(default)s)')\n    parser.add_argument('-vx_fn', type=str, default=None, help='Seed E-W velocity map filename')\n    parser.add_argument('-vy_fn', type=str, default=None, help='Seed N-S velocity map filename')\n\n    #Sub-pixel refinement\n    #0) None, 1) Parabolic, 2) Bayes, 3) AffineAdaptive\n    #See ASP doc or Shean et al, ISPRS, (2016)\n    #1 is fast but lower quality\n    #2 is slow but highest quality, \n    #3 is a good compromise for speed and quality\n    refinement_choices = list(range(12))\n    parser.add_argument('-refinement', type=int, default=1, help='Sub-pixel refinement type (see ASP doc): 0) None, 1) Parabolic, 2) Bayes, 3) AffineAdaptive 4) LK, 5) Bayes w/gamma, 6) SGM Linear, 7) SGM Poly4, 8) SGM Cos, 9) SGM Parabola, 10) SGM None, 11) SGM Blend (default: %(default)s)')\n    #Number of gaussian pyramids to use\n    #Can look at texture in GDAL overviews to make a decision\n    #If you can see plenty of texture at 1/32 resolution, go with 5 \n    #For featureless areas, limiting to 2 can help, or even 0\n    parser.add_argument('-pyramid-levels', type=int, default=5, help='Number of pyramid levels for correlation (default: %(default)s)')\n    #This helps get rid of bogus \"islands\" in the disparity maps\n    parser.add_argument('-erode', type=int, default=1024, help='Erode isolated blobs smaller than this many pixels. Set to 0 to disable (default: %(default)s)')\n    parser.add_argument('-filter', action='store_true', help='Filter the output F.tif, smoothing with Gaussian filter')\n    #This masks input images to improve performance. Useful for forested areas.\n    parser.add_argument('-mask_input', action='store_true', help='Mask any vegetation/water in input images. Requires demcoreg')\n    parser.add_argument('-remove_offsets', action='store_true', help='Remove median horizontal and vertical offsets over stable control surfaces')\n    parser.add_argument('-dt', type=str, choices=['yr','day','none'], default='yr', help='Time increment (default: %(default)s)')\n\n    #Inputs can be images, DEMs, shaded relief maps\n    #Personal experience suggests multi-directional hillshades with identical illumination work well\n    #Only 2 input datasets allowed for this - want to stay modular\n    parser.add_argument('fn1', type=str, help='Raster filename 1')\n    parser.add_argument('fn2', type=str, help='Raster filename 2')\n    return parser\n\ndef main():\n    parser = getparser()\n    args = parser.parse_args()\n    if args.seedmode == 'existing_velocity':\n        if args.vx_fn is None or args.vy_fn is None:\n            parser.error('\"-seedmode existing_velocity\" requires \"-vx_fn\" and \"-vy_fn\"')\n\n    print('\\n%s' % datetime.now())\n    print('%s UTC\\n' % datetime.utcnow())\n\n    align = args.align\n    seedmode = args.seedmode\n    spr = args.refinement\n    erode = args.erode\n    #Correlator tile timeout\n    #With proper seeding, correlation should be very fast\n    #timeout = 360 \n    timeout = 1200 \n    threads = args.threads\n\n    kernel = (args.kernel, args.kernel)\n    #SGM correlator\n    if spr > 3:\n        #kernel = (7,7)\n        kernel = (11,11)\n        erode = 0\n\n    #Smooth the output F.tif \n    smoothF = args.filter \n\n    res = args.tr\n    #Resample input to something easier to work with\n    #res = 4.0\n\n    #Open input files\n    fn1 = args.fn1\n    fn2 = args.fn2 \n\n    if not iolib.fn_check(fn1) or not iolib.fn_check(fn2):\n        sys.exit(\"Unable to locate input files\")\n\n    if args.outdir is not None:\n        outdir = args.outdir\n    else:\n        outdir = '%s__%s_vmap_%sm_%ipx_spm%i' % (os.path.splitext(os.path.split(fn1)[1])[0], \\\n                os.path.splitext(os.path.split(fn2)[1])[0], res, kernel[0], spr)\n\n    #Note, can encounter filename length issues in boost, just use vmap prefix\n    outprefix = '%s/vmap' % (outdir)\n    if not os.path.exists(outdir):\n        os.makedirs(outdir)\n\n    #Check to see if inputs have geolocation and projection information\n    ds1 = iolib.fn_getds(fn1)\n    ds2 = iolib.fn_getds(fn2)\n\n    if geolib.srs_check(ds1) and geolib.srs_check(ds2):\n        ds1_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn1))[0]+'_warp.tif')\n        ds2_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn2))[0]+'_warp.tif')\n\n        if not os.path.exists(ds1_clip_fn) or not os.path.exists(ds2_clip_fn):\n            #This should write out files to new subdir\n            ds1_clip, ds2_clip = warplib.diskwarp_multi_fn([fn1, fn2], extent='intersection', res=res, r='average', outdir=outdir)\n            ds1_clip = None\n            ds2_clip = None\n        #However, if inputs have identical extent/res/proj, then link to original files\n        if not os.path.exists(ds1_clip_fn):\n            os.symlink(os.path.abspath(fn1), ds1_clip_fn)\n        if not os.path.exists(ds2_clip_fn):\n            os.symlink(os.path.abspath(fn2), ds2_clip_fn)\n        align = 'None'\n\n        #Mask support - limit correlation only to rock/ice surfaces, no water/veg\n        #This masks input images - guarantee we won't waste time correlating over vegetation\n        #TODO: Add support to load arbitrary raster or shp mask\n        if args.mask_input:\n            ds1_masked_fn = os.path.splitext(ds1_clip_fn)[0]+'_masked.tif'\n            ds2_masked_fn = os.path.splitext(ds2_clip_fn)[0]+'_masked.tif'\n\n            if not os.path.exists(ds1_masked_fn) or not os.path.exists(ds2_masked_fn):\n                #Load NLCD or bareground mask\n                from demcoreg.dem_mask import get_lulc_mask\n\n                ds1_clip = iolib.fn_getds(ds1_clip_fn)\n                lulc_mask_fn = os.path.join(outdir, 
'lulc_mask.tif')\n #if not os.path.exists(nlcd_mask_fn):\n lulc_mask = get_lulc_mask(ds1_clip, mask_glaciers=False, filter='not_forest')\n iolib.writeGTiff(lulc_mask, lulc_mask_fn, ds1_clip) \n ds1_clip = None\n\n #Now apply to original images \n #This could be problematic for huge inputs, see apply_mask.py\n #lulc_mask = lulc_mask.astype(int)\n for fn in (ds1_clip_fn, ds2_clip_fn):\n ds = iolib.fn_getds(fn)\n a = iolib.ds_getma(ds)\n a = np.ma.array(a, mask=~(lulc_mask))\n if a.count() > 0:\n out_fn = os.path.splitext(fn)[0]+'_masked.tif'\n iolib.writeGTiff(a,out_fn,ds)\n a = None\n else:\n sys.exit(\"No unmasked pixels over bare earth\")\n ds1_clip_fn = ds1_masked_fn\n ds2_clip_fn = ds2_masked_fn\n else:\n ds1_clip_fn = fn1\n ds2_clip_fn = fn2\n #Now let user specify alignment methods as option - don't hardcode\n #align = 'Homography'\n #align = 'AffineEpipolar'\n ds1 = None\n ds2 = None\n\n #Should have extra kwargs option here\n stereo_opt = get_stereo_opt(threads=threads, kernel=kernel, timeout=timeout, \\\n erode=erode, spr=spr, align=align)\n \n #Stereo arguments\n #Latest version of ASP should accept tif without camera models\n #stereo_args = [ds1_clip_fn, ds2_clip_fn, outprefix]\n #Nope - still need to provide dummy camera models, and they must be unique files\n #Use the dummy.tsai file bundled in the vmap repo\n dummy_tsai = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'dummy.tsai')\n dummy_tsai2 = os.path.splitext(dummy_tsai)[0]+'2.tsai'\n if not os.path.exists(dummy_tsai2):\n dummy_tsai2 = os.symlink(dummy_tsai, os.path.splitext(dummy_tsai)[0]+'2.tsai')\n stereo_args = [ds1_clip_fn, ds2_clip_fn, dummy_tsai, dummy_tsai2, outprefix]\n\n #Run stereo_pprc\n if not os.path.exists(outprefix+'-R_sub.tif'):\n run_cmd('stereo_pprc', stereo_opt+stereo_args, msg='0: Preprocessing')\n #Copy proj info to outputs, this should happen automatically now?\n for ext in ('L', 'R', 'L_sub', 'R_sub', 'lMask', 'rMask', 'lMask_sub', 'rMask_sub'):\n geolib.copyproj(ds1_clip_fn, '%s-%s.tif' % (outprefix,ext))\n\n #Prepare seeding for stereo_corr\n #TODO: these are untested after refactoring\n if not os.path.exists(outprefix+'_D_sub.tif'):\n #Don't need to do anything for default seed-mode 1\n if seedmode == 'sparse_disp':\n #Sparse correlation of full-res images\n stereo_opt.extend(['--corr-seed-mode', '3'])\n sparse_disp_opt = []\n sparse_disp_opt.extend(['--Debug', '--coarse', '512', '--fine', '256', '--no_epipolar_fltr']) \n sparse_disp_opt.extend(['-P', str(threads)])\n sparse_disp_args = [outprefix+'-L.tif', outprefix+'-R.tif', outprefix]\n run_cmd('sparse_disp', sparse_disp_opt+sparse_disp_args, msg='0.5: D_sub generation')\n elif seedmode == 'existing_velocity':\n #User-input low-res velocity maps for seeding\n #TODO: Add functions that fetch best available velocities for Ant/GrIS or user-defined low-res velocities\n #Automatically query GoLive velocities here\n vx_fn = args.vx_fn \n vy_fn = args.vy_fn \n #Check for existence\n\n #HMA seeding\n vdir = '/nobackup/deshean/rpcdem/hma/velocity_jpl_amaury_2013-2015'\n vx_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.x_vel.TIF')\n vy_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.y_vel.TIF')\n\n if os.path.exists(vx_fn) and os.path.exists(vy_fn):\n ds1_clip = iolib.fn_getds(ds1_clip_fn)\n ds1_res = geolib.get_res(ds1_clip, square=True)[0]\n\n #Compute L_sub res - use this for output dimensions\n L_sub_fn = outprefix+'-L_sub.tif' \n L_sub_ds = gdal.Open(L_sub_fn)\n L_sub_x_scale = 
float(ds1_clip.RasterXSize) / L_sub_ds.RasterXSize\n L_sub_y_scale = float(ds1_clip.RasterYSize) / L_sub_ds.RasterYSize\n L_sub_scale = np.max([L_sub_x_scale, L_sub_y_scale])\n L_sub_res = ds1_res * L_sub_scale\n\n #Since we are likely upsampling here, use cubicspline\n vx_ds_clip, vy_ds_clip = warplib.memwarp_multi_fn([vx_fn, vy_fn], extent=ds1_clip, \\\n t_srs=ds1_clip, res=L_sub_res, r='cubicspline')\n\n ds1_clip = None\n\n #Get vx and vy arrays\n vx = iolib.ds_getma(vx_ds_clip)\n vy = iolib.ds_getma(vy_ds_clip)\n\n #Determine time interval between inputs\n #Use to scaling of known low-res velocities\n t_factor = get_t_factor_fn(ds1_clip_fn, ds2_clip_fn, ds=vx_ds_clip)\n\n if t_factor is not None:\n #Compute expected offset in scaled pixels \n dx = (vx*t_factor)/L_sub_res\n dy = (vy*t_factor)/L_sub_res\n #Note: Joughin and Rignot's values are positive y up!\n #ASP is positive y down, so need to multiply these values by -1\n #dy = -(vy*t_factor)/L_sub_res\n\n #Should smooth/fill dx and dy\n\n #If absolute search window is only 30x30\n #Don't seed, just use fixed search window \n #search_window_area_thresh = 900\n search_window_area_thresh = 0 \n search_window = np.array([dx.min(), dy.min(), dx.max(), dy.max()])\n dx_p = calcperc(dx, perc=(0.5, 99.5))\n dy_p = calcperc(dy, perc=(0.5, 99.5))\n search_window = np.array([dx_p[0], dy_p[0], dx_p[1], dy_p[1]])\n search_window_area = (search_window[2]-search_window[0]) * (search_window[3]-search_window[1])\n if search_window_area < search_window_area_thresh:\n stereo_opt.extend(['--corr-seed-mode', '0'])\n stereo_opt.append('--corr-search')\n stereo_opt.extend([str(x) for x in search_window])\n #pad_perc=0.1\n #stereo_opt.extend(['--corr-sub-seed-percent', str(pad_perc)]\n #Otherwise, generate a D_sub map from low-res velocity\n else:\n stereo_opt.extend(['--corr-seed-mode', '3'])\n #This is relative to the D_sub scaled disparities\n d_sub_fn = L_sub_fn.split('-L_sub')[0]+'-D_sub.tif' \n gen_d_sub(d_sub_fn, dx, dy)\n\n #If the above didn't generate a D_sub.tif for seeding, run stereo_corr to generate Low-res D_sub.tif\n if not os.path.exists(outprefix+'-D_sub.tif'):\n newopt = ['--compute-low-res-disparity-only',]\n run_cmd('stereo_corr', newopt+stereo_opt+stereo_args, msg='1.1: Low-res Correlation')\n #Copy projection info to D_sub\n geolib.copyproj(outprefix+'-L_sub.tif', outprefix+'-D_sub.tif')\n \n #Mask D_sub to limit correlation over bare earth surfaces\n #This _should_ be a better approach than masking input images, but stereo_corr doesn't honor D_sub\n #Still need to mask input images before stereo_pprc\n #Left this in here for reference, or if this changes in ASP\n if False:\n D_sub_ds = gdal.Open(outprefix+'-D_sub.tif', gdal.GA_Update)\n\n #Mask support - limit correlation only to rock/ice surfaces, no water/veg\n from demcoreg.dem_mask import get_nlcd, mask_nlcd\n nlcd_fn = get_nlcd()\n nlcd_ds = warplib.diskwarp_multi_fn([nlcd_fn,], extent=D_sub_ds, res=D_sub_ds, t_srs=D_sub_ds, r='near', outdir=outdir)[0]\n #validmask = mask_nlcd(nlcd_ds, valid='rock+ice')\n validmask = mask_nlcd(nlcd_ds, valid='not_forest', mask_glaciers=False)\n nlcd_mask_fn = os.path.join(outdir, 'nlcd_validmask.tif')\n iolib.writeGTiff(validmask, nlcd_mask_fn, nlcd_ds) \n\n #Now apply to D_sub (band 3 is valid mask)\n #validmask = validmask.astype(int)\n for b in (1,2,3):\n dsub = iolib.ds_getma(D_sub_ds, b)\n dsub = np.ma.array(dsub, mask=~(validmask))\n D_sub_ds.GetRasterBand(b).WriteArray(dsub.filled())\n D_sub_ds = None\n\n #OK, finally run stereo_corr 
full-res integer correlation with appropriate seeding\n if not os.path.exists(outprefix+'-D.tif'):\n run_cmd('stereo_corr', stereo_opt+stereo_args, msg='1: Correlation')\n geolib.copyproj(ds1_clip_fn, outprefix+'-D.tif')\n\n #Run stereo_rfne\n if spr > 0:\n if not os.path.exists(outprefix+'-RD.tif'):\n run_cmd('stereo_rfne', stereo_opt+stereo_args, msg='2: Refinement')\n geolib.copyproj(ds1_clip_fn, outprefix+'-RD.tif')\n d_fn = make_ln(outdir, outprefix, '-RD.tif')\n else:\n ln_fn = outprefix+'-RD.tif'\n if os.path.lexists(ln_fn):\n os.remove(ln_fn)\n os.symlink(os.path.split(outprefix)[1]+'-D.tif', ln_fn)\n\n #Run stereo_fltr\n if not os.path.exists(outprefix+'-F.tif'):\n run_cmd('stereo_fltr', stereo_opt+stereo_args, msg='3: Filtering')\n geolib.copyproj(ds1_clip_fn, outprefix+'-F.tif')\n\n d_fn = make_ln(outdir, outprefix, '-F.tif')\n\n if smoothF and not os.path.exists(outprefix+'-F_smooth.tif'):\n print('Smoothing F.tif')\n from pygeotools.lib import filtlib \n #Fill holes and smooth F\n F_fill_fn = outprefix+'-F_smooth.tif'\n F_ds = gdal.Open(outprefix+'-F.tif', gdal.GA_ReadOnly)\n #import dem_downsample_fill\n #F_fill_ds = dem_downsample_fill.gdalfill_ds(F_fill_ds)\n print('Creating F_smooth.tif')\n F_fill_ds = iolib.gtif_drv.CreateCopy(F_fill_fn, F_ds, 0, options=iolib.gdal_opt)\n F_ds = None\n for n in (1, 2):\n print('Smoothing band %i' % n)\n b = F_fill_ds.GetRasterBand(n)\n b_fill_bma = iolib.b_getma(b)\n #b_fill_bma = iolib.b_getma(dem_downsample_fill.gdalfill(b))\n #Filter extreme values (careful, could lose areas of valid data with fastest v)\n #b_fill_bma = filtlib.perc_fltr(b_fill_bma, perc=(0.01, 99.99))\n #These filters remove extreme values and fill data gaps\n #b_fill_bma = filtlib.median_fltr_skimage(b_fill_bma, radius=7, erode=0)\n #b_fill_bma = filtlib.median_fltr(b_fill_bma, fsize=7, origmask=True)\n #Gaussian filter\n b_fill_bma = filtlib.gauss_fltr_astropy(b_fill_bma, size=9)\n b.WriteArray(b_fill_bma)\n F_fill_ds = None\n d_fn = make_ln(outdir, outprefix, '-F_smooth.tif')\n\n print('\\n%s' % datetime.now())\n print('%s UTC\\n' % datetime.utcnow())\n\n #If time interval is specified, convert pixel displacements to rates\n if args.dt != 'none':\n #Check if vm.tif already exists\n #Should probably just overwrite by default\n #if os.path.exists(os.path.splitext(d_fn)[0]+'_vm.tif'):\n # print(\"\\nFound existing velocity magnitude map!\\n\"\n #else:\n #Generate output velocity products and figure\n #Requires that vmap repo is in PATH\n cmd = ['disp2v.py', d_fn]\n #Note: this will attempt to automatically determine control surfaces\n #disp2v.py will accept arbitrary mask, could pass through here\n if args.remove_offsets:\n cmd.append('-remove_offsets')\n cmd.extend(['-dt', args.dt])\n print(\"Converting disparities to velocities\")\n print(cmd)\n subprocess.call(cmd)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ma.array", "numpy.max", "numpy.ma.sqrt", "numpy.array", "numpy.ma.abs" ] ]
SobhanOmranian/spark-dca
[ "cd0c5ddbe433a1772442456549e37bf8838f75e3" ]
[ "scripts/figure10_experiment[static_solution_ssd]/plot_dca_static_ssd.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import savefig\nimport matplotlib.patches as mpatches\nimport pandas as pd\nimport numpy as np\nfrom numpy import dtype\nfrom matplotlib.pyplot import ylabel\nimport math\nimport re\n\nimport sys\nsys.path.append(f'./common')\nimport util\nfrom util import *\nsetup(util.setting_font_size_2, util.style_plot)\n\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"-i\", \"--inputFile\")\nparser.add_argument(\"-o\", \"--outputFilePath\")\n\nargs = parser.parse_args(sys.argv[1:])\n\ninputFileName = args.inputFile\noutputFilePath = args.outputFilePath\n\ndef normalise (x, min, max):\n return (x-min)/(max-min)\ndef get_percentage_change(current, previous):\n if current == previous:\n return 100.0\n try:\n return round ( ((current - previous) / previous) * 100.0 , 2) \n except ZeroDivisionError:\n return 0\n\nappNames = ['lda',\n 'SVM',\n 'terasort-10000000r',\n 'terasort-100000000r',\n 'terasort-300000000r',\n 's0-s1-terasort-1200000000r',\n 'terasort-hdfs-Main-1200000000r',\n 'terasort',\n 'nweight',\n 'SparseNaiveBayes',\n 'Join',\n 'Aggregation',\n 'Scan',\n 'ScalaPageRank',\n 'WordCount', \n ]\nappNamesDict = {\n 'terasort-hdfs-Main-1200000000r' : 'terasort-120GB'\n \n }\n\nmy_colors = [\n util.color_default,\n util.color_16c,\n util.color_8c,\n util.color_4c,\n util.color_2c,\n 'purple'\n ]\n\ncolor_dict = {\n 64: util.color_64c,\n 32 : util.color_default,\n 16 : util.color_16c,\n 8 : util.color_8c,\n 4 : util.color_4c,\n 2 : util.color_2c,\n }\n\nc64_patch = mpatches.Patch(color=util.color_64c, label='64c')\nc32_patch = mpatches.Patch(color=util.color_default, label='Default (32)')\nc16_patch = mpatches.Patch(color=util.color_16c, label='16 threads')\nc8_patch = mpatches.Patch(color=util.color_8c, label='8 threads')\nc4_patch = mpatches.Patch(color=util.color_4c, label='4 threads')\nc2_patch = mpatches.Patch(color=util.color_2c, label='2 threads')\n\ndef getAppNameShort(longName):\n shortName = \"unknown\"\n for appName in appNames:\n if(appName in longName):\n if appName in appNamesDict:\n dictName = appNamesDict[appName]\n shortName = dictName\n else:\n shortName = appName\n break\n return shortName\n\ndef extractNumberOfCores(appName):\n result = None\n matches = re.findall(r\"[0-9]+c\", appName)\n for matchNum, match in enumerate(matches):\n numberMatches = re.findall(\"[0-9]+\", match) \n for mNum, m in enumerate(numberMatches):\n result = m\n return result\n\n# data/dca/dca_static_ssd.csv\ndata = pd.read_csv(inputFileName, dtype={\n 'stage': int,\n 'duration': float,\n 'usedCores': int,\n 'totalCores': int,\n 'adaptive': int,\n 'isIo': int\n })\ndata['appName'] = data['appName'].apply (lambda x: x.split(\"^\", 1)[0])\ndata['appName'] = data['appName'].apply (lambda x: re.sub(r\"-[0-9]+c-\", '-', x))\ndata[\"duration\"] = data[\"duration\"].apply(lambda s: s / 1000)\nmean_data = data.groupby([\"appName\", \"stage\"]).mean()\nmean_data = mean_data.reset_index()\nappCount = len(data[\"appName\"].unique().tolist())\n\ndef getMeanAndTransformBack(df, name):\n mean = df.groupby([\"stage\", \"usedCores\"]).mean()\n mean = mean.reset_index()\n mean['appName'] = name\n return mean\n\ndef findMinimumDuration(group):\n # Filter default rows\n group = group.loc[group[\"totalCores\"] != 128]\n sum = group.groupby([\"usedCores\"])['duration'].sum()\n print(sum)\n minUsedCores = sum.idxmin()\n numCores = int( minUsedCores / numNodes)\n print(numCores,sum[minUsedCores])\n return 
numCores,sum[minUsedCores]\n\ndfgroup = data.groupby([\"appName\"], sort=False)\ni = 0\nnumNodes = 4\nnumberOfRows = math.ceil(appCount / 2)\nprint(f\"numberOfRows- {numberOfRows}\") \nfor name, group in dfgroup:\n fig = plt.figure(figsize=(6,5))\n \n # Find mean for the default\n mean_default = group.loc[group[\"totalCores\"] == 128 ]\n mean_default = getMeanAndTransformBack(mean_default, name)\n print(\"MEAN DEFAULT:\")\n print(mean_default)\n # Find the default duration\n mean_default_duration = mean_default['duration'].sum()\n print(f\"Default duration: {mean_default_duration}\")\n \n\n pos = 0\n numExperiments = len(group[\"totalCores\"].unique())\n previous_values = np.array(numExperiments)\n \n # Get all the non-default rows\n group = group.loc[group[\"totalCores\"] != 128 ]\n \n\n\n group = getMeanAndTransformBack(group, name)\n \n \n # Concat the mean default which we found earlier with non default rows\n for namerow, row in group.iterrows():\n if row[\"isIo\"] == 0:\n mean_default_row = (mean_default.loc[mean_default[\"stage\"] == row[\"stage\"]])\n group.loc[namerow, \"duration\"] = mean_default_row[\"duration\"].values\n group = pd.concat([mean_default, group])\n \n group = group.sort_values([\"totalCores\", \"stage\"], ascending=[False, True])\n print(\"Updated group:\")\n print(group)\n\n # Find the minimum duration\n minCores, minDuration = findMinimumDuration(group)\n print(f\"Min Cores: {minCores} and Min Duration: {minDuration}\")\n percentageChange = get_percentage_change(minDuration, mean_default_duration)\n print(f\"Percentage change from 32c to {minCores}c: {percentageChange}%\")\n\n for name2, group2 in group.groupby([\"stage\"], sort=False):\n group2 = group2.reset_index()\n colors = []\n for namerow, row in group2.iterrows():\n if row['isIo'] == 0:\n colors.append(util.color_default)\n else:\n colors.append(color_dict[row[\"usedCores\"] / 4])\n \n \n dataset = group2[\"duration\"].values\n \n # Find min in each stage\n minRow = group2.loc[group2[\"duration\"].idxmin()]\n assert isinstance(minRow, pd.Series)\n\n dataset = np.append(dataset,minRow[\"duration\"])\n colors.append(color_dict[minRow[\"usedCores\"] / 4])\n \n x =[32,16,8,4,2, 'bestfit']\n xs = range(len(x))\n y = dataset\n \n \n barlist = plt.bar(xs, y, bottom= previous_values, color='black', width=0.5, linewidth=2, edgecolor='black')\n i = 0\n for bar in barlist:\n bar.set_facecolor(colors[i])\n bar.set_linewidth(0.7)\n bar.set_edgecolor(util.color_stage_border)\n i = i +1 \n \n plt.xticks(xs, x)\n previous_values = np.array(previous_values) + np.array(dataset)\n default_patch = mpatches.Patch(color='black', label='Default')\n static_patch = mpatches.Patch(color='r', label='Static')\n legend = plt.legend(handles=[c32_patch, c16_patch, c8_patch, c4_patch, c2_patch], frameon=1)\n frame = legend.get_frame()\n frame.set_facecolor('lightgrey')\n plt.xlabel(formatLabelForLatex(\"Number of Threads\"))\n plt.ylabel(formatLabelForLatex(\"Runtime (s)\"))\n# /Users/sobhan/scala-ide-workspace-spark/spark/publication/Big Data Paradigm/img\n savefig(f\"{outputFilePath}/dca_static_ssd_{name}.pdf\",dpi=100, bbox_inches='tight')\n\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "numpy.append", "matplotlib.pyplot.xticks", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "pandas.concat", "numpy.array", "matplotlib.patches.Patch", "matplotlib.pyplot.bar" ] ]
shenghh2015/automl
[ "8c2cca6e35b1f27adf4ad709128aa586badc8a76" ]
[ "efficientdet/backbone/efficientnet_builder_test.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for efficientnet_builder.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom backbone import efficientnet_builder\n\n\nclass EfficientnetBuilderTest(tf.test.TestCase):\n\n def _test_model_params(self,\n model_name,\n input_size,\n expected_params,\n override_params=None,\n features_only=False,\n pooled_features_only=False):\n images = tf.zeros((1, input_size, input_size, 3), dtype=tf.float32)\n efficientnet_builder.build_model(\n images,\n model_name=model_name,\n override_params=override_params,\n training=False,\n features_only=features_only,\n pooled_features_only=pooled_features_only)\n num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])\n self.assertEqual(num_params, expected_params)\n\n def test_efficientnet_b0(self):\n self._test_model_params('efficientnet-b0', 224, expected_params=5288548)\n\n def test_efficientnet_b1(self):\n self._test_model_params('efficientnet-b1', 240, expected_params=7794184)\n\n def test_efficientnet_b2(self):\n self._test_model_params('efficientnet-b2', 260, expected_params=9109994)\n\n def test_efficientnet_b3(self):\n self._test_model_params('efficientnet-b3', 300, expected_params=12233232)\n\n def test_efficientnet_b4(self):\n self._test_model_params('efficientnet-b4', 380, expected_params=19341616)\n\n def test_efficientnet_b5(self):\n self._test_model_params('efficientnet-b5', 456, expected_params=30389784)\n\n def test_efficientnet_b6(self):\n self._test_model_params('efficientnet-b6', 528, expected_params=43040704)\n\n def test_efficientnet_b7(self):\n self._test_model_params('efficientnet-b7', 600, expected_params=66347960)\n\n def test_efficientnet_b0_with_customized_num_classes(self):\n self._test_model_params(\n 'efficientnet-b0',\n 224,\n expected_params=4135648,\n override_params={'num_classes': 100})\n\n def test_efficientnet_b0_with_features_only(self):\n self._test_model_params(\n 'efficientnet-b0', 224, features_only=True, expected_params=3595388)\n\n def test_efficientnet_b0_with_pooled_features_only(self):\n self._test_model_params(\n 'efficientnet-b0',\n 224,\n pooled_features_only=True,\n expected_params=4007548)\n\n def test_efficientnet_b0_fails_if_both_features_requested(self):\n with self.assertRaises(AssertionError):\n efficientnet_builder.build_model(\n None,\n model_name='efficientnet-b0',\n training=False,\n features_only=True,\n pooled_features_only=True)\n\n def test_efficientnet_b0_base(self):\n # Creates a base model using the model configuration.\n images = tf.zeros((1, 224, 224, 3), dtype=tf.float32)\n _, endpoints = efficientnet_builder.build_model_base(\n images, model_name='efficientnet-b0', training=False)\n\n # reduction_1 to reduction_5 
should be in endpoints\n self.assertIn('reduction_1', endpoints)\n self.assertIn('reduction_5', endpoints)\n # reduction_5 should be the last one: no reduction_6.\n self.assertNotIn('reduction_6', endpoints)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n # Disable eager to allow tf.profile works for #params/#flops.\n tf.disable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.disable_eager_execution", "tensorflow.compat.v1.zeros", "numpy.prod", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.trainable_variables" ] ]
shenjl/PStudy
[ "d49c710524781b604acf4cb3d28b774ab09ff73d" ]
[ "PythonStudy/Tensorflow4mnist/mnist/v1/train.py" ]
[ "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom model import Network\n\n'''\npython 3.6\ntensorflow 1.4\n'''\n\n\nclass Train:\n def __init__(self):\n self.net = Network()\n\n # 初始化 session\n # Network() 只是构造了一张计算图,计算需要放到会话(session)中\n self.sess = tf.Session()\n # 初始化变量\n self.sess.run(tf.global_variables_initializer())\n\n # 读取训练和测试数据,这是tensorflow库自带的,不存在训练集会自动下载\n # 项目目录下已经下载好,删掉后,重新运行代码会自动下载\n # data_set/train-images-idx3-ubyte.gz\n # data_set/train-labels-idx1-ubyte.gz\n # data_set/t10k-images-idx3-ubyte.gz\n # data_set/t10k-labels-idx1-ubyte.gz\n # self.data = input_data.read_data_sets('../data_set', one_hot=True)\n self.data = input_data.read_data_sets('Tensorflow-mnist\\mnist\\data_set', one_hot=True)\n\n def train(self):\n # batch_size 是指每次迭代训练,传入训练的图片张数。\n # 数据集小,可以使用全数据集,数据大的情况下,\n # 为了提高训练速度,用随机抽取的n张图片来训练,效果与全数据集相近\n # https://www.zhihu.com/question/32673260\n batch_size = 64\n\n # 总的训练次数\n train_step = 2000\n\n # 开始训练\n for i in range(train_step):\n # 从数据集中获取 输入和标签(也就是答案)\n x, label = self.data.train.next_batch(batch_size)\n # 每次计算train,更新整个网络\n # loss只是为了看到损失的大小,方便打印\n _, loss = self.sess.run([self.net.train, self.net.loss],\n feed_dict={self.net.x: x, self.net.label: label})\n\n # 打印 loss,训练过程中将会看到,loss有变小的趋势\n # 代表随着训练的进行,网络识别图像的能力提高\n # 但是由于网络规模较小,后期没有明显下降,而是有明显波动\n if (i + 1) % 10 == 0:\n print('第%5d步,当前loss:%.2f' % (i + 1, loss))\n\n def calculate_accuracy(self):\n test_x = self.data.test.images\n test_label = self.data.test.labels\n # 注意:与训练不同的是,并没有计算 self.net.train\n # 只计算了accuracy这个张量,所以不会更新网络\n # 最终准确率约为0.91\n accuracy = self.sess.run(self.net.accuracy,\n feed_dict={self.net.x: test_x, self.net.label: test_label})\n print(\"准确率: %.2f,共测试了%d张图片 \" % (accuracy, len(test_label)))\n\n\nif __name__ == \"__main__\":\n app = Train()\n app.train()\n app.calculate_accuracy()\n" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets" ] ]
JevinJ/pyvista
[ "c9be18ed209de3f80e1a70ef01eef3355b3616ce" ]
[ "pyvista/utilities/geometric_objects.py" ]
[ "\"\"\"Provides an easy way of generating several geometric objects.\n\nCONTAINS\n--------\nvtkArrowSource\nvtkCylinderSource\nvtkSphereSource\nvtkPlaneSource\nvtkLineSource\nvtkCubeSource\nvtkConeSource\nvtkDiskSource\nvtkRegularPolygonSource\nvtkPyramid\n\n\"\"\"\nimport numpy as np\nimport vtk\n\nimport pyvista\nfrom pyvista.utilities import assert_empty_kwargs, check_valid_vector\n\nNORMALS = {\n 'x': [1, 0, 0],\n 'y': [0, 1, 0],\n 'z': [0, 0, 1],\n '-x': [-1, 0, 0],\n '-y': [0, -1, 0],\n '-z': [0, 0, -1],\n}\n\n\ndef translate(surf, center=[0., 0., 0.], direction=[1., 0., 0.]):\n \"\"\"Translate and orientate a mesh to a new center and direction.\n\n By default, the input mesh is considered centered at the origin\n and facing in the x direction.\n\n \"\"\"\n normx = np.array(direction)/np.linalg.norm(direction)\n normz = np.cross(normx, [0, 1.0, 0.0000001])\n normz /= np.linalg.norm(normz)\n normy = np.cross(normz, normx)\n\n trans = np.zeros((4, 4))\n trans[:3, 0] = normx\n trans[:3, 1] = normy\n trans[:3, 2] = normz\n trans[3, 3] = 1\n\n surf.transform(trans)\n if not np.allclose(center, [0., 0., 0.]):\n surf.points += np.array(center)\n\n\ndef Cylinder(center=(0.,0.,0.), direction=(1.,0.,0.), radius=0.5, height=1.0,\n resolution=100, capping=True, **kwargs):\n \"\"\"Create the surface of a cylinder.\n\n See also :func:`pyvista.CylinderStructured`.\n\n Parameters\n ----------\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder points to in [x, y, z]\n\n radius : float\n Radius of the cylinder.\n\n height : float\n Height of the cylinder.\n\n resolution : int\n Number of points on the circular face of the cylinder.\n\n capping : bool, optional\n Cap cylinder ends with polygons. Default True\n\n Returns\n -------\n cylinder : pyvista.PolyData\n Cylinder surface.\n\n Examples\n --------\n >>> import pyvista\n >>> import numpy as np\n >>> cylinder = pyvista.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1)\n >>> cylinder.plot() # doctest:+SKIP\n \"\"\"\n capping = kwargs.pop('cap_ends', capping)\n assert_empty_kwargs(**kwargs)\n cylinderSource = vtk.vtkCylinderSource()\n cylinderSource.SetRadius(radius)\n cylinderSource.SetHeight(height)\n cylinderSource.SetCapping(capping)\n cylinderSource.SetResolution(resolution)\n cylinderSource.Update()\n surf = pyvista.PolyData(cylinderSource.GetOutput())\n surf.rotate_z(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef CylinderStructured(radius=0.5, height=1.0,\n center=(0.,0.,0.), direction=(1.,0.,0.),\n theta_resolution=32, z_resolution=10):\n \"\"\"Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.\n\n The end caps are left open. This can create a surface mesh if a single\n value for the ``radius`` is given or a 3D mesh if multiple radii are given\n as a list/array in the ``radius`` argument.\n\n Parameters\n ----------\n radius : float\n Radius of the cylinder. 
If an iterable\n\n height : float\n Height (length) of the cylinder along its Z-axis\n\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder Z-axis in [x, y, z]\n\n theta_resolution : int\n Number of points on the circular face of the cylinder.\n\n z_resolution : int\n Number of points along the height (Z-axis) of the cylinder\n\n \"\"\"\n # Define grid in polar coordinates\n r = np.array([radius]).ravel()\n nr = len(r)\n theta = np.linspace(0, 2*np.pi, num=theta_resolution)\n radius_matrix, theta_matrix = np.meshgrid(r,theta)\n\n # Transform to cartesian space\n X = radius_matrix * np.cos(theta_matrix)\n Y = radius_matrix * np.sin(theta_matrix)\n\n # Make all the nodes in the grid\n xx = np.array([X] * z_resolution).ravel()\n yy = np.array([Y] * z_resolution).ravel()\n dz = height / (z_resolution - 1)\n zz = np.empty(yy.size)\n zz = np.full((X.size, z_resolution), dz)\n zz *= np.arange(z_resolution)\n zz = zz.ravel(order='f')\n\n # Create the grid\n grid = pyvista.StructuredGrid()\n grid.points = np.c_[xx, yy, zz]\n grid.dimensions = [nr, theta_resolution, z_resolution]\n\n # Orient properly in user direction\n vx = np.array([0., 0., 1.])\n if not np.allclose(vx, direction):\n direction /= np.linalg.norm(direction)\n vx -= vx.dot(direction) * direction\n vx /= np.linalg.norm(vx)\n vy = np.cross(direction, vx)\n rmtx = np.array([vx, vy, direction])\n grid.points = grid.points.dot(rmtx)\n\n # Translate to given center\n grid.points -= np.array(grid.center)\n grid.points += np.array(center)\n\n return grid\n\n\ndef Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25,\n tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,\n shaft_resolution=20, scale=None):\n \"\"\"Create a vtk Arrow.\n\n Parameters\n ----------\n start : np.ndarray\n Start location in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction the arrow points to in [x, y, z]\n\n tip_length : float, optional\n Length of the tip.\n\n tip_radius : float, optional\n Radius of the tip.\n\n tip_resolution : int, optional\n Number of faces around the tip.\n\n shaft_radius : float, optional\n Radius of the shaft.\n\n shaft_resolution : int, optional\n Number of faces around the shaft.\n\n scale : float or str, optional\n Scale factor of the entire object, default is None (i.e. 
scale of 1).\n 'auto' scales to length of direction array.\n\n Returns\n -------\n arrow : pyvista.PolyData\n Arrow surface.\n\n \"\"\"\n # Create arrow object\n arrow = vtk.vtkArrowSource()\n arrow.SetTipLength(tip_length)\n arrow.SetTipRadius(tip_radius)\n arrow.SetTipResolution(tip_resolution)\n arrow.SetShaftRadius(shaft_radius)\n arrow.SetShaftResolution(shaft_resolution)\n arrow.Update()\n surf = pyvista.PolyData(arrow.GetOutput())\n\n if scale == 'auto':\n scale = float(np.linalg.norm(direction))\n if isinstance(scale, float) or isinstance(scale, int):\n surf.points *= scale\n elif scale is not None:\n raise TypeError(\"Scale must be either float, int or 'auto'.\")\n\n translate(surf, start, direction)\n return surf\n\n\ndef Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,\n phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):\n \"\"\"Create a vtk Sphere.\n\n Parameters\n ----------\n radius : float, optional\n Sphere radius\n\n center : np.ndarray or list, optional\n Center in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction the top of the sphere points to in [x, y, z]\n\n theta_resolution: int , optional\n Set the number of points in the longitude direction (ranging from\n start_theta to end theta).\n\n phi_resolution : int, optional\n Set the number of points in the latitude direction (ranging from\n start_phi to end_phi).\n\n start_theta : float, optional\n Starting longitude angle.\n\n end_theta : float, optional\n Ending longitude angle.\n\n start_phi : float, optional\n Starting latitude angle.\n\n end_phi : float, optional\n Ending latitude angle.\n\n Returns\n -------\n sphere : pyvista.PolyData\n Sphere mesh.\n\n \"\"\"\n sphere = vtk.vtkSphereSource()\n sphere.SetRadius(radius)\n sphere.SetThetaResolution(theta_resolution)\n sphere.SetPhiResolution(phi_resolution)\n sphere.SetStartTheta(start_theta)\n sphere.SetEndTheta(end_theta)\n sphere.SetStartPhi(start_phi)\n sphere.SetEndPhi(end_phi)\n sphere.Update()\n surf = pyvista.PolyData(sphere.GetOutput())\n surf.rotate_y(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,\n i_resolution=10, j_resolution=10):\n \"\"\"Create a plane.\n\n Parameters\n ----------\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder points to in [x, y, z]\n\n i_size : float\n Size of the plane in the i direction.\n\n j_size : float\n Size of the plane in the j direction.\n\n i_resolution : int\n Number of points on the plane in the i direction.\n\n j_resolution : int\n Number of points on the plane in the j direction.\n\n Returns\n -------\n plane : pyvista.PolyData\n Plane mesh\n\n \"\"\"\n planeSource = vtk.vtkPlaneSource()\n planeSource.SetXResolution(i_resolution)\n planeSource.SetYResolution(j_resolution)\n planeSource.Update()\n\n surf = pyvista.PolyData(planeSource.GetOutput())\n\n surf.points[:, 0] *= i_size\n surf.points[:, 1] *= j_size\n surf.rotate_y(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):\n \"\"\"Create a line.\n\n Parameters\n ----------\n pointa : np.ndarray or list\n Location in [x, y, z].\n\n pointb : np.ndarray or list\n Location in [x, y, z].\n\n resolution : int\n number of pieces to divide line into\n\n \"\"\"\n if resolution <= 0:\n raise ValueError('Resolution must be positive')\n if 
np.array(pointa).size != 3:\n raise TypeError('Point A must be a length three tuple of floats.')\n if np.array(pointb).size != 3:\n raise TypeError('Point B must be a length three tuple of floats.')\n src = vtk.vtkLineSource()\n src.SetPoint1(*pointa)\n src.SetPoint2(*pointb)\n src.SetResolution(resolution)\n src.Update()\n line = pyvista.wrap(src.GetOutput())\n # Compute distance of every point along line\n compute = lambda p0, p1: np.sqrt(np.sum((p1 - p0)**2, axis=1))\n distance = compute(np.array(pointa), line.points)\n line['Distance'] = distance\n return line\n\n\ndef Cube(center=(0., 0., 0.), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None):\n \"\"\"Create a cube.\n\n It's possible to specify either the center and side lengths or just\n the bounds of the cube. If ``bounds`` are given, all other arguments are\n ignored.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z].\n\n x_length : float\n length of the cube in the x-direction.\n\n y_length : float\n length of the cube in the y-direction.\n\n z_length : float\n length of the cube in the z-direction.\n\n bounds : np.ndarray or list\n Specify the bounding box of the cube. If given, all other arguments are\n ignored. ``(xMin,xMax, yMin,yMax, zMin,zMax)``\n\n \"\"\"\n src = vtk.vtkCubeSource()\n if bounds is not None:\n if np.array(bounds).size != 6:\n raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)')\n src.SetBounds(bounds)\n else:\n src.SetCenter(center)\n src.SetXLength(x_length)\n src.SetYLength(y_length)\n src.SetZLength(z_length)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):\n \"\"\"Create a box with solid faces for the given bounds.\n\n Parameters\n ----------\n bounds : np.ndarray or list\n Specify the bounding box of the cube.\n ``(xMin, xMax, yMin, yMax, zMin, zMax)``\n\n level : int\n Level of subdivision of the faces.\n\n quads : bool, optional\n Flag to tell the source to generate either a quad or two\n triangle for a set of four points. Default ``True``.\n\n \"\"\"\n if np.array(bounds).size != 6:\n raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')\n src = vtk.vtkTessellatedBoxSource()\n src.SetLevel(level)\n if quads:\n src.QuadsOn()\n else:\n src.QuadsOff()\n src.SetBounds(bounds)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Cone(center=(0.,0.,0.), direction=(1.,0.,0.), height=1.0, radius=None,\n capping=True, angle=None, resolution=6):\n \"\"\"Create a cone.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z]. middle of the axis of the cone.\n\n direction : np.ndarray or list\n Direction vector in [x, y, z]. orientation vector of the cone.\n\n height : float\n Height along the cone in its specified direction.\n\n radius : float\n Base radius of the cone\n\n capping : bool\n Turn on/off whether to cap the base of the cone with a polygon.\n\n angle : float\n The angle degrees between the axis of the cone and a generatrix.\n\n resolution : int\n Number of facets used to represent the cone\n\n \"\"\"\n src = vtk.vtkConeSource()\n src.SetCapping(capping)\n src.SetDirection(direction)\n src.SetCenter(center)\n src.SetHeight(height)\n # Contributed by @kjelljorner in #249:\n if angle and radius:\n raise ValueError(\"Both radius and angle specified. 
They are mutually exclusive.\")\n    elif angle and not radius:\n        src.SetAngle(angle)\n    elif not angle and radius:\n        src.SetRadius(radius)\n    elif not angle and not radius:\n        src.SetRadius(0.5)\n    src.SetResolution(resolution)\n    src.Update()\n    return pyvista.wrap(src.GetOutput())\n\n\ndef Polygon(center=(0.,0.,0.), radius=1, normal=(0,0,1), n_sides=6):\n    \"\"\"Create a regular polygon.\n\n    The polygon has zero height. The user can specify its radius, the normal\n    of the plane it lies in, and its number of sides.\n\n    Parameters\n    ----------\n    center : np.ndarray or list\n        Center in [x, y, z]. middle of the axis of the polygon.\n\n    radius : float\n        The radius of the polygon\n\n    normal : np.ndarray or list\n        Direction vector in [x, y, z]. orientation vector of the polygon.\n\n    n_sides : int\n        Number of sides of the polygon\n\n    \"\"\"\n    src = vtk.vtkRegularPolygonSource()\n    src.SetCenter(center)\n    src.SetNumberOfSides(n_sides)\n    src.SetRadius(radius)\n    src.SetNormal(normal)\n    src.Update()\n    return pyvista.wrap(src.GetOutput())\n\n\ndef Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,\n         c_res=6):\n    \"\"\"Create a polygonal disk with a hole in the center.\n\n    The disk has zero height. The user can specify the inner and outer radius\n    of the disk, and the radial and circumferential resolution of the polygonal\n    representation.\n\n    Parameters\n    ----------\n    center : np.ndarray or list\n        Center in [x, y, z]. middle of the axis of the disc.\n\n    inner : float\n        The inner radius\n\n    outer : float\n        The outer radius\n\n    normal : np.ndarray or list\n        Direction vector in [x, y, z]. orientation vector of the disc.\n\n    r_res : int\n        Number of points in radius direction.\n\n    c_res : int\n        Number of points in circumferential direction.\n\n    \"\"\"\n    src = vtk.vtkDiskSource()\n    src.SetInnerRadius(inner)\n    src.SetOuterRadius(outer)\n    src.SetRadialResolution(r_res)\n    src.SetCircumferentialResolution(c_res)\n    src.Update()\n    normal = np.array(normal)\n    center = np.array(center)\n    surf = pyvista.PolyData(src.GetOutput())\n    surf.rotate_y(90)\n    translate(surf, center, normal)\n    return surf\n\n\ndef Text3D(string, depth=0.5):\n    \"\"\"Create 3D text from a string.\"\"\"\n    vec_text = vtk.vtkVectorText()\n    vec_text.SetText(string)\n\n    extrude = vtk.vtkLinearExtrusionFilter()\n    extrude.SetInputConnection(vec_text.GetOutputPort())\n    extrude.SetExtrusionTypeToNormalExtrusion()\n    extrude.SetVector(0, 0, 1)\n    extrude.SetScaleFactor(depth)\n\n    tri_filter = vtk.vtkTriangleFilter()\n    tri_filter.SetInputConnection(extrude.GetOutputPort())\n    tri_filter.Update()\n    return pyvista.wrap(tri_filter.GetOutput())\n\n\ndef Wavelet(extent=(-10,10,-10,10,-10,10), center=(0,0,0), maximum=255,\n            x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,\n            std=0.5, subsample_rate=1):\n    \"\"\"Create a wavelet.\"\"\"\n    wavelet_source = vtk.vtkRTAnalyticSource()\n    wavelet_source.SetWholeExtent(*extent)\n    wavelet_source.SetCenter(center)\n    wavelet_source.SetMaximum(maximum)\n    wavelet_source.SetXFreq(x_freq)\n    wavelet_source.SetYFreq(y_freq)\n    wavelet_source.SetZFreq(z_freq)\n    wavelet_source.SetXMag(x_mag)\n    wavelet_source.SetYMag(y_mag)\n    wavelet_source.SetZMag(z_mag)\n    wavelet_source.SetStandardDeviation(std)\n    wavelet_source.SetSubsampleRate(subsample_rate)\n    wavelet_source.Update()\n    return pyvista.wrap(wavelet_source.GetOutput())\n\n\ndef CircularArc(pointa, pointb, center, resolution=100, normal=None,\n                polar=None, angle=None, negative=False):\n    \"\"\"Create a circular arc defined by two endpoints and a center.\n\n    The number of segments composing the polyline is controlled by\n    setting the object resolution. Alternatively, one can use a\n    better API (that does not allow for inconsistent nor ambiguous\n    inputs), using a starting point (polar vector, measured from the\n    arc's center), a normal to the plane of the arc, and an angle\n    defining the arc length.\n\n    Parameters\n    ----------\n    pointa : np.ndarray or list\n        Position of the first end point.\n\n    pointb : np.ndarray or list\n        Position of the other end point.\n\n    center : np.ndarray or list\n        Center of the circle that defines the arc.\n\n    resolution : int, optional\n        The number of segments of the polyline that draws the arc.\n        Resolution of 1 will just create a line.\n\n    normal : np.ndarray or list\n        The normal vector to the plane of the arc. By default it\n        points in the positive Z direction.\n\n    polar : np.ndarray or list\n        Starting point of the arc. By default it is the unit vector\n        in the positive x direction. Note: This is only used when\n        normal has been input.\n\n    angle : float\n        Arc length (in degrees), beginning at the polar vector. The\n        direction is counterclockwise by default; a negative value\n        draws the arc in the clockwise direction. Note: This is only\n        used when normal has been input.\n\n    negative : bool, optional\n        By default the arc spans the shortest angular sector between point1 and point2.\n\n        By setting this to true, the longest angular sector is used\n        instead (i.e. the negative coterminal angle to the shortest\n        one). This is only used when normal has not been input.\n\n    Examples\n    --------\n    Quarter arc centered at the origin in the xy plane\n\n    >>> import pyvista\n    >>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])\n    >>> pl = pyvista.Plotter()\n    >>> _ = pl.add_mesh(arc, color='k', line_width=4)\n    >>> _ = pl.show_bounds(location='all')\n    >>> _ = pl.view_xy()\n    >>> pl.show() # doctest:+SKIP\n\n    Quarter arc centered at the origin in the xz plane\n\n    >>> arc = pyvista.CircularArc([-1, 0, 0], [1, 0, 0], [0, 0, 0], normal=[0, 0, 1])\n    >>> arc.plot() # doctest:+SKIP\n    \"\"\"\n    check_valid_vector(pointa, 'pointa')\n    check_valid_vector(pointb, 'pointb')\n    check_valid_vector(center, 'center')\n\n    # fix half-arc bug: if a half arc travels directly through the\n    # center point, it becomes a line\n    pointb = list(pointb)\n    pointb[0] -= 1E-10\n    pointb[1] -= 1E-10\n\n    arc = vtk.vtkArcSource()\n    arc.SetPoint1(*pointa)\n    arc.SetPoint2(*pointb)\n    arc.SetCenter(*center)\n    arc.SetResolution(resolution)\n    arc.SetNegative(negative)\n\n    if normal is not None:\n        arc.UseNormalAndAngleOn()\n        check_valid_vector(normal, 'normal')\n        arc.SetNormal(*normal)\n\n    if polar is not None:\n        check_valid_vector(polar, 'polar')\n        arc.SetPolarVector(*polar)\n\n    if angle is not None:\n        arc.SetAngle(angle)\n\n    arc.Update()\n    return pyvista.wrap(arc.GetOutput())\n\n\ndef Pyramid(points):\n    \"\"\"Create a pyramid defined by 5 points.\n\n    Parameters\n    ----------\n    points : np.ndarray or list\n        Points of the pyramid. Points are ordered such that the first\n        four points are the four counterclockwise points on the\n        quadrilateral face, and the last point is the apex.\n\n    Returns\n    -------\n    pyramid : pyvista.UnstructuredGrid\n\n    Examples\n    --------\n    >>> import pyvista\n    >>> pointa = [1.0, 1.0, 1.0]\n    >>> pointb = [-1.0, 1.0, 1.0]\n    >>> pointc = [-1.0, -1.0, 1.0]\n    >>> pointd = [1.0, -1.0, 1.0]\n    >>> pointe = [0.0, 0.0, 0.0]\n    >>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])\n    >>> pyramid.plot() # doctest:+SKIP\n    \"\"\"\n    if len(points) != 5:\n        raise TypeError('Points must be given as length 5 np.ndarray or list')\n\n    check_valid_vector(points[0], 'points[0]')\n    check_valid_vector(points[1], 'points[1]')\n    check_valid_vector(points[2], 'points[2]')\n    check_valid_vector(points[3], 'points[3]')\n    check_valid_vector(points[4], 'points[4]')\n\n    pyramid = vtk.vtkPyramid()\n    pyramid.GetPointIds().SetId(0, 0)\n    pyramid.GetPointIds().SetId(1, 1)\n    pyramid.GetPointIds().SetId(2, 2)\n    pyramid.GetPointIds().SetId(3, 3)\n    pyramid.GetPointIds().SetId(4, 4)\n\n    ug = vtk.vtkUnstructuredGrid()\n    ug.SetPoints(pyvista.vtk_points(np.array(points), False))\n    ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())\n\n    return pyvista.wrap(ug)\n" ]
[ [ "numpy.allclose", "numpy.sum", "numpy.linalg.norm", "numpy.empty", "numpy.zeros", "numpy.cross", "numpy.cos", "numpy.arange", "numpy.full", "numpy.array", "numpy.sin", "numpy.meshgrid", "numpy.linspace" ] ]
joegeisz/pylith
[ "f74060b7b19d7e90abf8597bbe9250c96593c0ad" ]
[ "examples/2d/subduction/viz/plot_slipprofile.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nThis script generates a plot showing slip or fault tractions.\n\"\"\"\n\n# The code requires the numpy, h5py, and matplotlib packages.\nimport numpy\nimport h5py\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as pyplot\n\n\n# ----------------------------------------------------------------------\ndef calcDist(vertices):\n \"\"\"Compute down-dip distance from the trench.\n \"\"\"\n dist = numpy.zeros(vertices.shape[0])\n pt0 = vertices[:-1,:]\n pt1 = vertices[1:,:]\n dx = ((pt1[:,0]-pt0[:,0])**2 + (pt1[:,1]-pt0[:,1])**2)**0.5\n dist[1:] = numpy.cumsum(dx)\n return dist\n\n# ----------------------------------------------------------------------\ndef getData(sim):\n \"\"\"Read fault information from HDF5 file.\n \"\"\"\n filename = \"output/%s-fault-slabtop.h5\" % sim\n h5 = h5py.File(filename, \"r\")\n vertices = h5['geometry/vertices'][:]\n slip = h5['vertex_fields/slip'][:,:,:]\n tstamps = h5[\"time\"][:]\n h5.close()\n\n data = {\n \"time\": tstamps,\n \"vertices\": vertices,\n \"slip\": slip\n }\n return data\n\n# ----------------------------------------------------------------------\ndef plot(sim):\n\n # Get fault data for simulation.\n data = getData(sim)\n \n # Create sort key corresponding to increasing depth.\n indices = numpy.argsort(data[\"vertices\"][:,1])[::-1]\n\n # Calculate down-dip distance from trench and get sorted data.\n #dist = calcDist(data[\"vertices\"][indices,:])\n dist = -data[\"vertices\"][indices,1]\n slip = data[\"slip\"][:,indices,:]\n\n figure = pyplot.figure(figsize=(5.0, 3.0), facecolor='white', dpi=150)\n figure.set_facecolor('white')\n\n axes = figure.add_axes([0.15, 0.15, 0.80, 0.82])\n\n for i,t in enumerate(data[\"time\"]):\n color = \"blue\"\n lw = 0.5\n if i % 10 == 0:\n color = \"red\"\n lw = 1.0\n axes.plot(-slip[i,:,0], dist/1.0e+3, '-', color=color, linewidth=lw)\n axes.set_xlabel(\"Slip (m)\")\n #axes.set_ylabel(\"Down-dip Dist. (km)\")\n axes.set_ylabel(\"Depth (km)\")\n axes.invert_yaxis()\n\n\n pyplot.show()\n pyplot.savefig(\"subduction2d_%s_slip.pdf\" % sim)\n return\n\n# ======================================================================\nif __name__ == \"__main__\":\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sim\", action=\"store\", dest=\"sim\", default=\"step05\")\n args = parser.parse_args()\n\n plot(args.sim)\n\n\n# End of file\n" ]
[ [ "numpy.cumsum", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.use" ] ]
artificially-ai/FewShotVision
[ "909bc414ea27ef0300091e1dd6baba4fb063324b" ]
[ "utils/io_utils.py" ]
[ "import glob\nimport os\n\nimport numpy as np\nimport torch\n\nfrom utils import configs, backbones\n\nmodel_dict = dict(\n Conv4=backbones.Conv4,\n Conv4S=backbones.Conv4S,\n Conv6=backbones.Conv6,\n ResNet10=backbones.ResNet10,\n ResNet18=backbones.ResNet18,\n ResNet34=backbones.ResNet34,\n ResNet50=backbones.ResNet50,\n ResNet101=backbones.ResNet101,\n)\n\n\ndef path_to_step_output(dataset, backbone, method, output_dir=configs.save_dir):\n \"\"\"\n Defines the path where the outputs will be saved on the disk\n Args:\n dataset (str): name of the dataset\n backbone (str): name of the backbone of the model\n method (str): name of the used method\n output_dir (str): may be common to other experiments\n\n Returns:\n str: path to the output of the step\n \"\"\"\n checkpoint_dir = os.path.join(\n output_dir,\n dataset,\n '_'.join([method, backbone]),\n )\n\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n return checkpoint_dir\n\n\ndef set_and_print_random_seed(random_seed, save=False, checkpoint_dir='./'):\n \"\"\"\n Set and print numpy random seed, for reproducibility of the training,\n and set torch seed based on numpy random seed\n Args:\n random_seed (int): seed for random instantiations ; if none is provided, a seed is randomly defined\n save (bool): if True, the numpy random seed is saved in seeds.txt\n checkpoint_dir (str): output folder where the seed is saved\n Returns:\n int: numpy random seed\n\n \"\"\"\n if random_seed is None:\n random_seed = np.random.randint(0, 2 ** 32 - 1)\n np.random.seed(random_seed)\n torch.manual_seed(np.random.randint(0, 2**32-1))\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n prompt = 'Random seed : {}\\n'.format(random_seed)\n print(prompt)\n\n if save:\n with open(os.path.join(checkpoint_dir, 'seeds.txt'), 'a') as f:\n f.write(prompt)\n\n return random_seed\n\n\ndef get_path_to_json(dataset, split):\n \"\"\"\n\n Args:\n dataset (str): which dataset to load\n split (str): whether to use base, val or novel dataset\n\n Returns:\n str: path to JSON file\n \"\"\"\n if dataset == 'cross':\n if split == 'base':\n path_to_json_file = configs.data_dir['miniImageNet'] + 'all.json'\n else:\n path_to_json_file = configs.data_dir['CUB'] + split + '.json'\n elif dataset == 'cross_char':\n if split == 'base':\n path_to_json_file = configs.data_dir['omniglot'] + 'noLatin.json'\n else:\n path_to_json_file = configs.data_dir['emnist'] + split + '.json'\n else:\n path_to_json_file = configs.data_dir[dataset] + split + '.json'\n\n return path_to_json_file\n\n\ndef get_assigned_file(checkpoint_dir, num):\n # TODO: returns path to .tar file corresponding to epoch num in checkpoint_dir (even if it doesn't exist)\n assign_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(num))\n return assign_file\n\n\ndef get_resume_file(checkpoint_dir):\n # TODO: returns path to .tar file corresponding to maximal epoch in checkpoint_dir, None if checkpoint_dir is empty\n # TODO What happens if checkpoint_dir only contains best_model.tar ?\n filelist = glob.glob(os.path.join(checkpoint_dir, '*.tar'))\n if len(filelist) == 0:\n return None\n\n filelist = [x for x in filelist if os.path.basename(x) != 'best_model.tar']\n epochs = np.array([int(os.path.splitext(os.path.basename(x))[0]) for x in filelist])\n max_epoch = np.max(epochs)\n resume_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(max_epoch))\n return resume_file\n\n\ndef get_best_file(checkpoint_dir):\n # TODO returns best_model.tar in checkpoint_dir if there is 
one, else returns get_resume_file(checkpoint_dir)\n best_file = os.path.join(checkpoint_dir, 'best_model.tar')\n if os.path.isfile(best_file):\n return best_file\n else:\n return get_resume_file(checkpoint_dir)\n" ]
[ [ "numpy.max", "numpy.random.seed", "numpy.random.randint" ] ]
phaustin/a301_2020
[ "9be7ead5f641013e2cec4e736ea76171b849e8d5" ]
[ "sat_lib/process_bands.py" ]
[ "import a301_lib\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom pyhdf.SD import SD\nfrom pyhdf.SD import SDC\nfrom pathlib import Path\nimport h5py\nfrom contextlib import contextmanager\nimport os\nfrom sat_lib.modismeta_read import get_core\n\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(prevdir)\n\ndef readband(the_file,the_band):\n \"\"\"\n read and calibrate a MODIS band from an open hdf4 SD dataset\n\n Parameters\n ----------\n\n the_file:pyhdf.SD object\n the dataset open for reading\n the_band: int\n band number for MODIS (1-36)\n\n Returns\n -------\n the_chan_calibrated: ndarray\n the pixel radiances in W/m^2/sr/micron\n \"\"\"\n longwave_data = the_file.select(\"EV_1KM_Emissive\") # select sds\n longwave_bands = the_file.select(\"Band_1KM_Emissive\")\n band_nums = longwave_bands.get()\n thechan_index = int(np.searchsorted(band_nums, the_band))\n print(f\"reading ban {the_band}\")\n print(thechan_index)\n thechan_data = longwave_data[thechan_index, :, :]\n scales = longwave_data.attributes()[\"radiance_scales\"]\n offsets = longwave_data.attributes()[\"radiance_offsets\"]\n thechan_scale = scales[thechan_index]\n thechan_offset = offsets[thechan_index]\n thechan_calibrated = (thechan_data - thechan_offset) * thechan_scale\n return thechan_calibrated\n\ndef write_bands(outname,chan_rads,core_metadata):\n \"\"\"\n write a MODIS band 30 to an h5 file\n\n Parameters\n ----------\n\n outname: str\n name of output hdf\n chan_rads: dict\n the pixel radiances in W/m^2/sr/micron\n key: channel number (int)\n value: radiance (ndarray)\n\n Returns\n -------\n None-- the_file is closed by this function\n \"\"\"\n with h5py.File(outname, \"w\") as f:\n group = f.create_group(\"channels\")\n for key, value in chan_rads.items():\n chan_name = f\"chan{key}\"\n radiance_array = value\n radiance_array = radiance_array.astype(np.float32)\n dset = group.create_dataset(chan_name, radiance_array.shape,\n dtype=radiance_array.dtype)\n dset[...] = radiance_array[...]\n dset.attrs['units'] = \"W/m^2/micron/ sr\"\n f.attrs[\"history\"] = 'written by process.py'\n f.attrs[\"CoreMetadata.0\"] = core_metadata\n print(f\"wrote {outname}\")\n\n\n\n\nif __name__ == \"__main__\":\n import a301_lib\n sat_data = a301_lib.sat_data / \"hdf4_files\"\n with cd(sat_data):\n all_files = list(sat_data.glob(\"MYD021KM*2105*hdf\"))\n all_files = [item for item in all_files if (item.parent.name != \"h5_dir\"\n and item.name.find('MYD02') >= 0)]\n print(f\"found {all_files}\")\n out_dir = sat_data /\"h5_dir\"\n out_dir.mkdir(parents=True, exist_ok=True)\n for a_file in all_files[:]:\n core_metadata = get_core(a_file)\n out_file = out_dir / f\"oct9_{a_file.name}\"\n out_file = out_file.with_suffix('.h5')\n print(f\"reading {a_file}, writing {out_file}\")\n the_sd = SD(str(a_file), SDC.READ)\n band_list = [30,31,32]\n rad_dict = {}\n for the_band in band_list:\n rad_dict[the_band] = readband(the_sd,the_band)\n the_sd.end()\n write_bands(out_file,rad_dict,core_metadata)\n" ]
[ [ "numpy.searchsorted" ] ]
Klimorg/template_segmentation
[ "f5a5066905acb06c66793d9a361eae8570652af2" ]
[ "src/pipelines/classic.py" ]
[ "from typing import List, Tuple\n\nimport albumentations as A\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom src.pipelines.base_pipeline import BasePipeline\n\n# class Tensorize(object):\n# \"\"\"\n# Class used to create tensor datasets for TensorFlow.\n\n# Inheritance:\n# object: The base class of the class hierarchy, used only to enforce WPS306.\n# See https://wemake-python-stylegui.de/en/latest/pages/usage/violations/consistency.html#consistency.\n\n# Args:\n# n_classes (int): Number of classes in the dataset.\n# img_shape (Tuple[int,int,int]): Dimension of the image, format is (H,W,C).\n# random_seed (int): Fixed random seed for reproducibility.\n# \"\"\"\n\n# def __init__(\n# self,\n# n_classes: int,\n# img_shape: Tuple[int, int, int],\n# random_seed: int,\n# ) -> None:\n# \"\"\"Initialization of the class Tensorize.\n\n# Initialize the class, the number of classes in the datasets, the shape of the\n# images and the random seed.\n# \"\"\"\n\n# self.n_classes = n_classes\n# self.img_shape = img_shape\n# self.random_seed = random_seed\n# self.AUTOTUNE = tf.data.AUTOTUNE\n\n# def load_images(self, data_frame: pd.DataFrame, column_name: str) -> List[str]:\n# \"\"\"Load the images as a list.\n\n# Take the dataframe containing the observations and the masks and the return the\n# column containing the observations as a list.\n\n# Args:\n# data_frame (pd.DataFrame): Dataframe containing the dataset.\n# column_name (str): The name of the column containing the observations.\n\n# Returns:\n# The list of observations deduced from the dataframe.\n# \"\"\"\n# return data_frame[column_name].tolist()\n\n# @tf.function\n# def parse_image_and_mask(\n# self,\n# image: str,\n# mask: str,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Transform image and mask.\n\n# Parse image and mask to go from path to a resized np.ndarray.\n\n# Args:\n# filename (str): The path of the image to parse.\n# mask (str): The mask of the image.\n\n# Returns:\n# A np.ndarray corresponding to the image and the corresponding one-hot mask.\n# \"\"\"\n# resized_dims = [self.img_shape[0], self.img_shape[1]]\n# # convert the mask to one-hot encoding\n# # decode image\n# image = tf.io.read_file(image)\n# # Don't use tf.image.decode_image,\n# # or the output shape will be undefined\n# image = tf.image.decode_jpeg(image)\n# # This will convert to float values in [0, 1]\n# image = tf.image.convert_image_dtype(image, tf.float32)\n# image = tf.image.resize(\n# image,\n# resized_dims,\n# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n# )\n\n# mask = tf.io.read_file(mask)\n# # Don't use tf.image.decode_image,\n# # or the output shape will be undefined\n# mask = tf.io.decode_png(mask, channels=1)\n# mask = tf.image.resize(\n# mask,\n# resized_dims,\n# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n# )\n\n# return image, mask\n\n# def train_preprocess(\n# self,\n# image: np.ndarray,\n# mask: np.ndarray,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Augmentation preprocess, if needed.\n\n# Args:\n# image (np.ndarray): The image to augment.\n# mask (np.ndarray): The corresponding mask.\n\n# Returns:\n# The augmented pair.\n# \"\"\"\n\n# aug = A.Compose(\n# [\n# A.HorizontalFlip(p=0.5),\n# A.VerticalFlip(p=0.5),\n# A.RandomRotate90(p=0.5),\n# A.Transpose(p=0.5),\n# ],\n# )\n\n# augmented = aug(image=image, mask=mask)\n\n# image = augmented[\"image\"]\n# mask = augmented[\"mask\"]\n\n# image = tf.cast(x=image, dtype=tf.float32)\n# mask = tf.cast(x=mask, dtype=tf.float32)\n\n# return image, mask\n\n# 
@tf.function\n# def apply_augments(\n# self,\n# image: np.ndarray,\n# mask: np.ndarray,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Apply augmentation (roations, transposition, flips), if needed.\n\n# Args:\n# image (np.ndarray): A numpy array representing an image of the dataset.\n# mask (np.ndarray): A numpy array representing a mask of the dataset.\n\n# Returns:\n# An augmented pair (image, mask).\n# \"\"\"\n\n# image, mask = tf.numpy_function(\n# func=self.train_preprocess,\n# inp=[image, mask],\n# Tout=[tf.float32, tf.float32],\n# )\n\n# img_shape = [self.img_shape[0], self.img_shape[1], 3]\n# mask_shape = [self.img_shape[0], self.img_shape[1], 1]\n\n# image = tf.ensure_shape(image, shape=img_shape)\n# mask = tf.ensure_shape(mask, shape=mask_shape)\n\n# return image, mask\n\n# def create_train_dataset(\n# self,\n# data_path: str,\n# batch: int,\n# repet: int,\n# prefetch: int,\n# augment: bool,\n# ) -> tf.data.Dataset:\n# \"\"\"Creation of a tensor dataset for TensorFlow.\n\n# Args:\n# data_path (str): Path where the csv file containing the dataframe is\n# located.\n# batch (int): Batch size, usually 32.\n# repet (int): How many times the dataset has to be repeated.\n# prefetch (int): How many batch the CPU has to prepare in advance for the\n# GPU.\n# augment (bool): Does the dataset has to be augmented or no.\n\n# Returns:\n# A batch of observations and masks.\n# \"\"\"\n# df = pd.read_csv(data_path)\n# features = self.load_images(data_frame=df, column_name=\"filename\")\n# masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n# dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n# dataset = dataset.cache()\n# dataset = dataset.shuffle(len(features), seed=self.random_seed)\n# dataset = dataset.repeat(repet)\n# dataset = dataset.map(\n# self.parse_image_and_mask,\n# num_parallel_calls=self.AUTOTUNE,\n# )\n# if augment:\n# dataset = dataset.map(self.apply_augments, num_parallel_calls=self.AUTOTUNE)\n# dataset = dataset.batch(batch)\n# return dataset.prefetch(prefetch)\n\n# def create_test_dataset(\n# self,\n# data_path: str,\n# batch: int,\n# repet: int,\n# prefetch: int,\n# ) -> tf.data.Dataset:\n# \"\"\"Creation of a tensor dataset for TensorFlow.\n\n# Args:\n# data_path (str): Path where the csv file containing the dataframe is\n# located.\n# batch (int): Batch size, usually 32.\n# repet (int): How many times the dataset has to be repeated.\n# prefetch (int): How many batch the CPU has to prepare in advance for the\n# GPU.\n# augment (bool): Does the dataset has to be augmented or no.\n\n# Returns:\n# A batch of observations and masks.\n# \"\"\"\n# df = pd.read_csv(data_path)\n# features = self.load_images(data_frame=df, column_name=\"filename\")\n# masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n# dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n# dataset = dataset.cache()\n# dataset = dataset.shuffle(len(features), seed=self.random_seed)\n# dataset = dataset.repeat(repet)\n# dataset = dataset.map(\n# self.parse_image_and_mask,\n# num_parallel_calls=self.AUTOTUNE,\n# )\n# dataset = dataset.batch(batch)\n# return dataset.prefetch(prefetch)\n\n\nclass BaseDataset(BasePipeline):\n \"\"\"\n Class used to create tensor datasets for TensorFlow.\n\n Inheritance:\n object: The base class of the class hierarchy, used only to enforce WPS306.\n See https://wemake-python-stylegui.de/en/latest/pages/usage/violations/consistency.html#consistency.\n\n Args:\n n_classes (int): Number of classes in the dataset.\n img_shape 
(Tuple[int,int,int]): Dimension of the image, format is (H,W,C).\n random_seed (int): Fixed random seed for reproducibility.\n \"\"\"\n\n def __init__(\n self,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Initialization of the class Tensorize.\n\n Initialize the class, the number of classes in the datasets, the shape of the\n images and the random seed.\n \"\"\"\n super().__init__(\n *args,\n **kwargs,\n )\n\n def create_train_dataset(\n self,\n data_path: str,\n batch: int,\n repet: int,\n prefetch: int,\n augment: bool,\n ) -> tf.data.Dataset:\n \"\"\"Creation of a tensor dataset for TensorFlow.\n\n Args:\n data_path (str): Path where the csv file containing the dataframe is\n located.\n batch (int): Batch size, usually 32.\n repet (int): How many times the dataset has to be repeated.\n prefetch (int): How many batch the CPU has to prepare in advance for the\n GPU.\n augment (bool): Does the dataset has to be augmented or no.\n\n Returns:\n A batch of observations and masks.\n \"\"\"\n df = pd.read_csv(data_path)\n features = self.load_images(data_frame=df, column_name=\"filename\")\n masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n dataset = dataset.cache()\n dataset = dataset.shuffle(len(features), seed=self.random_seed)\n dataset = dataset.repeat(repet)\n dataset = dataset.map(\n self.parse_image_and_mask,\n num_parallel_calls=self.AUTOTUNE,\n )\n if augment:\n dataset = dataset.map(self.apply_augments, num_parallel_calls=self.AUTOTUNE)\n dataset = dataset.batch(batch)\n return dataset.prefetch(prefetch)\n" ]
[ [ "pandas.read_csv", "tensorflow.data.Dataset.from_tensor_slices" ] ]
jim22k/python-suitesparse-graphblas
[ "09eb560950dd848cadef6115c78ef4ed2e1ae80f" ]
[ "setup.py" ]
[ "from setuptools import setup, find_packages, Extension\nfrom glob import glob\n\ntry:\n from Cython.Build import cythonize\n from Cython.Compiler.Options import get_directive_defaults\n\n use_cython = True\nexcept ImportError:\n use_cython = False\nimport numpy as np\nimport os\nimport sys\nimport versioneer\n\ndefine_macros = [(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")]\n\nif use_cython:\n suffix = \".pyx\"\n directive_defaults = get_directive_defaults()\n directive_defaults[\"binding\"] = True\n directive_defaults[\"language_level\"] = 3\n if os.environ.get(\"CYTHON_COVERAGE\"):\n directive_defaults[\"linetrace\"] = True\n define_macros.append((\"CYTHON_TRACE_NOGIL\", \"1\"))\nelse:\n suffix = \".c\"\n\ninclude_dirs = [np.get_include(), os.path.join(sys.prefix, \"include\")]\next_modules = [\n Extension(\n name[: -len(suffix)].replace(\"/\", \".\").replace(\"\\\\\", \".\"),\n [name],\n include_dirs=include_dirs,\n define_macros=define_macros,\n )\n for name in glob(f\"suitesparse_graphblas/**/*{suffix}\", recursive=True)\n]\nif use_cython:\n ext_modules = cythonize(ext_modules, include_path=include_dirs)\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\npackage_data = {\"suitesparse_graphblas\": [\"*.pyx\", \"*.pxd\", \"*.h\"]}\nif sys.platform == \"win32\":\n package_data[\"suitesparse_graphblas\"].append(\"*.dll\")\n\nsetup(\n name=\"suitesparse-graphblas\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"SuiteSparse:GraphBLAS Python bindings.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n author=\"Michel Pelletier, James Kitchen, Erik Welch\",\n author_email=\"[email protected],[email protected],[email protected]\",\n url=\"https://github.com/GraphBLAS/python-suitesparse-graphblas\",\n ext_modules=ext_modules,\n cffi_modules=[\"suitesparse_graphblas/build.py:ffibuilder\"],\n python_requires=\">=3.7\",\n install_requires=[\"cffi>=1.0.0\", \"numpy>=1.15\"],\n setup_requires=[\"cffi>=1.0.0\", \"pytest-runner\"],\n tests_require=[\"pytest\"],\n license=\"Apache License 2.0\",\n package_data=package_data,\n include_package_data=True,\n)\n" ]
[ [ "numpy.get_include" ] ]
satyami3/stock
[ "d3d3f65a25feb764e6f735f422251fd0ede520fc" ]
[ "scheduled_tasks/economy/get_upcoming_events_date.py" ]
[ "import json\nimport sqlite3\nimport tabula\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nconn = sqlite3.connect(r\"database/database.db\", check_same_thread=False)\ndb = conn.cursor()\n\ncurrent_date = datetime.utcnow()\n\n\ndef get_next_retail_sales_date():\n \"\"\"\n Get next retail sales release date\n \"\"\"\n df = tabula.read_pdf(r\"https://www.census.gov/retail/marts/www/martsdates.pdf\", pages=1)[0]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef get_next_cpi_date():\n \"\"\"\n Get next CPI release date\n \"\"\"\n df = pd.read_html(r\"https://www.bls.gov/schedule/news_release/cpi.htm\")[0][:-1]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef to_week_day(date):\n \"\"\"\n Get the next closest weekday\n Parameters\n ----------\n date : datetime\n Date to find the next closest weekday\n \"\"\"\n if date.weekday() in {5, 6}:\n date += timedelta(days=-date.weekday() + 7)\n return str(date.date())\n\n\ndef get_next_rrp_treasury_date(date):\n return to_week_day(date)\n\n\ndef get_holidays():\n \"\"\"\n Get holidays in US when stock market is closed\n \"\"\"\n holidays_df = pd.read_html(r\"https://www.sec.gov/edgar/filer-information/calendar\")[0]\n holidays_df[\"Date\"] = pd.to_datetime(holidays_df[\"Date\"])\n print(holidays_df)\n return holidays_df\n\n\nif __name__ == '__main__':\n db.execute(\"SELECT record_date from reverse_repo ORDER BY record_date DESC LIMIT 1\")\n record_date = db.fetchone()\n rrp_treasury_date = get_next_rrp_treasury_date(datetime.strptime(record_date[0], \"%Y-%m-%d\") + timedelta(days=1))\n retail_df = get_next_retail_sales_date()\n cpi_df = get_next_cpi_date()\n\n with open(r\"database/economic_date.json\", \"w\") as r:\n information = {\n \"Retail Sales\": {\"Ref Month\": retail_df[\"Data Month\"], \"Release Date\": retail_df[\"Release Date\"]},\n \"Inflation\": {\"Ref Month\": cpi_df[\"Reference Month\"], \"Release Date\": cpi_df[\"Release Date\"]},\n \"Daily Treasury\": {\"Release Date\": rrp_treasury_date},\n \"Reverse Repo\": {\"Release Date\": rrp_treasury_date},\n }\n json.dump(information, r, indent=4)\n" ]
[ [ "pandas.to_datetime", "pandas.read_html" ] ]
Shumpei-Kikuta/BentoML
[ "4fe508934ab431ea5c414ee9d8b84c2104688381" ]
[ "tests/conftest.py" ]
[ "import functools\nimport glob\nimport inspect\nimport os\n\nimport imageio\nimport numpy as np\nimport pytest\n\nfrom bentoml.yatai.client import YataiClient\nfrom tests.bento_service_examples.example_bento_service import ExampleBentoService\n\n\ndef pytest_configure():\n '''\n global constants for tests\n '''\n # async request client\n async def assert_request(\n method,\n url,\n headers=None,\n data=None,\n timeout=None,\n assert_status=None,\n assert_data=None,\n ):\n if assert_status is None:\n assert_status = 200\n\n import aiohttp\n\n try:\n async with aiohttp.ClientSession() as sess:\n async with sess.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as r:\n r_body = await r.read()\n except RuntimeError:\n # the event loop has been closed due to previous task failed, ignore\n return\n\n if callable(assert_status):\n assert assert_status(r.status), f\"{r.status} {r_body}\"\n else:\n assert r.status == assert_status, f\"{r.status} {r_body}\"\n\n if assert_data is not None:\n if callable(assert_data):\n assert assert_data(r_body), r_body\n else:\n assert r_body == assert_data\n\n pytest.assert_request = assert_request\n\n # dataframe json orients\n pytest.DF_ORIENTS = {\n 'split',\n 'records',\n 'index',\n 'columns',\n 'values',\n # 'table', # TODO(bojiang)\n }\n pytest.DF_AUTO_ORIENTS = {\n 'records',\n 'columns',\n }\n\n def _since_version(ver: str):\n def _wrapper(func):\n if not inspect.iscoroutinefunction(func):\n\n @functools.wraps(func)\n def _wrapped(*args, **kwargs):\n from packaging import version\n\n bundle_ver = os.environ.get(\"BUNDLE_BENTOML_VERSION\")\n if bundle_ver and version.parse(bundle_ver) < version.parse(ver):\n pytest.skip()\n return func(*args, **kwargs)\n\n else:\n\n @functools.wraps(func)\n async def _wrapped(*args, **kwargs):\n from packaging import version\n\n bundle_ver = os.environ.get(\"BUNDLE_BENTOML_VERSION\")\n if bundle_ver and version.parse(bundle_ver) < version.parse(ver):\n pytest.skip()\n return await func(*args, **kwargs)\n\n return _wrapped\n\n return _wrapper\n\n pytest.since_bentoml_version = _since_version\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--batch-request\", action=\"store_false\")\n\n\[email protected]()\ndef is_batch_request(pytestconfig):\n return pytestconfig.getoption(\"batch_request\")\n\n\[email protected]()\ndef bin_file(tmpdir):\n bin_file_ = tmpdir.join(\"bin_file.bin\")\n with open(bin_file_, \"wb\") as of:\n of.write(\"â\".encode('gb18030'))\n return str(bin_file_)\n\n\[email protected]()\ndef bin_files(tmpdir):\n for i in range(10):\n bin_file_ = tmpdir.join(f\"{i}.bin\")\n with open(bin_file_, \"wb\") as of:\n of.write(f\"â{i}\".encode('gb18030'))\n return sorted(glob.glob(str(tmpdir.join(\"*.bin\"))))\n\n\[email protected]()\ndef unicode_file(tmpdir):\n bin_file_ = tmpdir.join(\"bin_file.unicode\")\n with open(bin_file_, \"wb\") as of:\n of.write(\"â\".encode('utf-8'))\n return str(bin_file_)\n\n\[email protected]()\ndef unicode_files(tmpdir):\n for i in range(10):\n bin_file_ = tmpdir.join(f\"{i}.list.unicode\")\n with open(bin_file_, \"wb\") as of:\n of.write(f\"â{i}\".encode('utf-8'))\n return sorted(glob.glob(str(tmpdir.join(\"*.list.unicode\"))))\n\n\[email protected]()\ndef img_file(tmpdir):\n img_file_ = tmpdir.join(\"test_img.jpg\")\n imageio.imwrite(str(img_file_), np.zeros((10, 10)))\n return str(img_file_)\n\n\[email protected]()\ndef img_files(tmpdir):\n for i in range(10):\n img_file_ = tmpdir.join(f\"{i}.list.jpg\")\n imageio.imwrite(str(img_file_), np.zeros((10, 
10)))\n return sorted(glob.glob(str(tmpdir.join(\"*.list.jpg\"))))\n\n\[email protected]()\ndef json_file(tmpdir):\n json_file_ = tmpdir.join(\"test.json\")\n with open(json_file_, \"w\") as of:\n of.write('{\"name\": \"kaith\", \"game\": \"morrowind\"}')\n return str(json_file_)\n\n\[email protected]()\ndef json_files(tmpdir):\n for i in range(10):\n file_ = tmpdir.join(f\"{i}.list.json\")\n with open(file_, \"w\") as of:\n of.write('{\"i\": %d, \"name\": \"kaith\", \"game\": \"morrowind\"}' % i)\n return sorted(glob.glob(str(tmpdir.join(\"*.list.json\"))))\n\n\nclass TestModel(object):\n def predict_dataframe(self, df):\n return df[\"col1\"] * 2\n\n def predict_image(self, input_datas):\n for input_data in input_datas:\n assert input_data is not None\n return [input_data.shape for input_data in input_datas]\n\n def predict_multi_images(self, original, compared):\n return (original == compared).all()\n\n def predict_json(self, input_jsons):\n assert input_jsons\n return [{\"ok\": True}] * len(input_jsons)\n\n\[email protected]()\ndef example_bento_service_class():\n # When the ExampleBentoService got saved and loaded again in the test, the two class\n # attribute below got set to the loaded BentoService class. Resetting it here so it\n # does not effect other tests\n ExampleBentoService._bento_service_bundle_path = None\n ExampleBentoService._bento_service_bundle_version = None\n return ExampleBentoService\n\n\[email protected]()\ndef bento_service(example_bento_service_class): # pylint:disable=redefined-outer-name\n \"\"\"Create a new ExampleBentoService\n \"\"\"\n test_model = TestModel()\n test_svc = example_bento_service_class()\n test_svc.pack('model', test_model)\n return test_svc\n\n\[email protected]()\ndef bento_bundle_path(bento_service): # pylint:disable=redefined-outer-name\n \"\"\"Create a new ExampleBentoService, saved it to tmpdir, and return full saved_path\n \"\"\"\n saved_path = bento_service.save()\n yield saved_path\n delete_saved_bento_service(bento_service.name, bento_service.version)\n\n\ndef delete_saved_bento_service(name, version):\n yc = YataiClient()\n yc.repository.delete(f'{name}:{version}')\n" ]
[ [ "numpy.zeros" ] ]
brucearctor/datacatalog-connectors-rdbms
[ "7ff5dc858ea7aa21486343304fc281692480cdb8" ]
[ "google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/prepare/sql_objects/sql_objects_datacatalog_entry_factory.py" ]
[ "#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nfrom google.cloud import datacatalog\nfrom google.protobuf import timestamp_pb2\nfrom google.datacatalog_connectors.commons.prepare.base_entry_factory import \\\n BaseEntryFactory\n\nfrom google.datacatalog_connectors.rdbms.scrape import constants\n\n\nclass SQLObjectsDataCatalogEntryFactory(BaseEntryFactory):\n\n def __init__(self, project_id, location_id, entry_resource_url_prefix,\n entry_group_id, sql_objects_config):\n self.__project_id = project_id\n self.__location_id = location_id\n self.__entry_resource_url_prefix = entry_resource_url_prefix\n self.__entry_group_id = entry_group_id\n self.__sql_objects_config = sql_objects_config\n\n def make_entry_for_sql_object(self, sql_object_key, sql_object_type,\n sql_object_item):\n sql_object_config = self.__sql_objects_config[sql_object_key]\n\n metadata_def = sql_object_config[\n constants.SQL_OBJECT_ITEM_METADATA_DEF_KEY]\n\n name = sql_object_item[constants.SQL_OBJECT_ITEM_NAME]\n\n entry_id = self._format_id(name)\n entry = datacatalog.Entry()\n\n entry.user_specified_type = sql_object_type\n entry.user_specified_system = self.__entry_group_id\n\n entry.display_name = self._format_display_name(name)\n\n sql_object_fields = metadata_def[constants.SQL_OBJECT_FIELDS]\n\n sql_object_fields = self.__filter_entry_model_fields(sql_object_fields)\n\n self.__set_entry_system_timestamps(entry, sql_object_fields,\n sql_object_item)\n\n self.__set_entry_description(entry, sql_object_fields, sql_object_item)\n\n entry.name = datacatalog.DataCatalogClient.entry_path(\n self.__project_id, self.__location_id, self.__entry_group_id,\n entry_id)\n\n entry.linked_resource = '{}/{}'.format(\n self.__entry_resource_url_prefix, entry_id)\n\n return entry_id, entry\n\n @classmethod\n def __filter_entry_model_fields(cls, sql_object_fields):\n sql_object_fields = [\n field for field in sql_object_fields\n if field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_MODEL] ==\n constants.SQL_OBJECT_ENTRY_MODEL\n ]\n return sql_object_fields\n\n @classmethod\n def __set_entry_system_timestamps(cls, entry, sql_object_fields,\n sql_object_item):\n\n created_time_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_CREATE_TIME)\n\n if created_time_field:\n created_time = cls.__get_sql_object_field_value(\n sql_object_item, created_time_field)\n\n update_time_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_UPDATE_TIME)\n\n update_time = None\n if update_time_field:\n update_time = cls.__get_sql_object_field_value(\n sql_object_item, update_time_field)\n\n create_time, update_time = \\\n cls.__convert_source_system_timestamp_fields(\n created_time,\n update_time)\n\n if create_time and update_time:\n created_timestamp = timestamp_pb2.Timestamp()\n created_timestamp.FromSeconds(create_time)\n entry.source_system_timestamps.create_time = 
created_timestamp\n\n updated_timestamp = timestamp_pb2.Timestamp()\n updated_timestamp.FromSeconds(update_time)\n entry.source_system_timestamps.update_time = updated_timestamp\n\n @classmethod\n def __set_entry_description(cls, entry, sql_object_fields,\n sql_object_item):\n description_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_DESCRIPTION)\n\n if description_field:\n description = sql_object_item.get(\n description_field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME])\n\n if pd.isna(description):\n description = ''\n\n entry.description = description\n\n @classmethod\n def __find_sql_object_field(cls, sql_object_fields, field_name):\n return next(\n iter([\n field for field in sql_object_fields\n if field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME] == field_name\n ]), None)\n\n @classmethod\n def __get_sql_object_field_value(cls, sql_object_item, field):\n return sql_object_item.get(field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME])\n\n @classmethod\n def __convert_timestamp_value_to_epoch(cls, timestamp_value):\n # In case it is not a valid timestamp field, we ignore it.\n if pd.notnull(timestamp_value) and isinstance(timestamp_value,\n pd.Timestamp):\n return int(timestamp_value.timestamp())\n\n @classmethod\n def __convert_source_system_timestamp_fields(cls, raw_create_time,\n raw_update_time):\n create_time = cls.__convert_timestamp_value_to_epoch(raw_create_time)\n if not pd.isnull(raw_update_time):\n update_time = cls.__convert_timestamp_value_to_epoch(\n raw_update_time)\n else:\n update_time = create_time\n return create_time, update_time\n" ]
[ [ "pandas.notnull", "pandas.isna", "pandas.isnull" ] ]
carseven/color-blind-test-hack
[ "debac7cc1c8176ff722e1e7fb5f5eae12a92d3a0" ]
[ "src/model.py" ]
[ "import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python.keras import models, layers\r\nfrom tensorflow.python.keras.datasets import mnist\r\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\r\nimport random\r\nimport json\r\n\r\n\r\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\r\n\r\n# Las imagenes se convierten en tensores de 3 dimnsiones para poder ser\r\n# con las conv2d de keras.\r\ntrain_images = train_images.reshape((60000, 28, 28, 1))\r\n\r\n# Se normalizan las imagenes en un factor 1/255 y se convierten en tipo float\r\ntrain_images = train_images.astype('float32') / 255\r\n\r\n# Las imagenes se convierten en tensores de 3 dimnsiones para poder ser\r\n# con las conv2d de keras.\r\ntest_images = test_images.reshape((10000, 28, 28, 1))\r\n\r\n# Se normalizan las imagenes en un factor 1/255 y se convierten en tipo float\r\ntest_images = test_images.astype('float32') / 255\r\n\r\n# Se codifican las etiquetas como one-hot enconding\r\ntrain_labels = tf.keras.utils.to_categorical(train_labels)\r\ntest_labels = tf.keras.utils.to_categorical(test_labels)\r\n\r\n\"\"\"### Aumentación de datos\"\"\"\r\n\r\n\r\n# Función propia, ruido gaussiano\r\n\r\ndef ruido(imagen):\r\n varianza = 0.1\r\n desviacion = varianza * random.random()\r\n ruido = np.random.normal(0, desviacion, imagen.shape)\r\n imagen += ruido\r\n np.clip(imagen, 0., 255.)\r\n return imagen\r\n\r\n\r\n# Configuración del generador de imagenes.\r\ndatagen = ImageDataGenerator(zoom_range=0.1,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n preprocessing_function=ruido)\r\n\r\n# Solo utilizamos aumentación en el conjunto de entrenamiento. Se indica al\r\n# al generador que imagenes tiene que procesar\r\ndatagen.fit(train_images)\r\n\r\n\r\n# Se indica que es un modelo secuencial\r\nmodel = models.Sequential()\r\n\r\n# Se añaden las capas al modelo\r\n\r\n# Bloque 1 CNN\r\nmodel.add(layers.Conv2D(32, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True,\r\n input_shape=(28, 28, 1)))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 2 CNN\r\nmodel.add(layers.Conv2D(64, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 3 CNN\r\nmodel.add(layers.Conv2D(64, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 4 FC\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dense(64, activation='relu'))\r\nmodel.add(layers.Dropout(0.5))\r\nmodel.add(layers.Dense(10, activation='softmax'))\r\n\r\n# Se configura la función de perdidas y el algoritmo de apredizaje.\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n# Visualización de los bloques y parametros del modelo implementado.\r\nmodel.summary()\r\n\r\n# Se indica que datos alimentan al modelo en la fase de entrenamiento y en la\r\n# de validación. 
En este caso los datos de entrenamiento viene generador tras\r\n# procesar el conjunto de entrenamiento.\r\nhistory = model.fit(datagen.flow(train_images, train_labels,\r\n batch_size=256),\r\n steps_per_epoch=int(train_images.shape[0] / 256) + 1,\r\n epochs=20,\r\n validation_data=(test_images, test_labels))\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\r\nprint('Test accuracy:', test_acc)\r\n\r\npwd = '/Users/carseven/dev/color-blind-test-hack/'\r\n\r\nmodel.save_weights(pwd + 'src/model-data/mnist.tf', save_format='tf')\r\n\r\nmodel_config = model.to_json()\r\nwith open(pwd + 'src/model-data/model-config.json',\r\n 'w',\r\n encoding='utf-8') as f:\r\n json.dump(model_config, f, ensure_ascii=False, indent=4)\r\n" ]
[ [ "tensorflow.keras.utils.to_categorical", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.layers.MaxPooling2D", "numpy.clip", "tensorflow.python.keras.datasets.mnist.load_data", "tensorflow.python.keras.preprocessing.image.ImageDataGenerator", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Dropout", "numpy.random.normal", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.Conv2D" ] ]
SocratesNFR/evodynamic
[ "682b610096182bde2298cdca352e7b319a0e4c41" ]
[ "examples/reservoir/test_mnist_esn_with_memory.py" ]
[ "\"\"\"\nTesting features and method for\nEcho State Network - Reservoir for MNIST digit classification with memory\n\"\"\"\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport numpy as np\nimport evodynamic.experiment as experiment\nimport evodynamic.connection.random as conn_random\nimport evodynamic.connection as connection\nimport evodynamic.connection.custom as conn_custom\nimport evodynamic.cells.activation as act\nimport evodynamic.utils as utils\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train_num_images = x_train.shape[0]\nx_train_image_shape = x_train.shape[1:3]\nx_test_num_images = x_test.shape[0]\nx_test_image_shape = x_test.shape[1:3]\n\nx_train = ((x_train / 255.0) > 0.5).astype(np.float64)\nx_train = x_train.reshape(x_train.shape[0],-1)\nx_train = np.transpose(x_train)\n\nx_test = ((x_test / 255.0) > 0.5).astype(np.float64)\nx_test = x_test.reshape(x_test.shape[0],-1)\nx_test = np.transpose(x_test)\n\ny_train_one_hot = np.zeros((y_train.max()+1, y_train.size))\ny_train_one_hot[y_train,np.arange(y_train.size)] = 1\ny_train = y_train_one_hot\n\ny_test_one_hot = np.zeros((y_test.max()+1, y_test.size))\ny_test_one_hot[y_test,np.arange(y_test.size)] = 1\ny_test = y_test_one_hot\n\nepochs = 1\nbatch_size = 100\nnum_batches = int(np.ceil(x_train_num_images / batch_size))\nnum_batches_test = int(np.ceil(x_test_num_images / batch_size))\nwidth = 28*28\ninput_size = 28*28\noutput_layer_size = 10\nimage_num_pixels = x_train_image_shape[0] * x_train_image_shape[1]\nmemory_size = 2\n\nexp = experiment.Experiment(input_start=0,input_delay=0,training_start=1,\n training_delay=1,reset_cells_after_train=True,\n batch_size=batch_size)\n\n\ninput_esn = exp.add_input(tf.float64, [input_size], \"input_esn\")\ndesired_output = exp.add_desired_output(tf.float64, [output_layer_size], \"desired_output\")\n\ng_esn = exp.add_group_cells(name=\"g_esn\", amount=width)\ng_esn_real = g_esn.add_real_state(state_name='g_esn_real')\n\nexp.add_connection(\"input_conn\", connection.IndexConnection(input_esn,g_esn_real,\n np.arange(width)))\n\n\nindices = [[i,i] for i in range(width)]\nvalues = [1]*width\ndense_shape = [width, width]\n\ng_esn_real_conn = conn_custom.create_custom_sparse_matrix('g_esn_real_conn',\n indices,\n values,\n dense_shape)\n\nexp.add_connection(\"g_esn_conn\",\n connection.WeightedConnection(g_esn_real,\n g_esn_real,act.relu,\n g_esn_real_conn))\n\ng_esn_memory = exp.add_state_memory(g_esn_real,memory_size)\n\noutput_layer = exp.add_group_cells(name=\"output_layer\", amount=output_layer_size)\noutput_layer_real_state = output_layer.add_real_state(state_name='output_layer_real_state')\n\nesn_output_conn = conn_random.create_xavier_connection(\"esn_output_conn\", memory_size*width, output_layer_size)\nexp.add_trainable_connection(\"output_conn\",\n connection.WeightedConnection(g_esn_memory,\n output_layer_real_state,\n act.sigmoid,\n esn_output_conn))\n\nc_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=exp.trainable_connections[\"output_conn\"].output,\n labels=desired_output,\n axis=0))\n\nexp.set_training(c_loss,0.03)\n\n# Monitors are needed because \"reset_cells_after_train=True\"\nexp.add_monitor(\"output_layer\", \"output_layer_real_state\", timesteps=1)\nexp.add_monitor(\"g_esn\", \"g_esn_real\", timesteps=1)\n\nexp.initialize_cells()\n\nfor epoch in range(epochs):\n print(\"Epoch:\", epoch)\n shuffled_indices = np.random.permutation(x_train_num_images)\n batch_indices = 
np.split(shuffled_indices,\\\n np.arange(batch_size,x_train_num_images,batch_size))\n for step, batch_idx in enumerate(batch_indices):\n input_esn_batch = x_train[:,batch_idx]\n\n desired_output_batch = y_train[:,batch_idx]\n\n input_esn_batch_1 = np.array(input_esn_batch)\n input_esn_batch_2 = np.array(input_esn_batch)\n\n split_img_idx = width//2\n input_esn_batch_1[:split_img_idx,:] = 0\n input_esn_batch_2[split_img_idx:,:] = 0\n\n feed_dict = {input_esn: input_esn_batch_2, desired_output: desired_output_batch}\n # Double run step\n exp.run_step(feed_dict=feed_dict)\n\n feed_dict = {input_esn: input_esn_batch_1, desired_output: desired_output_batch}\n exp.run_step(feed_dict=feed_dict)\n res_ca = exp.get_monitor(\"g_esn\", \"g_esn_real\")[:,:,0]\n prediction_batch = exp.get_monitor(\"output_layer\", \"output_layer_real_state\")[0,:,:]\n accuracy_batch = np.sum(np.argmax(prediction_batch, axis=0) == np.argmax(desired_output_batch, axis=0)) / batch_size\n\n utils.progressbar_loss_accu(step+1, num_batches, exp.training_loss, accuracy_batch)\n\n print(\"Testing...\")\n\n # Testing!\n shuffled_indices_test = np.random.permutation(x_test_num_images)\n batch_indices_test = np.split(shuffled_indices_test,\\\n np.arange(batch_size,x_test_num_images,batch_size))\n for step_test, batch_idx in enumerate(batch_indices_test):\n input_esn_batch = x_test[:,batch_idx]\n\n desired_output_batch = y_test[:,batch_idx]\n\n input_esn_batch_1 = np.array(input_esn_batch)\n input_esn_batch_2 = np.array(input_esn_batch)\n\n split_img_idx = width//2\n input_esn_batch_1[:split_img_idx,:] = 0\n input_esn_batch_2[split_img_idx:,:] = 0\n\n feed_dict = {input_esn: input_esn_batch_2, desired_output: desired_output_batch}\n # Double run step\n exp.run_step(feed_dict=feed_dict, testing = True)\n\n feed_dict = {input_esn: input_esn_batch_1, desired_output: desired_output_batch}\n exp.run_step(feed_dict=feed_dict, testing = True)\n res_ca = exp.get_monitor(\"g_esn\", \"g_esn_real\")[:,:,0]\n prediction_batch = exp.get_monitor(\"output_layer\", \"output_layer_real_state\")[0,:,:]\n accuracy_batch = np.sum(np.argmax(prediction_batch, axis=0) == np.argmax(desired_output_batch, axis=0)) / batch_size\n\n utils.progressbar_loss_accu(step_test+1, num_batches_test, exp.training_loss, accuracy_batch)" ]
[ [ "numpy.transpose", "numpy.ceil", "numpy.random.permutation", "numpy.argmax", "numpy.arange", "numpy.array", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2" ] ]
banboooo044/natural-language-sentiment-anaysis
[ "e18d7c0373d9f0a00d5a3cc14abf671081bc940b" ]
[ "classifier/code-analysis/nb_gridCV.py" ]
[ "# 詳しい説明は同様のプログラム logis_gradCV.py を参照\nimport sys\nsys.path.append('../')\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import sparse\n\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom src.runner import Runner\nfrom src.util import Logger\nfrom src.model_NB import ModelMultinomialNB\n\n\nlogger = Logger()\n\ndef makefig(result):\n sns.set_style(\"whitegrid\")\n ax = sns.boxenplot(data = result, width=0.4)\n ax.set_ylabel('Accuracy', size=14)\n ax.tick_params(labelsize=14)\n plt.savefig(f'../model/tuning/{NAME}-NB.png',dpi=300)\n\n\nif __name__ == '__main__':\n base_params = {\n 'alpha' : 1.0,\n 'fit_prior' : True,\n 'class_prior' : None\n }\n params_NB = dict(base_params)\n param_grid_ = {'alpha': [0.001, 0.01, 0.1, 1, 10, 100]}\n\n features = [\n \"bow\", \"n-gram\",\"tf-idf\", \"n-gram-tf-idf\"\n ]\n\n results = [ ]\n NAME = \":\".join(features)\n for name in features:\n x = Runner.load_x_train(name)\n y = Runner.load_y_train()\n model = ModelMultinomialNB(name, **dict(params_NB))\n search = GridSearchCV( model, cv=6, param_grid=param_grid_ , return_train_score=True, verbose=10, refit=True )\n search.fit(x, y)\n results.append( (search, name) )\n logger.info(f'{name} - bestscore : {search.best_score_} - result :{search.cv_results_[\"mean_test_score\"]}')\n \n res = pd.DataFrame.from_dict(\n { name : search.cv_results_[\"mean_test_score\"] for search, name in results }, \n orient='index', \n columns=param_grid_['alpha']\n )\n\n for search, name in results:\n logger.info(f'{name} - bestscore : {search.best_score_}')\n \n res.to_csv(f'../model/tuning/{NAME}-NB.csv')\n\n makefig(res)\n" ]
[ [ "matplotlib.pyplot.savefig", "sklearn.model_selection.GridSearchCV", "pandas.DataFrame.from_dict" ] ]
vkola-lab/multi-GPU
[ "d50cff0d587b640fb3af94329102d3eacdd70aa5" ]
[ "xfdlfw/metric/roc_auc.py" ]
[ "\"\"\"\nCreated on Thu Oct 14 14:47:38 2021\n\n@author: cxue2\n\"\"\"\n\nfrom ._metric import Metric\nfrom ._misc import _numpy\nimport sklearn.metrics as M\n\n\nclass RocAuc(Metric):\n\n @_numpy\n def __call__(self, output, y_true):\n\n return M.roc_auc_score(y_true, output[:, 1], **self.kwargs)\n" ]
[ [ "sklearn.metrics.roc_auc_score" ] ]
maljovec/ann-benchmarks
[ "03f9b3db562794787c936f9ea661ad3b08d5f062" ]
[ "ann_benchmarks/datasets.py" ]
[ "import h5py\nimport numpy\nimport os\nimport random\nimport sys\n\nimport subprocess\n# import samplers\n# import pyDOE\n# import ghalton\n\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve # Python 3\n\n\ndef download(src, dst):\n if not os.path.exists(dst):\n # TODO: should be atomic\n print('downloading %s -> %s...' % (src, dst))\n urlretrieve(src, dst)\n\n\ndef get_dataset_fn(dataset):\n if not os.path.exists('data'):\n os.mkdir('data')\n return os.path.join('data', '%s.hdf5' % dataset)\n\n\ndef get_dataset(which):\n hdf5_fn = get_dataset_fn(which)\n try:\n url = 'http://ann-benchmarks.com/%s.hdf5' % which\n download(url, hdf5_fn)\n except:\n print(\"Cannot download %s\" % url)\n if which in DATASETS:\n print(\"Creating dataset locally\")\n DATASETS[which](hdf5_fn)\n hdf5_f = h5py.File(hdf5_fn)\n return hdf5_f\n\n\n# Everything below this line is related to creating datasets\n# You probably never need to do this at home, just rely on the prepared datasets at http://ann-benchmarks.com\n\ndef write_output(train, test, fn, distance, point_type='float', count=100):\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n n = 0\n f = h5py.File(fn, 'w')\n f.attrs['distance'] = distance\n f.attrs['point_type'] = point_type\n print('train size: %9d * %4d' % train.shape)\n print('test size: %9d * %4d' % test.shape)\n f.create_dataset('train', (len(train), len(\n train[0])), dtype=train.dtype)[:] = train\n f.create_dataset('test', (len(test), len(\n test[0])), dtype=test.dtype)[:] = test\n neighbors = f.create_dataset('neighbors', (len(test), count), dtype='i')\n distances = f.create_dataset('distances', (len(test), count), dtype='f')\n bf = BruteForceBLAS(distance, precision=train.dtype)\n bf.fit(train)\n queries = []\n for i, x in enumerate(test):\n if i % 1000 == 0:\n print('%d/%d...' 
% (i, test.shape[0]))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[i] = [j for j, _ in res]\n distances[i] = [d for _, d in res]\n f.close()\n\n\ndef train_test_split(X, test_size=10000):\n import sklearn.model_selection\n print('Splitting %d*%d into train/test' % X.shape)\n return sklearn.model_selection.train_test_split(X, test_size=test_size, random_state=1)\n\n\ndef glove(out_fn, d):\n import zipfile\n\n url = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'\n fn = os.path.join('data', 'glove.twitter.27B.zip')\n download(url, fn)\n with zipfile.ZipFile(fn) as z:\n print('preparing %s' % out_fn)\n z_fn = 'glove.twitter.27B.%dd.txt' % d\n X = []\n for line in z.open(z_fn):\n v = [float(x) for x in line.strip().split()[1:]]\n X.append(numpy.array(v))\n X_train, X_test = train_test_split(X)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef _load_texmex_vectors(f, n, k):\n import struct\n\n v = numpy.zeros((n, k))\n for i in range(n):\n f.read(4) # ignore vec length\n v[i] = struct.unpack('f' * k, f.read(k*4))\n\n return v\n\n\ndef _get_irisa_matrix(t, fn):\n import struct\n m = t.getmember(fn)\n f = t.extractfile(m)\n k, = struct.unpack('i', f.read(4))\n n = m.size // (4 + 4*k)\n f.seek(0)\n return _load_texmex_vectors(f, n, k)\n\n\ndef sift(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz'\n fn = os.path.join('data', 'sift.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'sift/sift_base.fvecs')\n test = _get_irisa_matrix(t, 'sift/sift_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef gist(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/gist.tar.gz'\n fn = os.path.join('data', 'gist.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'gist/gist_base.fvecs')\n test = _get_irisa_matrix(t, 'gist/gist_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef _load_mnist_vectors(fn):\n import gzip\n import struct\n\n print('parsing vectors in %s...' 
% fn)\n f = gzip.open(fn)\n type_code_info = {\n 0x08: (1, \"!B\"),\n 0x09: (1, \"!b\"),\n 0x0B: (2, \"!H\"),\n 0x0C: (4, \"!I\"),\n 0x0D: (4, \"!f\"),\n 0x0E: (8, \"!d\")\n }\n magic, type_code, dim_count = struct.unpack(\"!hBB\", f.read(4))\n assert magic == 0\n assert type_code in type_code_info\n\n dimensions = [struct.unpack(\"!I\", f.read(4))[0] for i in range(dim_count)]\n\n entry_count = dimensions[0]\n entry_size = numpy.product(dimensions[1:])\n\n b, format_string = type_code_info[type_code]\n vectors = []\n for i in range(entry_count):\n vectors.append([struct.unpack(format_string, f.read(b))[0]\n for j in range(entry_size)])\n return numpy.array(vectors)\n\n\ndef mnist(out_fn):\n download(\n 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'mnist-train.gz')\n download(\n 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'mnist-test.gz')\n train = _load_mnist_vectors('mnist-train.gz')\n test = _load_mnist_vectors('mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef fashion_mnist(out_fn):\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'fashion-mnist-train.gz')\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'fashion-mnist-test.gz')\n train = _load_mnist_vectors('fashion-mnist-train.gz')\n test = _load_mnist_vectors('fashion-mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef transform_bag_of_words(filename, n_dimensions, out_fn):\n import gzip\n from scipy.sparse import lil_matrix\n from sklearn.feature_extraction.text import TfidfTransformer\n from sklearn import random_projection\n with gzip.open(filename, 'rb') as f:\n file_content = f.readlines()\n entries = int(file_content[0])\n words = int(file_content[1])\n file_content = file_content[3:] # strip first three entries\n print(\"building matrix...\")\n A = lil_matrix((entries, words))\n for e in file_content:\n doc, word, cnt = [int(v) for v in e.strip().split()]\n A[doc - 1, word - 1] = cnt\n print(\"normalizing matrix entries with tfidf...\")\n B = TfidfTransformer().fit_transform(A)\n print(\"reducing dimensionality...\")\n C = random_projection.GaussianRandomProjection(\n n_components=n_dimensions).fit_transform(B)\n X_train, X_test = train_test_split(C)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef nytimes(out_fn, n_dimensions):\n fn = 'nytimes_%s.txt.gz' % n_dimensions\n download('https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/docword.nytimes.txt.gz', fn)\n transform_bag_of_words(fn, n_dimensions, out_fn)\n\n\ndef random(out_fn, n_dims, n_samples, centers, distance):\n import sklearn.datasets\n\n X, _ = sklearn.datasets.make_blobs(\n n_samples=n_samples, n_features=n_dims, centers=centers, random_state=1)\n X_train, X_test = train_test_split(X, test_size=0.1)\n write_output(X_train, X_test, out_fn, distance)\n\ndef random_bitstring(out_fn, n_dims, n_samples, n_queries):\n import sklearn.datasets\n\n Y, _ = sklearn.datasets.make_blobs(n_samples=n_samples, n_features=n_dims, centers=n_queries, random_state=1)\n X = numpy.zeros((n_samples, n_dims), dtype=numpy.bool)\n for i, vec in enumerate(Y):\n X[i] = numpy.array([v > 0 for v in vec], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=n_queries)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\n\ndef word2bits(out_fn, path, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 
'http://web.stanford.edu/~maxlam/word_vectors/compressed/%s/%s.tar.gz' % (\n path, fn)\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n n_words, k = [int(z) for z in next(f).strip().split()]\n X = numpy.zeros((n_words, k), dtype=numpy.bool)\n for i in range(n_words):\n X[i] = numpy.array([float(z) > 0 for z in next(f).strip().split()[1:]], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef sift_hamming(out_fn, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 'http://sss.projects.itu.dk/ann-benchmarks/datasets/%s.tar.gz' % fn\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n lines = f.readlines()\n X = numpy.zeros((len(lines), 256), dtype=numpy.bool)\n for i, line in enumerate(lines):\n X[i] = numpy.array([int(x) > 0 for x in line.decode().strip()], dtype=numpy.bool)\n X_train, X_test = train_test_split(X, test_size = 1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef lastfm(out_fn, n_dimensions, test_size=50000):\n # This tests out ANN methods for retrieval on simple matrix factorization based\n # recommendation algorithms. The idea being that the query/test vectors are user factors\n # and the train set are item factors from the matrix factorization model.\n\n # Since the predictor is a dot product, we transform the factors first as described in this\n # paper: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf\n # This hopefully replicates the experiments done in this post:\n # http://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/\n\n # The dataset is from \"Last.fm Dataset - 360K users\":\n # http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-360K.html\n\n # this requires the implicit package to generate the factors (on my desktop/gpu this only\n # takes 4-5 seconds to train - but could take 1-2 minutes on a laptop)\n from implicit.datasets.lastfm import get_lastfm\n from implicit.approximate_als import augment_inner_product_matrix\n import implicit\n\n # train an als model on the lastfm data\n _, _, play_counts = get_lastfm()\n model = implicit.als.AlternatingLeastSquares(factors=n_dimensions)\n model.fit(implicit.nearest_neighbours.bm25_weight(play_counts, K1=100, B=0.8))\n\n # transform item factors so that each one has the same norm, and transform the user\n # factors such by appending a 0 column\n _, item_factors = augment_inner_product_matrix(model.item_factors)\n user_factors = numpy.append(model.user_factors,\n numpy.zeros((model.user_factors.shape[0], 1)),\n axis=1)\n\n # only query the first 50k users (speeds things up signficantly without changing results)\n user_factors = user_factors[:test_size]\n\n # after that transformation a cosine lookup will return the same results as the inner product\n # on the untransformed data\n write_output(item_factors, user_factors, out_fn, 'angular')\n\n\n# Writing my own custom samplers to know how these algorithms perform on\n# denser dimensionalities (up to 10D and 1 billion points) and\n# I need to test the time it takes to build a full graph\n\n\ndef uniform(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = numpy.random.uniform(size=(n_samples, n_dims))\n write_output(X, X, out_fn, distance)\n\n\ndef normal(out_fn, seed, 
n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = numpy.clip(numpy.random.normal(\n loc=0.5, scale=0.15, size=(n_samples, n_dims)), 0, 1)\n write_output(X, X, out_fn, distance)\n\n\ndef cvt(out_fn, seed, n_dims, n_samples, distance):\n result = subprocess.run(['samplers/cvt/createCVT', '-N', str(n_samples),\n '-D', str(n_dims), '-seed',\n str(seed), '-ann', '1',\n '-iterations', '1000000'],\n stdout=subprocess.PIPE)\n lines = result.stdout.decode('utf-8').strip().split('\\n')\n X = numpy.zeros((n_samples, n_dims))\n for i, line in enumerate(lines):\n X[i, :] = list(map(float, line.strip().split(' ')))\n write_output(X, X, out_fn, distance)\n\n\ndef shell(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n r = numpy.atleast_2d(numpy.random.uniform(low=0.5, high=1, size=n_samples)).T\n sampler = samplers.DirectionalSampler(n_dims)\n X = ((r * sampler.generate_samples(n_samples)) + 1) / 2.\n write_output(X, X, out_fn, distance)\n\n\ndef lhs(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = pyDOE.lhs(n_dims, n_samples)\n write_output(X, X, out_fn, distance)\n\n\ndef halton(out_fn, seed, n_dims, n_samples, distance):\n sequencer = ghalton.GeneralizedHalton(n_dims, seed)\n X = numpy.array(sequencer.get(n_samples))\n write_output(X, X, out_fn, distance)\n\n\nDATASETS = {\n 'fashion-mnist-784-euclidean': fashion_mnist,\n 'gist-960-euclidean': gist,\n 'glove-25-angular': lambda out_fn: glove(out_fn, 25),\n 'glove-50-angular': lambda out_fn: glove(out_fn, 50),\n 'glove-100-angular': lambda out_fn: glove(out_fn, 100),\n 'glove-200-angular': lambda out_fn: glove(out_fn, 200),\n 'mnist-784-euclidean': mnist,\n 'random-xs-20-euclidean': lambda out_fn: random(out_fn, 20, 10000, 100, 'euclidean'),\n 'random-s-100-euclidean': lambda out_fn: random(out_fn, 100, 100000, 1000, 'euclidean'),\n 'random-xs-20-angular': lambda out_fn: random(out_fn, 20, 10000, 100, 'angular'),\n 'random-s-100-angular': lambda out_fn: random(out_fn, 100, 100000, 1000, 'angular'),\n 'random-xs-16-hamming': lambda out_fn: random_bitstring(out_fn, 16, 10000, 100),\n 'random-s-128-hamming': lambda out_fn: random_bitstring(out_fn, 128, 50000, 1000),\n 'random-l-256-hamming': lambda out_fn: random_bitstring(out_fn, 256, 100000, 1000),\n 'sift-128-euclidean': sift,\n 'nytimes-256-angular': lambda out_fn: nytimes(out_fn, 256),\n 'nytimes-16-angular': lambda out_fn: nytimes(out_fn, 16),\n 'word2bits-800-hamming': lambda out_fn: word2bits(out_fn, '400K', 'w2b_bitlevel1_size800_vocab400K'),\n 'uniform-5-euclidean': lambda out_fn: uniform(out_fn, 0, 5, 10000000, 'euclidean'),\n 'normal-5-euclidean': lambda out_fn: normal(out_fn, 0, 5, 10000000, 'euclidean'),\n 'cvt-5-euclidean': lambda out_fn: cvt(out_fn, 0, 5, 10000000, 'euclidean'),\n 'shell-5-euclidean': lambda out_fn: shell(out_fn, 0, 5, 10000000, 'euclidean'),\n 'lhs-5-euclidean': lambda out_fn: lhs(out_fn, 0, 5, 10000000, 'euclidean'),\n 'halton-5-euclidean': lambda out_fn: halton(out_fn, 0, 5, 10000000, 'euclidean'),\n 'uniform-3-euclidean': lambda out_fn: uniform(out_fn, 0, 3, 1000000, 'euclidean'),\n 'normal-3-euclidean': lambda out_fn: normal(out_fn, 0, 3, 1000000, 'euclidean'),\n 'cvt-3-euclidean': lambda out_fn: cvt(out_fn, 0, 3, 1000000, 'euclidean'),\n 'shell-3-euclidean': lambda out_fn: shell(out_fn, 0, 3, 1000000, 'euclidean'),\n 'lhs-3-euclidean': lambda out_fn: lhs(out_fn, 0, 3, 1000000, 'euclidean'),\n 'halton-3-euclidean': lambda out_fn: halton(out_fn, 0, 3, 1000000, 'euclidean'),\n 'lastfm-64-dot': lambda out_fn: 
lastfm(out_fn, 64),\n 'sift-256-hamming': lambda out_fn: sift_hamming(out_fn, 'sift.hamming.256'),\n}\n" ]
[ [ "numpy.random.uniform", "numpy.zeros", "sklearn.feature_extraction.text.TfidfTransformer", "numpy.random.seed", "numpy.random.normal", "numpy.product", "scipy.sparse.lil_matrix", "numpy.array", "sklearn.random_projection.GaussianRandomProjection" ] ]
anikeshkamath/IITGn_SURF_2016
[ "396ce97e4ed8980a8d650a9e486a97b427bfc731" ]
[ "Graph_Algorithms/indegree_CountMin.py" ]
[ "'''\nThis module is a streaming algorithm developed to compute the (in)degree centrality of vertices using CountMin sketch.\nCountMin provides approximate frequencies for each distinct element in the input stream. Accuracy of the approximation based on the dimensions of the 2D array used to store these frequencies. Exact value of the gurantee is derived in the CountMin paper.\nIndegree centrality can be seen as the frequency of the destination vertex. In the case of undirected graphs, the edge aslso increases the source vertex frequency. In this way, CountMin is used for finite space streaming algorithm for computation of indegree centrality of the vertices, which otherwise would have required maintaining an entire graph structure.\n'''\n\nfrom streampy_classes import Stream\nfrom streampy_classes import Agent\nfrom streampy_classes import Operators\nimport networkx as nx\nimport numpy as np\nimport hashlib\n\n'''\nmy_hash_value is the hash function used. This makes use of md5 hash library, which gives less collisions than the hashing done by a python dictionary\nouter_update updates the 2D array. As discussed in Count-Min algorithm, multiple copies of the array is kept so as to get a better guarantee on the aaproximate frequency provided.\nagent_update is the function that is fed to the corresponding agent eventually.\n'''\n\ndef my_hash_value(input_element, array_no, size_of_array):\n m = hashlib.md5()\n m.update(str(input_element) + str(array_no))\n hash_hex = int(m.hexdigest()[-8:],16)\n return (hash_hex)%(size_of_array)\n\ndef outer_update(directedness):\n def agent_update(ip_lst, c_struct):\n lst=ip_lst[0]\n for i in lst:\n source = i[0]\n sink = i[1]\n for j in range(c_struct.shape[0]):\n ind_sink = my_hash_value(sink, j, c_struct.shape[1])\n c_struct[j][ind_sink] += 1\n if not directedness:\n ind_source = my_hash_value(source, j, c_struct.shape[1])\n c_struct[j][ind_source] += 1\n return [], c_struct\n return agent_update\n\n\nclass indegree_CountMin(object):\n\n '''\n is_directed is the boolean for directedness of graph\n iStream is the input stream of edges\n count_structure is the 2D array that maintains the frequencies\n no_array being number of arrays and size_array being size of each array\n \n '''\n\n def __init__(self, iStream, is_directed, no_arrays, size_array, w_s = 15, step_size = 15, oStream= []):\n self.input_stream = iStream\n self.count_struct = np.zeros([no_arrays, size_array], 'float')\n self.is_directed = is_directed\n self.window_size = w_s\n self.step_size = step_size\n update = outer_update(self.is_directed)\n self.count_Agent = Operators.window_agent(update, [self.input_stream], [oStream], self.count_struct, None, self.window_size, self.step_size)\n\n def query(self, queried_vertice):\n lst_of_freqs = []\n nu_rows = self.count_struct.shape[0] \n for j in range(nu_rows):\n ind = my_hash_value(queried_vertice, j, self.count_struct.shape[1])\n lst_of_freqs.append(self.count_struct[j][ind])\n return min(lst_of_freqs)\n\n\n\n\n \n\n \n\n \n\n \n \n" ]
[ [ "numpy.zeros" ] ]
brain-link/cmne
[ "0d87e0bcd0041b250ee484f39341a0bf83f72949" ]
[ "archive/I_implementation/II_run_crossvalidation.py" ]
[ "#**\n# @file Option_4_bio_mne_comparison.py\n# @author Christoph Dinh <[email protected]>;\n# Matti Hamalainen <[email protected]>\n# @version 1.0\n# @date May, 2017\n#\n# @section LICENSE\n#\n# Copyright (C) 2017, Christoph Dinh. All rights reserved.\n#\n# @brief Model inverse operator with Deep Learning Model\n# to estimate a MNE-dSPM inverse solution on single epochs\n#\n#**\n\n#==================================================================================\n#%%\nimport os\nimport sys\n\nimport config as cfg\n\nsys.path.append(cfg.repo_path + 'I_implementation/I_cmne/II_training') #Add relative path to include modules\nsys.path.append(cfg.repo_path + 'I_implementation/helpers')\nsys.path.append(cfg.repo_path + 'I_implementation/I_cmne/I_hyperparameter_evaluation')\n\nimport numpy as np\nimport random\nimport matplotlib\nmatplotlib.use('Agg')# remove for plt.show()\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nimport datetime\n\nfrom mne.minimum_norm import apply_inverse\n\nimport mne\nfrom mne.minimum_norm import apply_inverse_epochs, read_inverse_operator\n\nfrom keras.models import load_model\n\nfrom helpers.cmnesettings import CMNESettings\nfrom helpers.cmnedata import CMNEData, standardize, reshape_future_data\n\n\n###################################################################################################\n# The Script\n###################################################################################################\n## assr_270LP_fs900 fs_1_nu_10_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-26_062129.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_0_fs_1_nu_10_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_001638.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_1_fs_1_nu_10_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_051144.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_2_fs_1_nu_10_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_095246.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_3_fs_1_nu_10_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_144903.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_4.txt'\n# fname_cross_validation_mse = 
'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_4_fs_1_nu_10_lb_80.txt'\n\n# # 5\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_195717.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_5_fs_1_nu_10_lb_80.txt'\n\n# # 6\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-30_010414.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_6_fs_1_nu_10_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_160_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-24_211559.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_0_fs_1_nu_160_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_023115.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_1_fs_1_nu_160_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_074010.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_2_fs_1_nu_160_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_125038.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_3_fs_1_nu_160_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_180344.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_4.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_4_fs_1_nu_160_lb_80.txt'\n\n# # 5 <<<\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_235531.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_5_fs_1_nu_160_lb_80.txt'\n\n# # 6\n# 
fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-26_061135.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_6_fs_1_nu_160_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-24_211925.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_0_fs_1_nu_1280_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_023622.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_1_fs_1_nu_1280_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_074933.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_2_fs_1_nu_1280_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_130259.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_3_fs_1_nu_1280_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_182002.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_4.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_4_fs_1_nu_1280_lb_80.txt'\n\n# # 5\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-26_001853.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_5_fs_1_nu_1280_lb_80.txt'\n\n# # 6\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-26_063512.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_6_fs_1_nu_1280_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_10\n# look_back = 10\n\n# # 0\n# fname_model = 
'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb10/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_10_2020-07-24_160109.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb10/assr_270LP_fs900_cross_idcs_job3_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb10/assr_270LP_fs900_cross_mse_job3_it_0_fs_1_nu_1280_lb_10.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_160\nlook_back = 160\n\n# 0\nfname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb160/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_160_2020-07-25_101246.h5'\nfname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb160/assr_270LP_fs900_cross_idcs_job9_it_0.txt'\nfname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb160/assr_270LP_fs900_cross_mse_job9_it_0_fs_1_nu_1280_lb_160.txt'\n\n#%% Data Settings\ndata_settings = CMNESettings( repo_path=cfg.repo_path, data_path=cfg.data_path,\n fname_raw=cfg.fname_raw,\n fname_inv=cfg.fname_inv,\n fname_eve=cfg.fname_eve,\n fname_test_idcs=cfg.fname_test_idcs,\n meg_and_eeg=cfg.meg_and_eeg\n )\n\nevent_id, tmin, tmax = 1, -0.2, 0.5\ntrain_percentage = 0.85\ncross_validation_percentage = 0.85\n\ndata = CMNEData(cmne_settings=data_settings)\ndata.load_data(event_id=event_id, tmin=tmin, tmax=tmax, train_percentage=train_percentage)\n\n#################################\n#%%\nnum_train_idcs = len(data.train_idcs())\n\nwhole_list = list(range(num_train_idcs))\n\nif os.path.isfile(fname_cross_validation_idcs):\n cross_validation_train_idcs = []\n with open(fname_cross_validation_idcs, \"r\") as f:\n for line in f:\n cross_validation_train_idcs.append(int(line.strip()))\n cross_validation_test_idcs = [item for item in whole_list if item not in cross_validation_train_idcs]\n\n\n sel_epochs = data.train_epochs(cross_validation_test_idcs)\n\n nave = 2 #len(epochs)\n \n # Compute inverse solution and stcs for each epoch\n # Use the same inverse operator as with evoked data (i.e., set nave)\n # If you use a different nave, dSPM just scales by a factor sqrt(nave)\n #sel_epochs = mne.set_eeg_reference(sel_epochs, ref_channels=None, copy=True)[0]\n sel_epochs.apply_proj()\n\n # Compute inverse solution and stcs for each epoch\n # Use the same inverse operator as with evoked data (i.e., set nave)\n # If you use a different nave, dSPM just scales by a factor sqrt(nave)\n\n stcs = apply_inverse_epochs(sel_epochs, inverse_operator=data.inv_op(), lambda2=data.lambda2(), method=data.method(), pick_ori=\"normal\", nave=nave)\n\n # Attention - just an approximation, since not all stc are considered for the mean and the std\n stc_data = np.hstack([stc.data for stc in stcs])\n stc_mean = np.mean(stc_data, axis=1)\n stc_std = np.std(stc_data, axis=1)\n stc_data = None\n #Attention end\n\n # load model\n lstm_model = load_model(fname_model)\n\n future_steps = 1\n\n count_stcs = 1;\n #################################\n # %%\n with open(fname_cross_validation_mse, \"w\") as f:\n for stc in stcs:\n print('STC %d'%(count_stcs))\n stc_normalized = standardize(stc.data,mean=stc_mean,std=stc_std)\n stc_normalized_T = stc_normalized.transpose()\n \n feature_list, label_list = reshape_future_data(stc=stc_normalized_T, look_back=look_back, future_steps=future_steps)\n\n features = np.array(feature_list)\n labels = np.array(label_list)\n \n #%% LSTM estimation\n step = 1;\n for feature, label in (zip(features, labels)):\n stc_prior = np.expand_dims(feature, axis=0)\n stc_predict = 
lstm_model.predict(stc_prior)\n stc_mse = ((stc_predict - label)**2).mean(axis=1)\n \n #print('STC %d, Step %d, Error %f'%(count_stcs, step, stc_mse))\n\n f.write(str(stc_mse) +\"\\n\")\n step = step + 1;\n \n count_stcs = count_stcs + 1;\n\n if count_stcs == 11:\n break # break here" ]
[ [ "numpy.hstack", "numpy.expand_dims", "matplotlib.use", "numpy.std", "numpy.array", "numpy.mean" ] ]
mo-vic/HanZiGan
[ "dd8dc4b1eeffb01f928d7a4e5931cf6af1d7c1a4" ]
[ "utils/utils.py" ]
[ "import math\n\nimport torch\nfrom torch._six import inf\nfrom torchvision.utils import make_grid\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef _grad_norm(parameters, norm_type=2):\n r\"\"\"Compute gradient norm of an iterable of parameters.\n\n The norm is computed over all gradients together, as if they were\n concatenated into a single vector.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor.\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n total_norm = total_norm ** (1. / norm_type)\n\n return total_norm\n\n\ndef train(model, dataloader, criterion, optimizer, use_gpu, writer, epoch, scheduler, num_fakes, flip_rate, show_freq):\n all_acc = []\n all_d_loss = []\n all_g_loss = []\n\n d_optimizer, g_optimizer = optimizer\n d_scheduler, g_scheduler = scheduler\n\n for idx, data in tqdm(enumerate(dataloader), desc=\"Training Epoch {}\".format(epoch)):\n # train discriminator\n d_optimizer.zero_grad()\n g_optimizer.zero_grad()\n labels = torch.cat([torch.bernoulli(torch.ones((data.size(0), 1)) * flip_rate), torch.zeros((num_fakes, 1))],\n dim=0)\n if use_gpu:\n data, labels = data.cuda(), labels.cuda()\n outputs = model(data, mode='D')\n d_loss = criterion(outputs, labels)\n d_loss.backward()\n d_optimizer.step()\n\n all_d_loss.append(d_loss.item())\n acc = (torch.ge(outputs, 0.5).long().data == labels.long().data).double().mean()\n all_acc.append(acc.item())\n\n writer.add_scalar(\"train_d_grad_norm\", _grad_norm(model.parameters()),\n global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_d_loss\", d_loss.item(), global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_acc\", acc.item(), global_step=epoch * len(dataloader) + idx)\n\n # train generator\n d_optimizer.zero_grad()\n g_optimizer.zero_grad()\n fake_images, outputs = model(mode='G')\n labels = torch.ones((num_fakes, 1))\n if use_gpu:\n labels = labels.cuda()\n g_loss = criterion(outputs, labels)\n g_loss.backward()\n g_optimizer.step()\n\n all_g_loss.append(g_loss.item())\n\n if idx % show_freq == 0:\n fake_images = make_grid(fake_images, nrow=round(math.sqrt(num_fakes)))\n writer.add_image(\"fake_images\", fake_images, global_step=epoch * len(dataloader) + idx)\n real_images = make_grid(data, nrow=round(math.sqrt(data.size(0))))\n writer.add_image(\"real_images\", real_images, global_step=epoch * len(dataloader) + idx)\n\n writer.add_scalar(\"train_g_grad_norm\", _grad_norm(model.parameters()),\n global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_g_loss\", g_loss.item(), global_step=epoch * len(dataloader) + idx)\n\n writer.add_scalar(\"acc\", np.mean(all_acc).item(), global_step=epoch)\n writer.add_scalar(\"d_loss\", np.mean(all_d_loss).item(), global_step=epoch)\n writer.add_scalar(\"g_loss\", np.mean(all_g_loss).item(), global_step=epoch)\n\n d_scheduler.step(np.mean(all_d_loss).item())\n g_scheduler.step(np.mean(all_g_loss).item())\n\n print(\"Epoch {}: total discriminator loss: {}\".format(epoch, 
np.mean(all_d_loss).item()), end=',')\n print(\"total generator loss: {}, global accuracy:{}.\".format(np.mean(all_g_loss), np.mean(all_acc)))\n" ]
[ [ "torch.zeros", "torch.ge", "torch.ones", "numpy.mean" ] ]
lacie-life/MasterStudy
[ "16bb79a41555693c7e8cbb3c248c4670e0097073" ]
[ "ComputerVision/Bag-of-Visual-Words/BoVW.py" ]
[ "import argparse\nimport cv2\nimport numpy as np\nimport os\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom matplotlib import pyplot as plt\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics.pairwise import chi2_kernel\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\nclass_names = []\n\ndef labelLoader(path):\n global class_names\n class_names = os.listdir(path)\n\n\ndef getFiles(train, path):\n images = []\n for folder in os.listdir(path):\n for file in os.listdir(os.path.join(path, folder)):\n images.append(os.path.join(path, os.path.join(folder, file)))\n\n if (train is True):\n np.random.shuffle(images)\n return images\n\n\ndef getDescriptors(sift, img):\n kp, des = sift.detectAndCompute(img, None)\n return des\n\n\ndef readImage(img_path):\n # print(img_path)\n img = cv2.imread(img_path, 0)\n return cv2.resize(img, (150, 150))\n\n\ndef vstackDescriptors(descriptor_list):\n descriptors = np.array(descriptor_list[0])\n for descriptor in descriptor_list[1:]:\n descriptors = np.vstack((descriptors, descriptor))\n\n return descriptors\n\n\ndef clusterDescriptors(descriptors, no_clusters, batch):\n kmeans = KMeans(n_clusters=no_clusters).fit(descriptors)\n # kmeans = MiniBatchKMeans(n_clusters=no_clusters, batch_size=batch, verbose=1).fit(descriptors)\n return kmeans\n\n\ndef extractFeatures(kmeans, descriptor_list, image_count, no_clusters):\n im_features = np.array([np.zeros(no_clusters) for i in range(image_count)])\n for i in range(image_count):\n for j in range(len(descriptor_list[i])):\n feature = descriptor_list[i][j]\n feature = feature.reshape(1, 128)\n idx = kmeans.predict(feature)\n im_features[i][idx] += 1\n\n return im_features\n\n\ndef normalizeFeatures(scale, features):\n return scale.transform(features)\n\n\ndef plotHistogram(im_features, no_clusters):\n x_scalar = np.arange(no_clusters)\n y_scalar = np.array([abs(np.sum(im_features[:, h], dtype=np.int32)) for h in range(no_clusters)])\n\n plt.bar(x_scalar, y_scalar)\n plt.xlabel(\"Visual Word Index\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Complete Vocabulary Generated\")\n plt.xticks(x_scalar + 0.4, x_scalar)\n plt.savefig(\"histogram.png\")\n\n\ndef svcParamSelection(X, y, kernel, nfolds):\n Cs = [0.5, 0.1, 0.15, 0.2, 0.3]\n gammas = [0.1, 0.11, 0.095, 0.105]\n param_grid = {'C': Cs, 'gamma': gammas}\n grid_search = GridSearchCV(SVC(kernel=kernel), param_grid, cv=nfolds)\n grid_search.fit(X, y)\n grid_search.best_params_\n return grid_search.best_params_\n\n\ndef findSVM(im_features, train_labels, kernel):\n features = im_features\n if (kernel == \"precomputed\"):\n features = np.dot(im_features, im_features.T)\n\n params = svcParamSelection(features, train_labels, kernel, 5)\n C_param, gamma_param = params.get(\"C\"), params.get(\"gamma\")\n print(C_param, gamma_param)\n\n svm = SVC(kernel=kernel, C=C_param, gamma=gamma_param)\n svm.fit(features, train_labels)\n return svm\n\n\ndef plotConfusionMatrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n cm = confusion_matrix(y_true, y_pred)\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n\n\ndef plotConfusions(true, predictions):\n np.set_printoptions(precision=2)\n\n plotConfusionMatrix(true, predictions, classes=class_names,\n title='Confusion matrix, without normalization')\n\n plotConfusionMatrix(true, predictions, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\n plt.savefig(\"confusions.png\")\n\n\ndef findAccuracy(true, predictions):\n print('accuracy score: %0.3f' % accuracy_score(true, predictions))\n\n\ndef trainModel(path, no_clusters, kernel, batch):\n images = getFiles(True, path)\n print(\"Train images path detected.\")\n sift = cv2.xfeatures2d.SIFT_create()\n descriptor_list = []\n train_labels = np.array([])\n image_count = len(images)\n\n for img_path in images:\n for i in range(0, len(class_names)):\n if class_names[i] in img_path:\n class_index = i\n\n train_labels = np.append(train_labels, class_index)\n img = readImage(img_path)\n des = getDescriptors(sift, img)\n descriptor_list.append(des)\n\n\n # descriptors = vstackDescriptors(descriptor_list)\n # print(\"Descriptors vstacked.\")\n\n kmeans = clusterDescriptors(descriptor_list, no_clusters, batch)\n print(\"Descriptors clustered.\")\n\n im_features = extractFeatures(kmeans, descriptor_list, image_count, no_clusters)\n print(\"Images features extracted.\")\n\n scale = StandardScaler().fit(im_features)\n im_features = scale.transform(im_features)\n print(\"Train images normalized.\")\n\n plotHistogram(im_features, no_clusters)\n print(\"Features histogram plotted.\")\n\n svm = findSVM(im_features, train_labels, kernel)\n print(\"SVM fitted.\")\n print(\"Training completed.\")\n\n return kmeans, scale, svm, im_features\n\n\ndef testModel(path, kmeans, scale, svm, im_features, no_clusters, kernel):\n test_images = getFiles(False, path)\n print(\"Test images path detected.\")\n\n count = 0\n true = []\n descriptor_list = []\n\n index = list(range(0, len(class_names)))\n\n # name_dict = dict(zip(index, class_names.copy()))\n\n name_dict = {\n \"0\": class_names[0],\n \"1\": class_names[1],\n \"2\": class_names[2],\n \"3\": class_names[3],\n \"4\": class_names[4],\n \"5\": class_names[5],\n \"6\": class_names[6],\n \"7\": class_names[7]\n }\n print(name_dict)\n\n sift = cv2.xfeatures2d.SIFT_create()\n\n for img_path in test_images:\n img = readImage(img_path)\n des = getDescriptors(sift, img)\n\n if (des is not None):\n count += 1\n descriptor_list.append(des)\n\n for i in range(0, len(class_names)):\n if class_names[i] in img_path:\n print(img_path)\n print(class_names[i])\n true.append(class_names[i])\n\n test_features = extractFeatures(kmeans, descriptor_list, count, no_clusters)\n\n test_features = scale.transform(test_features)\n\n kernel_test = test_features\n if 
(kernel == \"precomputed\"):\n kernel_test = np.dot(test_features, im_features.T)\n\n predictions = [name_dict[str(int(i))] for i in svm.predict(kernel_test)]\n print(\"Test images classified.\")\n\n plotConfusions(true, predictions)\n print(\"Confusion matrixes plotted.\")\n\n findAccuracy(true, predictions)\n print(\"Accuracy calculated.\")\n print(\"Execution done.\")\n\n\ndef execute(train_path, test_path, no_clusters, kernel, batch):\n kmeans, scale, svm, im_features = trainModel(train_path, no_clusters, kernel, batch)\n testModel(test_path, kmeans, scale, svm, im_features, no_clusters, kernel)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--train_path', action=\"store\", dest=\"train_path\", default=\"/home/jun/Github/BoVW/random-2/train\")\n parser.add_argument('--test_path', action=\"store\", dest=\"test_path\", default=\"/home/jun/Github/BoVW/random-2/test\")\n parser.add_argument('--word', action=\"store\", dest=\"word\", default=500)\n parser.add_argument('--batch', action=\"store\", dest=\"batch\", default=3000)\n parser.add_argument('--kernel_type', action=\"store\", dest=\"kernel_type\", default=\"linear\")\n\n args = vars(parser.parse_args())\n if (not (args['kernel_type'] == \"linear\" or args['kernel_type'] == \"precomputed\")):\n print(\"Kernel type must be either linear or precomputed\")\n exit(0)\n\n labelLoader(args['train_path'])\n print(class_names)\n execute(args['train_path'], args['test_path'], int(args['word']), args['kernel_type'], args['batch'])\n" ]
[ [ "numpy.sum", "sklearn.cluster.KMeans", "matplotlib.pyplot.ylabel", "numpy.vstack", "numpy.append", "matplotlib.pyplot.xticks", "matplotlib.pyplot.savefig", "numpy.set_printoptions", "matplotlib.pyplot.title", "matplotlib.pyplot.bar", "sklearn.svm.SVC", "numpy.zeros", "matplotlib.pyplot.subplots", "numpy.arange", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "sklearn.preprocessing.StandardScaler", "numpy.random.shuffle", "numpy.array", "numpy.dot", "matplotlib.pyplot.xlabel" ] ]
RomanShen/radial-bnn
[ "7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c" ]
[ "src/models/distributions.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\" \n@author: romanshen \n@file: distributions.py \n@time: 2021/05/07\n@contact: [email protected]\n\"\"\"\n\n\nimport torch\n\n# Priors\n\n\ndef gaussian_prior(name, log2pi, mu, sigma, device):\n \"\"\"\n Args:\n *args: {\"mu\": , \"sigma\":, \"log2pi\"}\n Returns: log_gaussian_pdf that takes a weight of arbitrary shape\n \"\"\"\n if mu == 0 and sigma == 1:\n # We handle this case slightly differently as it is common and can be made more efficient\n def log_gaussian_pdf(x):\n x = x.view(x.shape[0], -1)\n return -log2pi * x.shape[1] / 2 - torch.sum(x ** 2) / 2.0\n\n return log_gaussian_pdf\n else:\n mu_tensor = torch.tensor(\n mu, requires_grad=False, dtype=torch.float32, device=device\n )\n sigma_tensor = torch.tensor(\n sigma, requires_grad=False, dtype=torch.float32, device=device\n )\n two_sigma_squared = 2 * (sigma_tensor ** 2)\n log_sigma = torch.log(sigma_tensor)\n\n def log_gaussian_pdf(x):\n x = x.view(x.shape[0], -1)\n log_pd = -log2pi * x.shape[1] / 2\n log_pd = log_pd - torch.sum((x - mu_tensor) ** 2, dim=1) / two_sigma_squared\n log_pd = log_pd - log_sigma * x.shape[1] / 2\n return log_pd\n\n return log_gaussian_pdf\n\n\n# Sampling noise distributions\n\n\ndef radial(size):\n \"\"\"\n Creates a distribution that is unit Gaussian along r and uniform over \\theta.\n :param size: The size of the weight distribution to be generated.\n Zeroth dimension is variational samples.\n 1+ dimensions are the weight for each sample from the variational distribution.\n The same weight is applied to each example in a batch.\n :return: noise distribution\n \"\"\"\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n # First we find a random direction (\\epsilon_{\\text{MFVI}} in equation (3) on page 4)\n epsilon_mfvi = torch.randn(size, device=device)\n\n # Then we pick a distance (r in equation (3) on page 4)\n distance = torch.randn((size[0]), device=device)\n\n # Then we normalize each variational sample independently\n if len(size) == 2:\n normalizing_factor = torch.norm(\n epsilon_mfvi.view(size[0], -1), p=2, dim=1\n ).unsqueeze(1)\n distance = distance.unsqueeze(1)\n elif len(size) == 3:\n normalizing_factor = (\n torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)\n .unsqueeze(1)\n .unsqueeze(1)\n )\n distance = distance.unsqueeze(1).unsqueeze(1)\n elif len(size) == 5:\n # Here we have a CNN with dimensions (var samples, out_channels, in_channels, kernel, kernel)\n normalizing_factor = (\n torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)\n .unsqueeze(1)\n .unsqueeze(1)\n .unsqueeze(1)\n .unsqueeze(1)\n )\n distance = distance.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1)\n else:\n raise ValueError(\n \"Number of dimensions for epsilon not expected. Are you sure you wanted size {}\".format(\n size\n )\n )\n\n direction = epsilon_mfvi / normalizing_factor\n epsilon_radial = direction * distance\n return epsilon_radial\n\n\ndef gaussian(size):\n \"\"\"\n Returns a tensor of random epsilon using the default gaussian unit distribution\n :param size: shape of tensor to return (tuple)\n :return: FloatTensor of Size\n \"\"\"\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n epsilon_mfvi = torch.randn(size, device=device)\n return epsilon_mfvi\n" ]
[ [ "torch.sum", "torch.randn", "torch.tensor", "torch.cuda.is_available", "torch.log" ] ]
lkilcher/dolfyn-light
[ "416bf6aa8a3455cebf973f416c9e4ba89a801a71" ]
[ "dolfyn/adv/motion.py" ]
[ "import numpy as np\nimport scipy.signal as sig\nfrom scipy.integrate import cumtrapz\nfrom .rotate import inst2earth, _rotate_vel2body\nimport warnings\n\n\nclass CalcMotion(object):\n\n \"\"\"\n A 'calculator' for computing the velocity of points that are\n rigidly connected to an ADV-body with an IMU.\n\n Parameters\n ----------\n\n advo : `adv_raw<dolfyn.adv.base.adv_raw>`\n The IMU-adv object that will be used to compute motion.\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. (default: 1/3 of accel_filtfreq)\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> from dolfyn.adv import motion as avmot\n\n >>> dat = avm.read_nortek('my_data_file.vec')\n\n >>> mcalc = avmot.CalcMotion(dat)\n\n # Calculate the motion of a point that is (.3, .1, .06) meters\n # from the adv-body origin:\n >>> mot = mcalc([.3, .1, .06])\n\n \"\"\"\n\n def __init__(self, advo,\n accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n to_earth=True):\n\n self.advo = advo\n self.accel_filtfreq = accel_filtfreq\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n self.accelvel_filtfreq = vel_filtfreq\n self.to_earth = to_earth\n\n self._set_Accel()\n self._set_AccelStable()\n self.AngRt = advo.AngRt # No copy because not modified.\n\n def _set_Accel(self, ):\n advo = self.advo\n if advo.props['coord_sys'] == 'inst':\n self.Accel = np.einsum('ijk,ik->jk',\n advo.orientmat,\n advo.Accel)\n elif self.advo.props['coord_sys'] == 'earth':\n self.Accel = advo.Accel.copy()\n else:\n raise Exception((\"Invalid coordinate system '%s'. The coordinate \"\n \"system must either be 'earth' or 'inst' to \"\n \"perform motion correction.\")\n % (self.advo.props['coord_sys'], ))\n\n def _set_AccelStable(self, ):\n \"\"\"\n \"\"\"\n self.AccelStable = acc = self.Accel.copy()\n if self.accel_filtfreq == 0:\n acc[:] = acc.mean(-1)[..., None]\n else:\n flt = sig.butter(1, self.accel_filtfreq / (self.advo.fs / 2))\n for idx in range(3):\n acc[idx] = sig.filtfilt(flt[0], flt[1], acc[idx])\n\n def __call__(self, vec):\n \"\"\"\n Calculate the motion of the point specified by vec (in meters,\n in the adv-body coordinate system).\n\n Parameters\n ----------\n\n vec : |np.ndarray| (len(3) or 3 x M)\n The vector in meters (or set of vectors) from the\n body-origin (center of head end-cap) to the point of\n interest (in the body coord-sys).\n\n Returns\n -------\n umot : |np.ndarray| (3 x M x N_time)\n The motion (velocity) array (3, n_time).\n\n \"\"\"\n return self.calc_uacc() + self.calc_urot(np.array(vec), )\n\n def calc_uacc(self, ):\n \"\"\"\n Calculates the translational velocity from the acceleration\n signal.\n\n Returns\n -------\n uacc : |np.ndarray| (3 x n_time)\n The acceleration-induced velocity array (3, n_time).\n \"\"\"\n samp_freq = self.advo.fs\n\n hp = self.Accel - self.AccelStable\n\n dat = np.concatenate((np.zeros(list(hp.shape[:-1]) + [1]),\n cumtrapz(hp, dx=1. 
/ samp_freq)), axis=-1)\n if self.accelvel_filtfreq > 0:\n filt_freq = self.accelvel_filtfreq\n # 8th order butterworth filter.\n filt = sig.butter(2, float(filt_freq) / (samp_freq / 2))\n for idx in range(hp.shape[0]):\n dat[idx] = dat[idx] - sig.filtfilt(filt[0], filt[1], dat[idx])\n return dat\n\n def calc_urot(self, vec, to_earth=None):\n\n \"\"\"\n Calculate the induced velocity due to rotations of the instrument\n about the IMU center.\n\n Parameters\n ----------\n\n vec : |np.ndarray| (len(3) or 3 x M)\n The vector in meters (or vectors) from the body-origin\n (center of head end-cap) to the point of interest (in the\n body coord-sys).\n\n Returns\n -------\n urot : |np.ndarray| (3 x M x N_time)\n The rotation-induced velocity array (3, n_time).\n\n \"\"\"\n\n if to_earth is None:\n to_earth = self.to_earth\n\n dimflag = False\n if vec.ndim == 1:\n vec = vec.copy().reshape((3, 1))\n dimflag = True\n\n # Correct for the body->imu distance.\n # The nortek_body2imu vector is subtracted because of\n # vector addition:\n # body2head = body2imu + imu2head\n # Thus:\n # imu2head = body2head - body2imu\n vec = vec - self.advo.body2imu_vec[:, None]\n\n # This motion of the point *vec* due to rotations should be the\n # cross-product of omega (rotation vector) and the vector.\n # u=dz*omegaY-dy*omegaZ,v=dx*omegaZ-dz*omegaX,w=dy*omegaX-dx*omegaY\n # where vec=[dx,dy,dz], and AngRt=[omegaX,omegaY,omegaZ]\n urot = np.array([(vec[2][:, None] * self.AngRt[1] -\n vec[1][:, None] * self.AngRt[2]),\n (vec[0][:, None] * self.AngRt[2] -\n vec[2][:, None] * self.AngRt[0]),\n (vec[1][:, None] * self.AngRt[0] -\n vec[0][:, None] * self.AngRt[1]),\n ])\n\n if to_earth:\n urot = np.einsum('jik,jlk->ilk', self.advo['orientmat'], urot)\n\n if dimflag:\n return urot[:, 0, :]\n\n return urot\n\n\ndef _calc_probe_pos(advo, separate_probes=False):\n \"\"\"\n !!!Currently this only works for Nortek Vectors!\n\n In the future, we could use the transformation matrix (and a\n probe-length lookup-table?)\n \"\"\"\n # According to the ADV_DataSheet, the probe-length radius is\n # 8.6cm @ 120deg from probe-stem axis. If I subtract 1cm\n # (!!!checkthis) to get acoustic receiver center, this is\n # 7.6cm. In the coordinate sys of the center of the probe\n # then, the positions of the centers of the receivers is:\n if advo.make_model == 'Nortek VECTOR' and separate_probes:\n r = 0.076\n # The angle between the x-y plane and the probes\n phi = -30. * np.pi / 180.\n theta = np.array([0., 120., 240.]) * np.pi / \\\n 180. # The angles of the probes from the x-axis.\n return (np.dot(advo.props['body2head_rotmat'].T,\n np.array([r * np.cos(theta),\n r * np.sin(theta),\n r * np.tan(phi) * np.ones(3)])) +\n advo.props['body2head_vec'][:, None]\n )\n else:\n return advo.props['body2head_vec']\n\n\ndef correct_motion(advo,\n accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n to_earth=True,\n separate_probes=False, ):\n \"\"\"\n This function performs motion correction on an IMU-ADV data\n object. The IMU and ADV data should be tightly synchronized and\n contained in a single data object.\n\n Parameters\n ----------\n\n advo : dolfyn.adv.adv class\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. 
(default: 1/3 of accel_filtfreq)\n\n to_earth : bool (optional, default: True)\n All variables in the advo.props['rotate_vars'] list will be\n rotated into either the earth frame (to_earth=True) or the\n instrument frame (to_earth=False).\n\n separate_probes : bool (optional, default: False)\n a flag to perform motion-correction at the probe tips, and\n perform motion correction in beam-coordinates, then transform\n back into XYZ/earth coordinates. This correction seems to be\n lower than the noise levels of the ADV, so the defualt is to not\n use it (False).\n\n Returns\n -------\n This function returns None, it operates on the input data object,\n ``advo``. The following attributes are added to `advo`:\n\n ``uraw`` is the uncorrected velocity\n\n ``urot`` is the rotational component of the head motion (from\n AngRt)\n\n ``uacc`` is the translational component of the head motion (from\n Accel)\n\n ``AccelStable`` is the low-pass filtered Accel signal\n\n The primary velocity vector attribute, ``_u``, is motion corrected\n such that:\n\n _u = uraw + urot + uacc\n\n The signs are correct in this equation. The measured velocity\n induced by head-motion is *in the opposite direction* of the head\n motion. i.e. when the head moves one way in stationary flow, it\n measures a velocity in the opposite direction. Therefore, to\n remove the motion from the raw signal we *add* the head motion.\n\n Notes\n -----\n\n Acceleration signals from inertial sensors are notorious for\n having a small bias that can drift slowly in time. When\n integrating these signals to estimate velocity the bias is\n amplified and leads to large errors in the estimated\n velocity. There are two methods for removing these errors,\n\n 1) high-pass filter the acceleration signal prior and/or after\n integrating. This implicitly assumes that the low-frequency\n translational velocity is zero.\n 2) provide a slowly-varying reference position (often from a GPS)\n to an IMU that can use the signal (usually using Kalman\n filters) to debias the acceleration signal.\n\n Because method (1) removes `real` low-frequency acceleration,\n method (2) is more accurate. However, providing reference position\n estimates to undersea instruments is practically challenging and\n expensive. Therefore, lacking the ability to use method (2), this\n function utilizes method (1).\n\n For deployments in which the ADV is mounted on a mooring, or other\n semi-fixed structure, the assumption of zero low-frequency\n translational velocity is a reasonable one. However, for\n deployments on ships, gliders, or other moving objects it is\n not. The measured velocity, after motion-correction, will still\n hold some of this contamination and will be a sum of the ADV\n motion and the measured velocity on long time scales. If\n low-frequency motion is known separate from the ADV (e.g. from a\n bottom-tracking ADP, or from a ship's GPS), it may be possible to\n remove that signal from the ADV signal in post-processing. 
The\n accuracy of this approach has not, to my knowledge, been tested\n yet.\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> dat = avm.read_nortek('my_data_file.vec')\n >>> avm.motion.correct_motion(dat)\n\n ``dat`` will now have motion-corrected.\n\n \"\"\"\n\n if hasattr(advo, 'urot'):\n raise Exception('The data object already appears to have been motion corrected.')\n\n if advo.props['coord_sys'] != 'inst':\n raise Exception('The data object must be in the instrument frame to be motion corrected.')\n\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n\n # Be sure the velocity data has been rotated to the body frame.\n _rotate_vel2body(advo)\n\n # Create the motion 'calculator':\n calcobj = CalcMotion(advo,\n accel_filtfreq=accel_filtfreq,\n vel_filtfreq=vel_filtfreq,\n to_earth=to_earth)\n\n ##########\n # Calculate the translational velocity (from the Accel):\n advo.groups['orient'].add('uacc')\n advo.uacc = calcobj.calc_uacc()\n # Copy AccelStable to the adv-object.\n advo.groups['orient'].add('AccelStable')\n advo.AccelStable = calcobj.AccelStable\n\n ##########\n # Calculate rotational velocity (from AngRt):\n pos = _calc_probe_pos(advo, separate_probes)\n # Calculate the velocity of the head (or probes).\n urot = calcobj.calc_urot(pos, to_earth=False)\n if separate_probes:\n # The head->beam transformation matrix\n transMat = advo.config.head.get('TransMatrix', None)\n # The body->head transformation matrix\n rmat = advo.props['body2head_rotmat']\n\n # 1) Rotate body-coordinate velocities to head-coord.\n urot = np.dot(rmat, urot)\n # 2) Rotate body-coord to beam-coord (einsum),\n # 3) Take along beam-component (diagonal),\n # 4) Rotate back to head-coord (einsum),\n urot = np.einsum('ij,kj->ik',\n transMat,\n np.diagonal(np.einsum('ij,jkl->ikl',\n np.linalg.inv(transMat),\n urot)\n ))\n # 5) Rotate back to body-coord.\n urot = np.dot(rmat.T, urot)\n advo.urot = urot\n advo.groups['orient'].add('urot')\n\n ##########\n # Rotate the data into the correct coordinate system.\n # inst2earth expects a 'rotate_vars' property.\n # Add urot, uacc, AccelStable, to it.\n if 'rotate_vars' not in advo.props.keys():\n advo.props['rotate_vars'] = {'_u', 'urot', 'uacc',\n 'Accel', 'AccelStable',\n 'AngRt', 'Mag'}\n else:\n advo.props['rotate_vars'].update({'urot', 'uacc', 'AccelStable'})\n\n # NOTE: Accel, AccelStable, and uacc are in the earth-frame after\n # calc_uacc() call.\n if to_earth:\n advo.Accel = calcobj.Accel\n inst2earth(advo, rotate_vars=advo.props['rotate_vars'] -\n {'Accel', 'AccelStable', 'uacc', })\n else:\n # rotate these variables back to the instrument frame.\n inst2earth(advo, reverse=True,\n rotate_vars={'AccelStable', 'uacc', },\n force=True,\n )\n\n ##########\n # Copy _u -> uraw prior to motion correction:\n advo.add_data('uraw', advo._u.copy(), 'main')\n # Add it to rotate_vars:\n advo.props['rotate_vars'].update({'uraw', })\n\n ##########\n # Remove motion from measured velocity!\n # NOTE: The plus sign is because the measured-induced velocities\n # are in the opposite direction of the head motion.\n # i.e. when the head moves one way in stationary flow, it\n # measures a velocity in the opposite direction.\n advo._u += (advo.urot + advo.uacc)\n\n\nclass CorrectMotion(object):\n\n \"\"\"\n This object performs motion correction on an IMU-ADV data\n object. 
The IMU and ADV data should be tightly synchronized and\n contained in a single data object.\n\n Parameters\n ----------\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. (default: 1/3 of accel_filtfreq)\n\n separate_probes : bool (optional, default: False)\n a flag to perform motion-correction at the probe tips, and\n perform motion correction in beam-coordinates, then transform\n back into XYZ/earth coordinates. This correction seems to be\n lower than the noise levels of the ADV, so the default is to not\n use it (False).\n\n Notes\n -----\n\n Acceleration signals from inertial sensors are notorious for\n having a small bias that can drift slowly in time. When\n integrating these signals to estimate velocity the bias is\n amplified and leads to large errors in the estimated\n velocity. There are two methods for removing these errors,\n\n 1) high-pass filter the acceleration signal prior and/or after\n integrating. This implicitly assumes that the low-frequency\n translational velocity is zero.\n 2) provide a slowly-varying reference position (often from a GPS)\n to an IMU that can use the signal (usually using Kalman\n filters) to debias the acceleration signal.\n\n Because method (1) removes `real` low-frequency acceleration,\n method (2) is more accurate. However, providing reference position\n estimates to undersea instruments is practically challenging and\n expensive. Therefore, lacking the ability to use method (2), this\n function utilizes method (1).\n\n For deployments in which the ADV is mounted on a mooring, or other\n semi-fixed structure, the assumption of zero low-frequency\n translational velocity is a reasonable one. However, for\n deployments on ships, gliders, or other moving objects it is\n not. The measured velocity, after motion-correction, will still\n hold some of this contamination and will be a sum of the ADV\n motion and the measured velocity on long time scales. If\n low-frequency motion is known separate from the ADV (e.g. from a\n bottom-tracking ADP, or from a ship's GPS), it may be possible to\n remove that signal from the ADV signal in post-processing. The\n accuracy of this approach has not, to my knowledge, been tested\n yet.\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> dat = avm.read_nortek('my_data_file.vec')\n >>> mc = avm.CorrectMotion(0.1)\n >>> mc(dat)\n\n \"\"\"\n\n def __init__(self, accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n separate_probes=False):\n\n self.accel_filtfreq = accel_filtfreq\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n self.accelvel_filtfreq = vel_filtfreq\n self.separate_probes = separate_probes\n warnings.warn(\"The 'CorrectMotion' class is being deprecated \"\n \"and will be removed in a future DOLfYN release. 
\"\n \"Use the 'correct_motion' function instead.\",\n DeprecationWarning)\n\n def _rotate_vel2body(self, advo):\n # The transpose should do head to body.\n advo._u = np.dot(advo.props['body2head_rotmat'].T, advo._u)\n\n def _calc_rot_vel(self, calcobj):\n \"\"\"\n Calculate the 'rotational' velocity as measured by the IMU\n rate sensor.\n \"\"\"\n advo = calcobj.advo\n\n # This returns a 3x3 array of probe positions if\n # separate_probes is True.\n pos = self._calc_probe_pos(advo)\n\n # Calculate the velocity of the head (or probes).\n urot = calcobj.calc_urot(pos, to_earth=False)\n\n if self.separate_probes:\n # The head->beam transformation matrix\n transMat = advo.config.head.get('TransMatrix', None)\n # The body->head transformation matrix\n rmat = advo.props['body2head_rotmat']\n\n # 1) Rotate body-coordinate velocities to head-coord.\n urot = np.dot(rmat, urot)\n # 2) Rotate body-coord to beam-coord (einsum),\n # 3) Take along beam-component (diagonal),\n # 4) Rotate back to head-coord (einsum),\n urot = np.einsum('ij,kj->ik',\n transMat,\n np.diagonal(np.einsum('ij,jkl->ikl',\n np.linalg.inv(transMat),\n urot)\n ))\n # 5) Rotate back to body-coord.\n urot = np.dot(rmat.T, urot)\n\n advo.urot = urot\n advo.groups['orient'].add('urot')\n\n def _calc_probe_pos(self, advo):\n \"\"\"\n !!!Currently this only works for Nortek Vectors!\n\n In the future, we could use the transformation matrix (and a\n probe-length lookup-table?)\n \"\"\"\n # According to the ADV_DataSheet, the probe-length radius is\n # 8.6cm @ 120deg from probe-stem axis. If I subtract 1cm\n # (!!!checkthis) to get acoustic receiver center, this is\n # 7.6cm. In the coordinate sys of the center of the probe\n # then, the positions of the centers of the receivers is:\n if advo.make_model == 'Nortek VECTOR' and self.separate_probes:\n r = 0.076\n # The angle between the x-y plane and the probes\n phi = -30. * np.pi / 180.\n theta = np.array([0., 120., 240.]) * np.pi / \\\n 180. # The angles of the probes from the x-axis.\n return (np.dot(advo.props['body2head_rotmat'].T,\n np.array([r * np.cos(theta),\n r * np.sin(theta),\n r * np.tan(phi) * np.ones(3)])) +\n advo.props['body2head_vec'][:, None]\n )\n else:\n return advo.props['body2head_vec']\n\n def _calc_accel_vel(self, calcobj):\n advo = calcobj.advo\n advo.groups['orient'].add('uacc')\n advo.uacc = calcobj.calc_uacc()\n\n def __call__(self, advo, to_earth=True):\n \"\"\"\n Perform motion correction on an IMU-equipped ADV object.\n\n Parameters\n ----------\n advo : :class:`ADVraw <base.ADVraw>`\n The adv object on which to perform motion correction.\n It must contain the following data attributes:\n\n - _u : The velocity array.\n - Accel : The translational acceleration array.\n - AngRt : The rotation-rate array.\n - orientmat : The orientation matrix.\n - props : a dictionary that has 'body2head_vec',\n 'body2head_rotmat' and 'coord_sys'.\n\n to_earth : bool (optional, default: True)\n A boolean that specifies whether the data should be\n rotated into the earth frame.\n\n Notes\n -----\n\n After calling this function, `advo` will have *urot* and\n *uacc* data attributes. 
The velocity vector attribute ``_u``\n will be motion corrected according to:\n\n u_corr = u_raw + uacc + urot\n\n Therefore, to recover the 'raw' velocity, subtract uacc and\n urot from ``_u``.\n\n This method does not return a data object, it operates on\n (motion corrects) the input `advo`.\n\n \"\"\"\n\n calcobj = CalcMotion(advo,\n accel_filtfreq=self.accel_filtfreq,\n vel_filtfreq=self.accelvel_filtfreq,\n to_earth=to_earth)\n\n if 'rotate_vars' not in advo.props.keys():\n advo.props['rotate_vars'] = {'_u', 'urot', 'uacc', 'uraw',\n 'Accel', 'AccelStable',\n 'AngRt', 'Mag'}\n else:\n advo.props['rotate_vars'].update({'urot', 'uacc', 'AccelStable', 'uraw'})\n\n self._rotate_vel2body(advo)\n self._calc_rot_vel(calcobj)\n self._calc_accel_vel(calcobj)\n\n # calcobj.Accel, calcobj.AccelStable, and uacc are already in\n # the earth frame.\n advo.groups['orient'].add('AccelStable')\n advo.AccelStable = calcobj.AccelStable\n advo.add_data('uraw', advo._u.copy(), 'main')\n if to_earth:\n advo.Accel = calcobj.Accel\n inst2earth(advo, rotate_vars=advo.props['rotate_vars'] -\n {'Accel', 'AccelStable', 'uacc', })\n else:\n # rotate these variables back to the instrument frame.\n inst2earth(advo, reverse=True,\n rotate_vars={'AccelStable', 'uacc', },\n force=True,\n )\n # NOTE: The plus sign is because the measured-induced velocities\n # are in the opposite direction of the head motion.\n # i.e. when the head moves one way in stationary flow, it\n # measures a velocity in the opposite direction.\n advo._u += (advo.urot + advo.uacc)\n" ]
[ [ "numpy.ones", "scipy.signal.butter", "numpy.einsum", "scipy.integrate.cumtrapz", "numpy.linalg.inv", "numpy.cos", "scipy.signal.filtfilt", "numpy.tan", "numpy.array", "numpy.sin", "numpy.dot" ] ]
OSUPCVLab/PlantDiseaseCNN
[ "aeed18e9e30e53670d3e9a0bd7bd71cc73f01691" ]
[ "dcgan-generat images/model/Generator.py" ]
[ "import torch.nn as nn\n\nclass Generator(nn.Module):\n def __init__(self, nc, ngf, nz):\n super(Generator,self).__init__()\n self.layer1 = nn.Sequential(nn.ConvTranspose2d(nz,ngf*32,kernel_size=4),\n nn.BatchNorm2d(ngf*32),\n nn.ReLU())\n\n self.layer2 = nn.Sequential(nn.ConvTranspose2d(ngf*32,ngf*16,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*16),\n nn.ReLU())\n\n self.layer3 = nn.Sequential(nn.ConvTranspose2d(ngf*16,ngf*8,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*8),\n nn.ReLU())\n # 4 x 4\n self.layer4 = nn.Sequential(nn.ConvTranspose2d(ngf*8,ngf*4,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*4),\n nn.ReLU())\n # 8 x 8\n self.layer5 = nn.Sequential(nn.ConvTranspose2d(ngf*4,ngf*2,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*2),\n nn.ReLU())\n\n self.layer6 = nn.Sequential(nn.ConvTranspose2d(ngf * 2, ngf, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(ngf),\n nn.ReLU())\n # 16 x 16\n self.layer7 = nn.Sequential(nn.ConvTranspose2d(ngf,nc,kernel_size=4,stride=2,padding=1),\n nn.Tanh())\n\n def forward(self,x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = self.layer6(out)\n out = self.layer7(out)\n return out\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Tanh", "torch.nn.ConvTranspose2d" ] ]
rcjackson/ACT
[ "c57fb55094b142bbbef63e7069d4024049996139" ]
[ "act/qc/qctests.py" ]
[ "\"\"\"\nact.qc.qctests\n------------------------------\n\nHere we define the methods for performing the tests and putting the\nresults in the ancillary quality control varible. If you add a test\nto this file you will need to add a method reference in the main\nqcfilter class definition to make it callable.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport warnings\nfrom act.utils import get_missing_value, convert_units\n\n\n# This is a Mixins class used to allow using qcfilter class that is already\n# registered to the xarray object. All the methods in this class will be added\n# to the qcfilter class. Doing this to make the code spread across more files\n# so it is more manageable and readable. Additinal files of tests can be added\n# to qcfilter by creating a new class in the new file and adding to qcfilter\n# class declaration.\nclass QCTests:\n \"\"\"\n This is a Mixins class used to allow using qcfilter class that is already\n registered to the xarray object. All the methods in this class will be added\n to the qcfilter class. Doing this to make the code spread across more files\n so it is more manageable and readable. Additinal files of tests can be added\n to qcfilter by creating a new class in the new file and adding to qcfilter\n class declaration.\n\n \"\"\"\n def __init__(self, obj, **kwargs):\n self._obj = obj\n\n def add_missing_value_test(self, var_name, missing_value=None,\n missing_value_att_name='missing_value',\n test_number=None, test_assessment='Bad',\n test_meaning=None, flag_value=False,\n prepend_text=None):\n \"\"\"\n Method to add indication in quality control variable\n where data value is set to missing value.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n missing_value : int or float\n Optional missing value to use. If not provided will attempt\n to get it from the variable attribute or use NaN.\n missing_value_att_name : str\n Optional attribute name to use.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if test_meaning is None:\n test_meaning = 'Value is set to missing_value.'\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n if missing_value is None:\n missing_value = get_missing_value(self._obj, var_name, nodefault=True)\n if (missing_value is None and\n self._obj[var_name].values.dtype.type in\n (type(0.0), np.float16, np.float32, np.float64)):\n missing_value = float('nan')\n else:\n missing_value = -9999\n\n # Ensure missing_value attribute is matching data type\n missing_value = np.array(missing_value, dtype=self._obj[var_name].values.dtype.type)\n\n # New method using straight numpy instead of masked array\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n if np.isnan(missing_value) is False:\n index = np.equal(self._obj[var_name].values, missing_value)\n else:\n index = np.isnan(self._obj[var_name].values)\n\n test_dict = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n try:\n self._obj[var_name].attrs[missing_value_att_name]\n except KeyError:\n self._obj[var_name].attrs[missing_value_att_name] = missing_value\n\n return test_dict\n\n def add_less_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than test (i.e. minimum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None, will return without adding test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_min'\n else:\n attr_name = 'fail_min'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value less than {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n # New method with straight numpy\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.less(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_greater_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than test (i.e. maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_max'\n else:\n attr_name = 'fail_max'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value greater than {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.greater(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_less_equal_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than or equal to test\n (i.e. minimum value) and add result to ancillary quality control\n variable. If ancillary quality control variable does not exist it\n will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setttin test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_min'\n else:\n attr_name = 'fail_min'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value less than '\n 'or equal to {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.less_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_greater_equal_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than or equal to test\n (i.e. maximum value) and add result to ancillary quality control\n variable. If ancillary quality control variable does not exist it\n will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setttin test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_max'\n else:\n attr_name = 'fail_max'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value greater than '\n 'or equal to {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.greater_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_equal_to_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform an equal test and add result to ancillary quality\n control variable. If ancillary quality control variable does not\n exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setttin test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_equal_to'\n else:\n attr_name = 'fail_equal_to'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = 'Data value equal to {}.'.format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_not_equal_to_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a not equal to test and add result to ancillary\n quality control variable. If ancillary quality control variable does\n not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setttin test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_not_equal_to'\n else:\n attr_name = 'fail_not_equal_to'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = 'Data value not equal to {}.'.format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.not_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_outside_test(self, var_name, limit_value_lower, limit_value_upper,\n test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_names=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than or greater than test\n (i.e. outide minimum and maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value_lower : int or float\n Lower limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n limit_value_upper : int or float\n Upper limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_names : list of str\n Optional attribute name to store the limit_value under\n quality control ancillary variable. 
First value is\n lower limit attribute name and second value is\n upper limit attribute name.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_names is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name_lower = 'warn_lower_range'\n attr_name_upper = 'warn_upper_range'\n else:\n attr_name_lower = 'fail_lower_range'\n attr_name_upper = 'fail_upper_range'\n else:\n attr_name_lower = limit_attr_names[0]\n attr_name_upper = limit_attr_names[1]\n\n if test_meaning is None:\n test_meaning = ('Data value less than {} '\n 'or greater than {}.').format(attr_name_lower,\n attr_name_upper)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n data = np.ma.masked_outside(self._obj[var_name].values,\n limit_value_lower, limit_value_upper)\n if data.mask.size == 1:\n data.mask = np.full(data.data.shape, data.mask, dtype=bool)\n\n index = data.mask\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value_lower = np.array(limit_value_lower, dtype=self._obj[var_name].values.dtype.type)\n limit_value_upper = np.array(limit_value_upper, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name_lower] = limit_value_lower\n self._obj[qc_var_name].attrs[attr_name_upper] = limit_value_upper\n\n return result\n\n def add_inside_test(self, var_name, limit_value_lower, limit_value_upper,\n test_meaning=None, test_assessment='Bad',\n test_number=None, flag_value=False,\n limit_attr_names=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than or less than test\n (i.e. between minimum and maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value_lower : int or float\n Lower limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n limit_value_upper : int or float\n Upper limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. If not set will ues next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_names : list of str\n Optional attribute name to store the limit_value under\n quality control ancillary variable. 
First value is\n lower limit attribute name and second value is\n upper limit attribute name.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_names is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name_lower = 'warn_lower_range_inner'\n attr_name_upper = 'warn_upper_range_inner'\n else:\n attr_name_lower = 'fail_lower_range_inner'\n attr_name_upper = 'fail_upper_range_inner'\n else:\n attr_name_lower = limit_attr_names[0]\n attr_name_upper = limit_attr_names[1]\n\n if test_meaning is None:\n test_meaning = ('Data value greater than {} '\n 'or less than {}.').format(attr_name_lower,\n attr_name_upper)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n data = np.ma.masked_inside(self._obj[var_name].values,\n limit_value_lower, limit_value_upper)\n if data.mask.size == 1:\n data.mask = np.full(data.data.shape, data.mask, dtype=bool)\n\n index = data.mask\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value_lower = np.array(limit_value_lower, dtype=self._obj[var_name].values.dtype.type)\n limit_value_upper = np.array(limit_value_upper, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name_lower] = limit_value_lower\n self._obj[qc_var_name].attrs[attr_name_upper] = limit_value_upper\n\n return result\n\n def add_persistence_test(self, var_name, window=10, test_limit=0.0001,\n min_periods=1, center=True, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, prepend_text=None):\n \"\"\"\n Method to perform a persistence test over 1-D data..\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n window : int\n Optional number of data samples to use in the calculation of\n standard deviation to test for consistent data.\n test_limit : float\n Optional test limit to use where the standard deviation less\n than will trigger the test.\n min_periods : int\n Optional number of minimum values to use in the moving window.\n Setting to 1 so this correctly handles NaNs.\n center : boolean\n Optional where within the moving window to report the standard\n deviation values. Used in the .rolling.std() calculation with xarray.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n data = self._obj[var_name]\n if window > data.size:\n window = data.size\n\n if test_meaning is None:\n test_meaning = ('Data failing persistence test. '\n 'Standard Deviation over a window of {} values '\n 'less than {}.').format(window, test_limit)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n stddev = data.rolling(time=window, min_periods=min_periods, center=center).std()\n index = stddev < test_limit\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n return result\n\n def add_difference_test(self, var_name, dataset2_dict=None, ds2_var_name=None,\n diff_limit=None, tolerance=\"1m\",\n set_test_regardless=True,\n apply_assessment_to_dataset2=None,\n apply_tests_to_dataset2=None,\n test_meaning=None, test_assessment='Bad',\n test_number=None, flag_value=False,\n prepend_text=None):\n \"\"\"\n Method to perform a comparison test on time series data. Tested on 1-D\n data only. Will check if units and long_name indicate a direction and\n compensate for 0 to 360 degree transition.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n dataset2_dict : dict\n Dictionary with key equal to datastream name and value\n equal to xarray dataset containing variable to compare. If not provided\n will assume second dataset is the same as self dataset.\n ds2_var_name : str\n Comparison dataset variable name to compare.\n diff_limit : int or float\n Difference limit for comparison.\n apply_assessment_to_dataset2 : str or list of str\n Option to filter comparison dataset variable using corresponding\n quality control variable using assessments. Example would be\n ['Bad'], where all quality control data with assessment Bad will\n not be used in this test.\n apply_tests_to_dataset2 : int or list of int\n Option to filter comparison dataset variable using corresponding\n quality control variable using test numbers. Example would be\n [2,4], where all quality control data with test numbers 2 or 4 set\n will not be used in this test.\n tolerance : str\n Optional text indicating the time tolerance for aligning two\n DataArrays.\n set_test_regardless : boolean\n Option to set test description even if no data in comparison data\n set.\n test_meaning : str\n Optional text description to add to flag_meanings\n describing the test. Will use a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will use a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next available\n test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if dataset2_dict is None:\n dataset2_dict = {'second_dataset': self._obj}\n\n if not isinstance(dataset2_dict, dict):\n raise ValueError('You did not provide a dictionary containing the '\n 'datastream name as the key and xarray dataset as '\n 'the value for dataset2_dict for add_difference_test().')\n\n if diff_limit is None:\n raise ValueError('You did not provide a test limit for add_difference_test().')\n\n datastream2 = list(dataset2_dict.keys())[0]\n dataset2 = dataset2_dict[datastream2]\n\n if set_test_regardless is False and type(dataset2) != xr.core.dataset.Dataset:\n return\n\n if test_meaning is None:\n if dataset2 is self._obj:\n var_name2 = f'{ds2_var_name}'\n else:\n var_name2 = f'{datastream2}:{ds2_var_name}'\n\n test_meaning = (f'Difference between {var_name} and {var_name2} '\n f'greater than {diff_limit} {self._obj[var_name].attrs[\"units\"]}')\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n if tolerance is not None:\n tolerance = pd.Timedelta(tolerance)\n\n index = []\n if type(dataset2) == xr.core.dataset.Dataset:\n if apply_assessment_to_dataset2 is not None or apply_tests_to_dataset2 is not None:\n dataset2[ds2_var_name].values = dataset2.qcfilter.get_masked_data(\n ds2_var_name, rm_assessments=apply_assessment_to_dataset2,\n rm_tests=apply_tests_to_dataset2, return_nan_array=True)\n\n df_a = pd.DataFrame({'time': self._obj['time'].values,\n var_name: self._obj[var_name].values})\n data_b = convert_units(dataset2[ds2_var_name].values,\n dataset2[ds2_var_name].attrs['units'],\n self._obj[var_name].attrs['units'])\n ds2_var_name = ds2_var_name + '_newname'\n df_b = pd.DataFrame({'time': dataset2['time'].values,\n ds2_var_name: data_b})\n\n if tolerance is not None:\n tolerance = pd.Timedelta(tolerance)\n\n pd_c = pd.merge_asof(df_a, df_b, on='time', tolerance=tolerance,\n direction=\"nearest\")\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # Check if variable is for wind direction comparisons. Fix\n # for 0 - 360 degrees transition. This is done by adding 360 degrees to\n # all wind values and using modulus to get the minimum difference number.\n # This is done for both a-b and b-a and then choosing the minimum number\n # to compensate for large differences.\n wdir_units = ['deg', 'degree', 'degrees', 'degs']\n if (self._obj[var_name].attrs['units'] in wdir_units and\n 'direction' in self._obj[var_name].attrs['long_name'].lower()):\n diff1 = np.mod(np.absolute((pd_c[var_name] + 360.) -\n (pd_c[ds2_var_name] + 360.)), 360)\n diff2 = np.mod(np.absolute((pd_c[ds2_var_name] + 360.) 
-\n (pd_c[var_name] + 360.)), 360)\n diff = np.array([diff1, diff2])\n diff = np.nanmin(diff, axis=0)\n\n else:\n diff = np.absolute(pd_c[var_name] - pd_c[ds2_var_name])\n\n index = diff > diff_limit\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n return result\n\n def add_delta_test(self, var_name, diff_limit=1, test_meaning=None,\n limit_attr_name=None,\n test_assessment='Indeterminate', test_number=None,\n flag_value=False, prepend_text=None):\n \"\"\"\n Method to perform a difference test on adjacent values in time series.\n Will flag both values where a difference is greater\n than or equal to the difference limit. Tested with 1-D data only. Not\n sure what will happen with higher dimensioned data.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n diff_limit : int or float\n Difference limit.\n test_meaning : str\n Optional text description to add to flag_meanings\n describing the test. Will use a default if not set.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will use a default if not set.\n test_number : int\n Optional test number to use. If not set will use next available\n test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_delta'\n else:\n attr_name = 'fail_delta'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = f'Difference between current and previous values exceeds {attr_name}.'\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # Check if variable is for wind direction comparisons by units. Fix\n # for 0 - 360 degrees transition. This is done by adding 360 degrees to\n # all wind values and using modulus to get the minimum difference number.\n wdir_units = ['deg', 'degree', 'degrees', 'degs']\n if (self._obj[var_name].attrs['units'] in wdir_units and\n 'direction' in self._obj[var_name].attrs['long_name'].lower()):\n abs_diff = np.mod(np.abs(np.diff(self._obj[var_name].values)), 360)\n else:\n abs_diff = np.abs(np.diff(self._obj[var_name].values))\n\n index = np.where(abs_diff >= diff_limit)[0]\n if index.size > 0:\n index = np.append(index, index + 1)\n index = np.unique(index)\n\n result = self._obj.qcfilter.add_test(var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure min value attribute is matching data type\n diff_limit = np.array(diff_limit, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = diff_limit\n\n return result\n" ]
[ [ "numpy.diff", "numpy.less", "numpy.greater_equal", "numpy.append", "numpy.ma.masked_outside", "pandas.merge_asof", "numpy.absolute", "numpy.isnan", "numpy.ma.masked_inside", "numpy.where", "numpy.unique", "numpy.equal", "numpy.greater", "pandas.Timedelta", "numpy.not_equal", "pandas.DataFrame", "numpy.nanmin", "numpy.less_equal", "numpy.array", "numpy.full" ] ]
aishraghavan/introtodeeplearning
[ "2cd42e4279cea670ee45b95e1ca92ae899eaf721" ]
[ "mitdeeplearning/lab3.py" ]
[ "import io\nimport base64\nfrom IPython.display import HTML\nimport gym\nimport numpy as np\n\ndef play_video(filename):\n encoded = base64.b64encode(io.open(filename, 'r+b').read())\n embedded = HTML(data='''\n <video controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" />\n </video>'''.format(encoded.decode('ascii')))\n\n return embedded\n\ndef preprocess_pong(image):\n I = image[35:195] # Crop\n I = I[::2, ::2, 0] # Downsample width and height by a factor of 2\n I[I == 144] = 0 # Remove background type 1\n I[I == 109] = 0 # Remove background type 2\n I[I != 0] = 1 # Set remaining elements (paddles, ball, etc.) to 1\n return I.astype(np.float).reshape(80, 80, 1)\n\n\ndef save_video_of_model(model, env_name, obs_diff=False, pp_fn=None):\n import skvideo.io\n from pyvirtualdisplay import Display\n display = Display(visible=0, size=(400, 300))\n display.start()\n\n if pp_fn is None:\n pp_fn = lambda x: x\n\n env = gym.make(env_name)\n obs = env.reset()\n obs = pp_fn(obs)\n prev_obs = obs\n\n filename = env_name + \".mp4\"\n output_video = skvideo.io.FFmpegWriter(filename)\n\n counter = 0\n done = False\n while not done:\n frame = env.render(mode='rgb_array')\n output_video.writeFrame(frame)\n\n if obs_diff:\n input_obs = obs - prev_obs\n else:\n input_obs = obs\n action = model(np.expand_dims(input_obs, 0)).numpy().argmax()\n\n prev_obs = obs\n obs, reward, done, info = env.step(action)\n obs = pp_fn(obs)\n counter += 1\n\n output_video.close()\n print(\"Successfully saved {} frames into {}!\".format(counter, filename))\n return filename\n" ]
[ [ "numpy.expand_dims" ] ]
zubairahmed-ai/keras-yolo3
[ "5ba9ab3c99ee1b5e71b614a5464cb316d35cc9b3" ]
[ "yolo.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRun a YOLO_v3 style detection model on test images.\n\"\"\"\n\nimport colorsys\nimport os\nimport random\nimport time\nimport cv2\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom PIL import Image, ImageDraw, ImageFont\nfrom timeit import time\nfrom timeit import default_timer as timer ### to calculate FPS\n\nfrom yolo3.model import yolo_eval\n\nclass YOLO(object):\n def __init__(self):\n self.model_path = 'model_data/yolo.h5'\n self.anchors_path = 'model_data/yolo_anchors.txt'\n self.classes_path = 'model_data/coco_classes.txt'\n self.score = 0.3\n self.iou = 0.5\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n return anchors\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'\n\n self.yolo_model = load_model(model_path)\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n self.model_image_size = self.yolo_model.layers[0].input_shape[1:3]\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n random.seed(10101) # Fixed seed for consistent colors across runs.\n random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n # TODO: Wrap these backend operations with Keras layers.\n self.input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = time.time()\n resized_image = image.resize(tuple(reversed(self.model_image_size)), Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf', size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = 
ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = time.time()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\n\ndef detect_video(yolo,video_path):\n vid = cv2.VideoCapture(video_path) ### TODO: will video path other than 0 be used?\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam\")\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50,\n color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\",result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\n\ndef detect_img(yolo):\n while True:\n img = input('Input image filename:')\n try:\n image = Image.open(img)\n except:\n print('Open Error! Try again!')\n continue\n else:\n r_image = yolo.detect_image(image)\n r_image.show()\n yolo.close_session()\n\n\n\nif __name__ == '__main__':\n detect_img(YOLO())\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.floor", "numpy.asarray" ] ]
DavidAdamczyk/tensorflow-cmake
[ "8d848fae993a791dd44bc9cdcf9ad91f5795bf52" ]
[ "custom_op/example.py" ]
[ "#!/usr/bin/env python\n# 2018, Patrick Wieschollek <[email protected]>\n\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom user_ops import matrix_add\n\nnp.random.seed(42)\ntf.set_random_seed(42)\n\nmatA = np.random.randn(1, 2, 3, 4).astype(np.float32) * 10\nmatB = np.random.randn(1, 2, 3, 4).astype(np.float32) * 10\n\n\nA = tf.placeholder(tf.float32, shape=[None, 2, 3, 4])\nB = tf.placeholder(tf.float32, shape=[None, 2, 3, 4])\n\nbias = 42.\n\nactual_op = matrix_add(A, B, bias)\n\n\nwith tf.Session() as sess:\n print (sess.run(actual_op, {A: matA, B: matB}))\n" ]
[ [ "tensorflow.placeholder", "numpy.random.randn", "numpy.random.seed", "tensorflow.set_random_seed", "tensorflow.Session" ] ]
62theories/tf-flask
[ "c6954f0f3c4082165c92c77bb06d2fec6e75a8c4" ]
[ "official/projects/edgetpu/nlp/utils/utils_test.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for utils.py.\"\"\"\n\nfrom absl import flags\nimport tensorflow as tf\nimport yaml\n\nfrom official.projects.edgetpu.nlp.configs import params\nfrom official.projects.edgetpu.nlp.modeling import model_builder\nfrom official.projects.edgetpu.nlp.utils import utils\n\nFLAGS = flags.FLAGS\n\n\n# Helper function to compare two nested Dicts.\n# Note that this function only ensures all the fields in dict_a have definition\n# and same value in dict_b. This function does not guarantee that\n# dict_a == dict_b.\ndef nested_dict_compare(dict_a, dict_b):\n for k, v in sorted(dict_a.items()):\n if k not in dict_b:\n return False\n if isinstance(v, dict) and isinstance(dict_b[k], dict):\n if not nested_dict_compare(dict_a[k], dict_b[k]):\n return False\n else:\n # A caveat: When dict_a[k] = 1, dict_b[k] = True, the return is True.\n if dict_a[k] != dict_b[k]:\n return False\n return True\n\n\nclass UtilsTest(tf.test.TestCase):\n\n def test_config_override(self):\n # Define several dummy flags which are call by the utils.config_override\n # function.\n file_path = 'third_party/tensorflow_models/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_m.yaml'\n flags.DEFINE_string('tpu', None, 'tpu_address.')\n flags.DEFINE_list('config_file', [file_path],\n 'A list of config files path.')\n flags.DEFINE_string('params_override', None, 'Override params.')\n flags.DEFINE_string('model_dir', '/tmp/', 'Model saving directory.')\n flags.DEFINE_list('mode', ['train'], 'Job mode.')\n flags.DEFINE_bool('use_vizier', False,\n 'Whether to enable vizier based hyperparameter search.')\n experiment_params = params.EdgeTPUBERTCustomParams()\n experiment_params = utils.config_override(experiment_params, FLAGS)\n experiment_params_dict = experiment_params.as_dict()\n\n with tf.io.gfile.GFile(file_path, 'r') as f:\n loaded_dict = yaml.load(f, Loader=yaml.FullLoader)\n\n # experiment_params contains all the configs but the loaded_dict might\n # only contains partial of the configs.\n self.assertTrue(nested_dict_compare(loaded_dict, experiment_params_dict))\n\n def test_load_checkpoint(self):\n \"\"\"Test the pretrained model can be successfully loaded.\"\"\"\n experiment_params = params.EdgeTPUBERTCustomParams()\n student_pretrainer = experiment_params.student_model\n student_pretrainer.encoder.type = 'mobilebert'\n pretrainer = model_builder.build_bert_pretrainer(\n pretrainer_cfg=student_pretrainer,\n name='test_model')\n # Makes sure the pretrainer variables are created.\n checkpoint_path = self.create_tempfile().full_path\n _ = pretrainer(pretrainer.inputs)\n pretrainer.save_weights(checkpoint_path)\n\n utils.load_checkpoint(pretrainer, checkpoint_path)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main", "tensorflow.io.gfile.GFile" ] ]
kozodoi/BMS-Molecular-Translation
[ "881b252a3c30e5b0afce2ce2c5da73d02755394d" ]
[ "codes/decoder.py" ]
[ "####### RNN DECODER\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Attention(nn.Module):\n '''\n Attention network for calculate attention value\n '''\n def __init__(self, encoder_dim, decoder_dim, attention_dim):\n '''\n :param encoder_dim: input size of encoder network\n :param decoder_dim: input size of decoder network\n :param attention_dim: input size of attention network\n '''\n super(Attention, self).__init__()\n self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image\n self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output\n self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim = 1) # softmax layer to calculate weights\n\n def forward(self, encoder_out, decoder_hidden):\n att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)\n att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)\n att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) # (batch_size, num_pixels)\n alpha = self.softmax(att) # (batch_size, num_pixels)\n attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim = 1) # (batch_size, encoder_dim)\n return attention_weighted_encoding, alpha\n\n\nclass DecoderWithAttention(nn.Module):\n '''\n Decoder network with attention network used for training\n '''\n\n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, device, encoder_dim, dropout):\n '''\n :param attention_dim: input size of attention network\n :param embed_dim: input size of embedding network\n :param decoder_dim: input size of decoder network\n :param vocab_size: total number of characters used in training\n :param encoder_dim: input size of encoder network\n :param dropout: dropout rate\n '''\n super(DecoderWithAttention, self).__init__()\n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.decoder_dim = decoder_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n self.device = device\n self.attention = Attention(encoder_dim, decoder_dim, attention_dim) # attention network\n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.dropout = nn.Dropout(p = self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias = True) # decoding LSTMCell\n self.init_h = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n self.init_weights() # initialize some layers with the uniform distribution\n\n def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)\n\n def fine_tune_embeddings(self, fine_tune = True):\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n mean_encoder_out = encoder_out.mean(dim = 1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = 
self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths):\n '''\n :param encoder_out: output of encoder network\n :param encoded_captions: transformed sequence from character to integer\n :param caption_lengths: length of transformed sequence\n '''\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)\n encoder_out = encoder_out[sort_ind]\n encoded_captions = encoded_captions[sort_ind]\n \n # embedding transformed sequence for vector\n embeddings = self.embedding(encoded_captions) # (batch_size, max_caption_length, embed_dim)\n \n # initialize hidden state and cell state of LSTM cell\n h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)\n \n # set decode length by caption length - 1 because of omitting start token\n decode_lengths = (caption_lengths - 1).tolist()\n predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size, device = self.device)\n alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels, device = self.device)\n \n # predict sequence\n for t in range(max(decode_lengths)):\n batch_size_t = sum([l > t for l in decode_lengths])\n attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t], h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n return predictions, encoded_captions, decode_lengths, alphas, sort_ind\n \n def predict(self, encoder_out, decode_lengths, tokenizer):\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n # embed start tocken for LSTM input\n start_tockens = torch.ones(batch_size, dtype=torch.long, device = self.device) * tokenizer.stoi['<sos>']\n embeddings = self.embedding(start_tockens)\n # initialize hidden state and cell state of LSTM cell\n h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)\n predictions = torch.zeros(batch_size, decode_lengths, vocab_size, device = self.device)\n # predict sequence\n '''\n for t in range(decode_lengths):\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:, t, :] = preds\n #if np.argmax(preds.detach().cpu().numpy()) == tokenizer.stoi['<eos>']:\n # break\n embeddings = self.embedding(torch.argmax(preds, -1))\n '''\n \n # predict sequence\n end_condition = torch.zeros(batch_size, dtype=torch.long, device = 
self.device)\n for t in range(decode_lengths):\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:, t, :] = preds\n end_condition |= (torch.argmax(preds, -1) == tokenizer.stoi[\"<eos>\"])\n if end_condition.sum() == batch_size:\n break\n embeddings = self.embedding(torch.argmax(preds, -1))\n \n return predictions\n \n \n # beam search\n def forward_step(self, prev_tokens, hidden, encoder_out, function):\n\n h, c = hidden\n h, c = h.squeeze(0), c.squeeze(0)\n\n embeddings = self.embedding(prev_tokens)\n if embeddings.dim() == 3:\n embeddings = embeddings.squeeze(1)\n\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n\n hidden = (h.unsqueeze(0), c.unsqueeze(0))\n predicted_softmax = function(preds, dim=1)\n return predicted_softmax, hidden, None\n\n\n\n####### TOP-K DECODER\n\ndef _inflate(tensor, times, dim):\n # repeat_dims = [1] * tensor.dim()\n # repeat_dims[dim] = times\n # return tensor.repeat(*repeat_dims)\n return torch.repeat_interleave(tensor, times, dim)\n\n\nclass TopKDecoder(torch.nn.Module):\n r\"\"\"\n Top-K decoding with beam search.\n\n Args:\n decoder_rnn (DecoderRNN): An object of DecoderRNN used for decoding.\n k (int): Size of the beam.\n\n Inputs: inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio\n - **inputs** (seq_len, batch, input_size): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. It is used for teacher forcing when provided. (default is `None`)\n - **encoder_hidden** (num_layers * num_directions, batch_size, hidden_size): tensor containing the features\n in the hidden state `h` of encoder. Used as the initial hidden state of the decoder.\n - **encoder_outputs** (batch, seq_len, hidden_size): tensor with containing the outputs of the encoder.\n Used for attention mechanism (default is `None`).\n - **function** (torch.nn.Module): A function used to generate symbols from RNN hidden state\n (default is `torch.nn.functional.log_softmax`).\n - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. 
A random number is\n drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,\n teacher forcing would be used (default is 0).\n\n Outputs: decoder_outputs, decoder_hidden, ret_dict\n - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the\n outputs of the decoder.\n - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden\n state of the decoder.\n - **ret_dict**: dictionary containing additional information as follows {*length* : list of integers\n representing lengths of output sequences, *topk_length*: list of integers representing lengths of beam search\n sequences, *sequence* : list of sequences, where each sequence is a list of predicted token IDs,\n *topk_sequence* : list of beam search sequences, each beam is a list of token IDs, *inputs* : target\n outputs if provided for decoding}.\n \"\"\"\n\n def __init__(self, decoder_rnn, k, decoder_dim, max_length, tokenizer):\n super(TopKDecoder, self).__init__()\n self.rnn = decoder_rnn\n self.k = k\n self.hidden_size = decoder_dim # self.rnn.hidden_size\n self.V = len(tokenizer)\n self.SOS = tokenizer.stoi[\"<sos>\"]\n self.EOS = tokenizer.stoi[\"<eos>\"]\n self.max_length = max_length\n self.tokenizer = tokenizer\n\n def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None, function=F.log_softmax,\n teacher_forcing_ratio=0, retain_output_probs=True):\n \"\"\"\n Forward rnn for MAX_LENGTH steps. Look at :func:`seq2seq.models.DecoderRNN.DecoderRNN.forward_rnn` for details.\n \"\"\"\n\n # inputs, batch_size, max_length = self.rnn._validate_args(inputs, encoder_hidden, encoder_outputs,\n # function, teacher_forcing_ratio)\n\n batch_size = encoder_outputs.size(0)\n max_length = self.max_length\n\n self.pos_index = (torch.LongTensor(range(batch_size)) * self.k).view(-1, 1).to(device)\n\n # Inflate the initial hidden states to be of size: b*k x h\n # encoder_hidden = self.rnn._init_state(encoder_hidden)\n if encoder_hidden is None:\n hidden = None\n else:\n if isinstance(encoder_hidden, tuple):\n # hidden = tuple([_inflate(h, self.k, 1) for h in encoder_hidden])\n hidden = tuple([h.squeeze(0) for h in encoder_hidden])\n hidden = tuple([_inflate(h, self.k, 0) for h in hidden])\n hidden = tuple([h.unsqueeze(0) for h in hidden])\n else:\n # hidden = _inflate(encoder_hidden, self.k, 1)\n raise RuntimeError(\"Not supported\")\n\n # ... 
same idea for encoder_outputs and decoder_outputs\n if True: # self.rnn.use_attention:\n inflated_encoder_outputs = _inflate(encoder_outputs, self.k, 0)\n else:\n inflated_encoder_outputs = None\n\n # Initialize the scores; for the first step,\n # ignore the inflated copies to avoid duplicate entries in the top k\n sequence_scores = torch.Tensor(batch_size * self.k, 1)\n sequence_scores.fill_(-float('Inf'))\n sequence_scores.index_fill_(0, torch.LongTensor([i * self.k for i in range(0, batch_size)]), 0.0)\n sequence_scores = sequence_scores.to(device)\n\n # Initialize the input vector\n input_var = torch.transpose(torch.LongTensor([[self.SOS] * batch_size * self.k]), 0, 1).to(device)\n\n # Store decisions for backtracking\n stored_outputs = list()\n stored_scores = list()\n stored_predecessors = list()\n stored_emitted_symbols = list()\n stored_hidden = list()\n\n for i in range(0, max_length):\n\n # Run the RNN one step forward\n log_softmax_output, hidden, _ = self.rnn.forward_step(input_var, hidden,\n inflated_encoder_outputs, function=function)\n # If doing local backprop (e.g. supervised training), retain the output layer\n if retain_output_probs:\n stored_outputs.append(log_softmax_output)\n\n # To get the full sequence scores for the new candidates, add the local scores for t_i to the predecessor scores for t_(i-1)\n sequence_scores = _inflate(sequence_scores, self.V, 1)\n sequence_scores += log_softmax_output.squeeze(1)\n scores, candidates = sequence_scores.view(batch_size, -1).topk(self.k, dim=1)\n\n # Reshape input = (bk, 1) and sequence_scores = (bk, 1)\n input_var = (candidates % self.V).view(batch_size * self.k, 1)\n sequence_scores = scores.view(batch_size * self.k, 1)\n\n # Update fields for next timestep\n predecessors = (candidates // self.V + self.pos_index.expand_as(candidates)).view(batch_size * self.k, 1)\n if isinstance(hidden, tuple):\n hidden = tuple([h.index_select(1, predecessors.squeeze()) for h in hidden])\n else:\n hidden = hidden.index_select(1, predecessors.squeeze())\n\n # Update sequence scores and erase scores for end-of-sentence symbol so that they aren't expanded\n stored_scores.append(sequence_scores.clone())\n eos_indices = input_var.data.eq(self.EOS)\n if eos_indices.nonzero().dim() > 0:\n sequence_scores.data.masked_fill_(eos_indices, -float('inf'))\n\n # Cache results for backtracking\n stored_predecessors.append(predecessors)\n stored_emitted_symbols.append(input_var)\n stored_hidden.append(hidden)\n\n # Do backtracking to return the optimal values\n output, h_t, h_n, s, l, p = self._backtrack(stored_outputs, stored_hidden,\n stored_predecessors, stored_emitted_symbols,\n stored_scores, batch_size, self.hidden_size)\n\n # Build return objects\n decoder_outputs = [step[:, 0, :] for step in output]\n if isinstance(h_n, tuple):\n decoder_hidden = tuple([h[:, :, 0, :] for h in h_n])\n else:\n decoder_hidden = h_n[:, :, 0, :]\n metadata = {}\n metadata['inputs'] = inputs\n metadata['output'] = output\n metadata['h_t'] = h_t\n metadata['score'] = s\n metadata['topk_length'] = l\n metadata['topk_sequence'] = p\n metadata['length'] = [seq_len[0] for seq_len in l]\n metadata['sequence'] = [seq[0] for seq in p]\n return decoder_outputs, decoder_hidden, metadata\n\n def _backtrack(self, nw_output, nw_hidden, predecessors, symbols, scores, b, hidden_size):\n \"\"\"Backtracks over batch to generate optimal k-sequences.\n\n Args:\n nw_output [(batch*k, vocab_size)] * sequence_length: A Tensor of outputs from network\n nw_hidden [(num_layers, batch*k, 
hidden_size)] * sequence_length: A Tensor of hidden states from network\n predecessors [(batch*k)] * sequence_length: A Tensor of predecessors\n symbols [(batch*k)] * sequence_length: A Tensor of predicted tokens\n scores [(batch*k)] * sequence_length: A Tensor containing sequence scores for every token t = [0, ... , seq_len - 1]\n b: Size of the batch\n hidden_size: Size of the hidden state\n\n Returns:\n output [(batch, k, vocab_size)] * sequence_length: A list of the output probabilities (p_n)\n from the last layer of the RNN, for every n = [0, ... , seq_len - 1]\n\n h_t [(batch, k, hidden_size)] * sequence_length: A list containing the output features (h_n)\n from the last layer of the RNN, for every n = [0, ... , seq_len - 1]\n\n h_n(batch, k, hidden_size): A Tensor containing the last hidden state for all top-k sequences.\n\n score [batch, k]: A list containing the final scores for all top-k sequences\n\n length [batch, k]: A list specifying the length of each sequence in the top-k candidates\n\n p (batch, k, sequence_len): A Tensor containing predicted sequence\n \"\"\"\n\n lstm = isinstance(nw_hidden[0], tuple)\n\n # initialize return variables given different types\n output = list()\n h_t = list()\n p = list()\n # Placeholder for last hidden state of top-k sequences.\n # If a (top-k) sequence ends early in decoding, `h_n` contains\n # its hidden state when it sees EOS. Otherwise, `h_n` contains\n # the last hidden state of decoding.\n if lstm:\n state_size = nw_hidden[0][0].size()\n h_n = tuple([torch.zeros(state_size).to(device), torch.zeros(state_size).to(device)])\n else:\n h_n = torch.zeros(nw_hidden[0].size()).to(device)\n l = [[self.max_length] * self.k for _ in range(b)] # Placeholder for lengths of top-k sequences\n # Similar to `h_n`\n\n # the last step output of the beams are not sorted\n # thus they are sorted here\n sorted_score, sorted_idx = scores[-1].view(b, self.k).topk(self.k)\n # initialize the sequence scores with the sorted last step beam scores\n s = sorted_score.clone()\n\n batch_eos_found = [0] * b # the number of EOS found\n # in the backward loop below for each batch\n\n t = self.max_length - 1\n # initialize the back pointer with the sorted order of the last step beams.\n # add self.pos_index for indexing variable with b*k as the first dimension.\n t_predecessors = (sorted_idx + self.pos_index.expand_as(sorted_idx)).view(b * self.k)\n while t >= 0:\n # Re-order the variables with the back pointer\n current_output = nw_output[t].index_select(0, t_predecessors)\n if lstm:\n current_hidden = tuple([h.index_select(1, t_predecessors) for h in nw_hidden[t]])\n else:\n current_hidden = nw_hidden[t].index_select(1, t_predecessors)\n current_symbol = symbols[t].index_select(0, t_predecessors)\n # Re-order the back pointer of the previous step with the back pointer of\n # the current step\n t_predecessors = predecessors[t].index_select(0, t_predecessors).squeeze()\n\n # This tricky block handles dropped sequences that see EOS earlier.\n # The basic idea is summarized below:\n #\n # Terms:\n # Ended sequences = sequences that see EOS early and dropped\n # Survived sequences = sequences in the last step of the beams\n #\n # Although the ended sequences are dropped during decoding,\n # their generated symbols and complete backtracking information are still\n # in the backtracking variables.\n # For each batch, everytime we see an EOS in the backtracking process,\n # 1. 
If there is survived sequences in the return variables, replace\n # the one with the lowest survived sequence score with the new ended\n # sequences\n # 2. Otherwise, replace the ended sequence with the lowest sequence\n # score with the new ended sequence\n #\n eos_indices = symbols[t].data.squeeze(1).eq(self.EOS).nonzero()\n if eos_indices.dim() > 0:\n for i in range(eos_indices.size(0) - 1, -1, -1):\n # Indices of the EOS symbol for both variables\n # with b*k as the first dimension, and b, k for\n # the first two dimensions\n idx = eos_indices[i]\n b_idx = int(idx[0] // self.k)\n # The indices of the replacing position\n # according to the replacement strategy noted above\n res_k_idx = self.k - (batch_eos_found[b_idx] % self.k) - 1\n batch_eos_found[b_idx] += 1\n res_idx = b_idx * self.k + res_k_idx\n\n # Replace the old information in return variables\n # with the new ended sequence information\n t_predecessors[res_idx] = predecessors[t][idx[0]]\n current_output[res_idx, :] = nw_output[t][idx[0], :]\n if lstm:\n current_hidden[0][:, res_idx, :] = nw_hidden[t][0][:, idx[0], :]\n current_hidden[1][:, res_idx, :] = nw_hidden[t][1][:, idx[0], :]\n h_n[0][:, res_idx, :] = nw_hidden[t][0][:, idx[0], :].data\n h_n[1][:, res_idx, :] = nw_hidden[t][1][:, idx[0], :].data\n else:\n current_hidden[:, res_idx, :] = nw_hidden[t][:, idx[0], :]\n h_n[:, res_idx, :] = nw_hidden[t][:, idx[0], :].data\n current_symbol[res_idx, :] = symbols[t][idx[0]]\n s[b_idx, res_k_idx] = scores[t][idx[0]].data[0]\n l[b_idx][res_k_idx] = t + 1\n\n # record the back tracked results\n output.append(current_output)\n h_t.append(current_hidden)\n p.append(current_symbol)\n\n t -= 1\n\n # Sort and re-order again as the added ended sequences may change\n # the order (very unlikely)\n s, re_sorted_idx = s.topk(self.k)\n for b_idx in range(b):\n l[b_idx] = [l[b_idx][k_idx.item()] for k_idx in re_sorted_idx[b_idx, :]]\n\n re_sorted_idx = (re_sorted_idx + self.pos_index.expand_as(re_sorted_idx)).view(b * self.k)\n\n # Reverse the sequences and re-order at the same time\n # It is reversed because the backtracking happens in reverse time order\n output = [step.index_select(0, re_sorted_idx).view(b, self.k, -1) for step in reversed(output)]\n p = [step.index_select(0, re_sorted_idx).view(b, self.k, -1) for step in reversed(p)]\n if lstm:\n h_t = [tuple([h.index_select(1, re_sorted_idx).view(-1, b, self.k, hidden_size) for h in step]) for step in reversed(h_t)]\n h_n = tuple([h.index_select(1, re_sorted_idx.data).view(-1, b, self.k, hidden_size) for h in h_n])\n else:\n h_t = [step.index_select(1, re_sorted_idx).view(-1, b, self.k, hidden_size) for step in reversed(h_t)]\n h_n = h_n.index_select(1, re_sorted_idx.data).view(-1, b, self.k, hidden_size)\n s = s.data\n\n return output, h_t, h_n, s, l, p\n\n def _mask_symbol_scores(self, score, idx, masking_score=-float('inf')):\n score[idx] = masking_score\n\n def _mask(self, tensor, idx, dim=0, masking_score=-float('inf')):\n if len(idx.size()) > 0:\n indices = idx[:, 0]\n tensor.index_fill_(dim, indices, masking_score)" ]
[ [ "torch.ones", "torch.nn.Linear", "torch.argmax", "torch.repeat_interleave", "torch.LongTensor", "torch.nn.Softmax", "torch.nn.Embedding", "torch.nn.Parameter", "torch.nn.LSTMCell", "torch.cat", "torch.zeros", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.Dropout", "torch.Tensor" ] ]
ijcai2022-5500/anego
[ "9a2e5f29f0ec0787ad8ce7822089345053442887" ]
[ "cocoa/analysis/analyzer.py" ]
[ "\"\"\"Functions that analyze dialogues and models.\n\"\"\"\nimport json\nfrom collections import defaultdict\nimport numpy as np\n\nfrom cocoa.core.entity import is_entity\nfrom cocoa.model.util import entropy, safe_div\nfrom cocoa.model.counter import build_vocabulary, count_ngrams\nfrom cocoa.model.ngram import MLENgramModel\n\nfrom core.tokenizer import tokenize\n\nall_vocab = None\nno_ent_vocab = None\n\nclass Analyzer(object):\n def __init__(self, lexicon):\n self.lexicon = lexicon\n\n def example_stats(self, examples, agent=None):\n stats = {}\n stats['num_dialogues'] = len(examples)\n stats['num_turns_per_dialogue'] = np.mean([len(e.events) for e in examples])\n utterances = [tokenize(e.data) \\\n for example in examples \\\n for e in example.events if e.action == 'message' and\n (not agent or example.agents[e.agent] == agent)]\n stats['num_tokens_per_turn'] = np.mean([len(u) for u in utterances])\n\n vocab = set()\n for u in utterances:\n vocab.update(u)\n stats['vocab_size'] = len(vocab)\n global all_vocab\n all_vocab = vocab\n stats['corpus_perplexity'] = self.sequence_perplexity(utterances)\n\n self.print_stats(stats, 'dataset stats')\n return stats\n\n def intent_sequence_perplexity(self, intent_sequences, n=3):\n H = 0.\n N = 0\n for intent, sequences in intent_sequences.iteritems():\n model = self.build_lm(sequences, n)\n H_, N_ = self.total_entropy(model, sequences)\n H += H_\n N += N_\n H = safe_div(H, N)\n return np.power(2, H)\n\n def total_entropy(self, model, sequences):\n H = 0.\n N = 0\n for s in sequences:\n h, n = model.entropy(s, average=False)\n H += h\n N += n\n return H, N\n\n def build_lm(self, sequences, n):\n vocab = build_vocabulary(1, *sequences)\n counter = count_ngrams(n, vocab, sequences, pad_left=True, pad_right=False)\n model = MLENgramModel(counter)\n return model\n\n def sequence_perplexity(self, sequences, n=3):\n model = self.build_lm(sequences, n)\n H, N = self.total_entropy(model, sequences)\n H = safe_div(H, N)\n return np.power(2, H)\n\n def print_stats(self, stats, name):\n print ('='*5, name.upper(), '='*5)\n print (json.dumps(stats, indent=2))\n\n def parser_stats(self, parsed_dialogues, agent=None):\n stats = {}\n non_entity_vocab = set()\n ents = set()\n stats['intents'] = defaultdict(int)\n intent_utterances = defaultdict(list)\n\n for dialogue in parsed_dialogues:\n for utterance in dialogue:\n if agent and utterance.agent != agent:\n continue\n if utterance.tokens is not None:\n tokens = [x.canonical.type if is_entity(x) else x for x in utterance.tokens]\n e = [x.surface for x in utterance.tokens if is_entity(x)]\n ents.update(e)\n non_entity_vocab.update(tokens)\n if utterance.lf and utterance.lf.intent != '<start>':\n stats['intents'][utterance.lf.intent] += 1\n if utterance.text is not None:\n intent_utterances[utterance.lf.intent].append(tokenize(utterance.text))\n stats['non_entity_vocab_size'] = len(non_entity_vocab)\n #print 'entities:', len(ents)\n #global no_ent_vocab\n #no_ent_vocab = non_entity_vocab\n #for x in all_vocab:\n # if not x in non_entity_vocab:\n # print x\n\n stats['intent_corpus_perplexity'] = self.intent_sequence_perplexity(intent_utterances)\n\n # Percentage intents\n #s = float(sum(stats['intents'].values()))\n #stats['intents'] = sorted(\n # [(k, v, v / s) for k, v in stats['intents'].iteritems()],\n # key=lambda x: x[1], reverse=True)\n\n self.print_stats(stats, 'parser stats')\n return stats\n\n def manager_stats(self, manager):\n stats = {}\n stats['actions'] = manager.actions\n\n # Most likely 
sequence\n action_seq = [{'context': ('<start>', '<start>')}]\n for i in xrange(10):\n state = action_seq[-1]\n context = state['context']\n\n freqdist = manager.model.freqdist(context)\n counts = [x[1] for x in freqdist]\n ent = entropy(counts, normalized=False)\n state['entropy'] = ent\n\n state['most_likely_action'] = manager.most_likely_action(context, freqdist)\n state['min_entropy_action'] = manager.min_entropy_action(context, freqdist)\n\n new_context = (context[-1], state['most_likely_action'])\n action_seq.append({'context': new_context})\n\n stats['action_seq'] = action_seq\n\n self.print_stats(stats, 'manager stats')\n return stats\n\n #def generator_stats(self, generator):\n" ]
[ [ "numpy.power" ] ]
qdmy/Adelaidet-Quantization
[ "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b" ]
[ "codebase/third_party/spos_ofa/ofa/imagenet_classification/networks/mobilenet_v3.py" ]
[ "# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\nimport copy\nimport torch.nn as nn\n\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import set_layer_from_config, MBConvLayer, ConvLayer, IdentityLayer, LinearLayer, ResidualBlock\nfrom codebase.third_party.spos_ofa.ofa.utils import MyNetwork, make_divisible, MyGlobalAvgPool2d\n\n__all__ = ['MobileNetV3', 'MobileNetV3Large']\n\n\nclass MobileNetV3(MyNetwork):\n\n\tdef __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):\n\t\tsuper(MobileNetV3, self).__init__()\n\n\t\tself.first_conv = first_conv\n\t\tself.blocks = nn.ModuleList(blocks)\n\t\tself.final_expand_layer = final_expand_layer\n\t\tself.global_avg_pool = MyGlobalAvgPool2d(keep_dim=True)\n\t\tself.feature_mix_layer = feature_mix_layer\n\t\tself.classifier = classifier\n\n\tdef forward(self, x):\n\t\tx = self.first_conv(x)\n\t\tfor block in self.blocks:\n\t\t\tx = block(x)\n\t\tx = self.final_expand_layer(x)\n\t\tx = self.global_avg_pool(x) # global average pooling\n\t\tx = self.feature_mix_layer(x)\n\t\tx = x.view(x.size(0), -1)\n\t\tx = self.classifier(x)\n\t\treturn x\n\n\t@property\n\tdef module_str(self):\n\t\t_str = self.first_conv.module_str + '\\n'\n\t\tfor block in self.blocks:\n\t\t\t_str += block.module_str + '\\n'\n\t\t_str += self.final_expand_layer.module_str + '\\n'\n\t\t_str += self.global_avg_pool.__repr__() + '\\n'\n\t\t_str += self.feature_mix_layer.module_str + '\\n'\n\t\t_str += self.classifier.module_str\n\t\treturn _str\n\n\t@property\n\tdef config(self):\n\t\treturn {\n\t\t\t'name': MobileNetV3.__name__,\n\t\t\t'bn': self.get_bn_param(),\n\t\t\t'first_conv': self.first_conv.config,\n\t\t\t'blocks': [\n\t\t\t\tblock.config for block in self.blocks\n\t\t\t],\n\t\t\t'final_expand_layer': self.final_expand_layer.config,\n\t\t\t'feature_mix_layer': self.feature_mix_layer.config,\n\t\t\t'classifier': self.classifier.config,\n\t\t}\n\n\t@staticmethod\n\tdef build_from_config(config):\n\t\tfirst_conv = set_layer_from_config(config['first_conv'])\n\t\tfinal_expand_layer = set_layer_from_config(config['final_expand_layer'])\n\t\tfeature_mix_layer = set_layer_from_config(config['feature_mix_layer'])\n\t\tclassifier = set_layer_from_config(config['classifier'])\n\n\t\tblocks = []\n\t\tfor block_config in config['blocks']:\n\t\t\tblocks.append(ResidualBlock.build_from_config(block_config))\n\n\t\tnet = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\tif 'bn' in config:\n\t\t\tnet.set_bn_param(**config['bn'])\n\t\telse:\n\t\t\tnet.set_bn_param(momentum=0.1, eps=1e-5)\n\n\t\treturn net\n\n\tdef zero_last_gamma(self):\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, ResidualBlock):\n\t\t\t\tif isinstance(m.conv, MBConvLayer) and isinstance(m.shortcut, IdentityLayer):\n\t\t\t\t\tm.conv.point_linear.bn.weight.data.zero_()\n\n\t@property\n\tdef grouped_block_index(self):\n\t\tinfo_list = []\n\t\tblock_index_list = []\n\t\tfor i, block in enumerate(self.blocks[1:], 1):\n\t\t\tif block.shortcut is None and len(block_index_list) > 0:\n\t\t\t\tinfo_list.append(block_index_list)\n\t\t\t\tblock_index_list = []\n\t\t\tblock_index_list.append(i)\n\t\tif len(block_index_list) > 0:\n\t\t\tinfo_list.append(block_index_list)\n\t\treturn info_list\n\n\t@staticmethod\n\tdef build_net_via_cfg(cfg, input_channel, last_channel, n_classes, 
dropout_rate):\n\t\t# first conv layer\n\t\tfirst_conv = ConvLayer(\n\t\t\t3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='h_swish', ops_order='weight_bn_act'\n\t\t)\n\t\t# build mobile blocks\n\t\tfeature_dim = input_channel\n\t\tblocks = []\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor k, mid_channel, out_channel, use_se, act_func, stride, expand_ratio in block_config_list:\n\t\t\t\tmb_conv = MBConvLayer(\n\t\t\t\t\tfeature_dim, out_channel, k, stride, expand_ratio, mid_channel, act_func, use_se\n\t\t\t\t)\n\t\t\t\tif stride == 1 and out_channel == feature_dim:\n\t\t\t\t\tshortcut = IdentityLayer(out_channel, out_channel)\n\t\t\t\telse:\n\t\t\t\t\tshortcut = None\n\t\t\t\tblocks.append(ResidualBlock(mb_conv, shortcut))\n\t\t\t\tfeature_dim = out_channel\n\t\t# final expand layer\n\t\tfinal_expand_layer = ConvLayer(\n\t\t\tfeature_dim, feature_dim * 6, kernel_size=1, use_bn=True, act_func='h_swish', ops_order='weight_bn_act',\n\t\t)\n\t\t# feature mix layer\n\t\tfeature_mix_layer = ConvLayer(\n\t\t\tfeature_dim * 6, last_channel, kernel_size=1, bias=False, use_bn=False, act_func='h_swish',\n\t\t)\n\t\t# classifier\n\t\tclassifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)\n\n\t\treturn first_conv, blocks, final_expand_layer, feature_mix_layer, classifier\n\n\t@staticmethod\n\tdef adjust_cfg(cfg, ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tfor i, (stage_id, block_config_list) in enumerate(cfg.items()):\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif ks is not None and stage_id != '0':\n\t\t\t\t\tblock_config[0] = ks\n\t\t\t\tif expand_ratio is not None and stage_id != '0':\n\t\t\t\t\tblock_config[-1] = expand_ratio\n\t\t\t\t\tblock_config[1] = None\n\t\t\t\t\tif stage_width_list is not None:\n\t\t\t\t\t\tblock_config[2] = stage_width_list[i]\n\t\t\tif depth_param is not None and stage_id != '0':\n\t\t\t\tnew_block_config_list = [block_config_list[0]]\n\t\t\t\tnew_block_config_list += [copy.deepcopy(block_config_list[-1]) for _ in range(depth_param - 1)]\n\t\t\t\tcfg[stage_id] = new_block_config_list\n\t\treturn cfg\n\n\tdef load_state_dict(self, state_dict, **kwargs):\n\t\tcurrent_state_dict = self.state_dict()\n\n\t\tfor key in state_dict:\n\t\t\tif key not in current_state_dict:\n\t\t\t\tassert '.mobile_inverted_conv.' 
in key\n\t\t\t\tnew_key = key.replace('.mobile_inverted_conv.', '.conv.')\n\t\t\telse:\n\t\t\t\tnew_key = key\n\t\t\tcurrent_state_dict[new_key] = state_dict[key]\n\t\tsuper(MobileNetV3, self).load_state_dict(current_state_dict)\n\n\nclass MobileNetV3Large(MobileNetV3):\n\n\tdef __init__(self, n_classes=1000, width_mult=1.0, bn_param=(0.1, 1e-5), dropout_rate=0.2,\n\t ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tinput_channel = 16\n\t\tlast_channel = 1280\n\n\t\tinput_channel = make_divisible(input_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\tlast_channel = make_divisible(last_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE) \\\n\t\t\tif width_mult > 1.0 else last_channel\n\n\t\tcfg = {\n\t\t\t# k, exp, c, se, nl, s, e,\n\t\t\t'0': [\n\t\t\t\t[3, 16, 16, False, 'relu', 1, 1],\n\t\t\t],\n\t\t\t'1': [\n\t\t\t\t[3, 64, 24, False, 'relu', 2, None], # 4\n\t\t\t\t[3, 72, 24, False, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'2': [\n\t\t\t\t[5, 72, 40, True, 'relu', 2, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'3': [\n\t\t\t\t[3, 240, 80, False, 'h_swish', 2, None], # 6\n\t\t\t\t[3, 200, 80, False, 'h_swish', 1, None], # 2.5\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t],\n\t\t\t'4': [\n\t\t\t\t[3, 480, 112, True, 'h_swish', 1, None], # 6\n\t\t\t\t[3, 672, 112, True, 'h_swish', 1, None], # 6\n\t\t\t],\n\t\t\t'5': [\n\t\t\t\t[5, 672, 160, True, 'h_swish', 2, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t]\n\t\t}\n\n\t\tcfg = self.adjust_cfg(cfg, ks, expand_ratio, depth_param, stage_width_list)\n\t\t# width multiplier on mobile setting, change `exp: 1` and `c: 2`\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif block_config[1] is not None:\n\t\t\t\t\tblock_config[1] = make_divisible(block_config[1] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\t\t\tblock_config[2] = make_divisible(block_config[2] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\n\t\tfirst_conv, blocks, final_expand_layer, feature_mix_layer, classifier = self.build_net_via_cfg(\n\t\t\tcfg, input_channel, last_channel, n_classes, dropout_rate\n\t\t)\n\t\tsuper(MobileNetV3Large, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\t# set bn param\n\t\tself.set_bn_param(*bn_param)\n" ]
[ [ "torch.nn.ModuleList" ] ]
Soum-Soum/Tensorflow_Face_Finder
[ "fec6c15d2df7012608511ad87f4b55731bf99478" ]
[ "venv1/Lib/site-packages/tensorflow/python/keras/_impl/keras/layers/advanced_activations.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Layers that act as activation functions.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.keras._impl.keras import activations\r\nfrom tensorflow.python.keras._impl.keras import backend as K\r\nfrom tensorflow.python.keras._impl.keras import constraints\r\nfrom tensorflow.python.keras._impl.keras import initializers\r\nfrom tensorflow.python.keras._impl.keras import regularizers\r\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\r\nfrom tensorflow.python.keras._impl.keras.engine import Layer\r\nfrom tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export('keras.layers.LeakyReLU')\r\nclass LeakyReLU(Layer):\r\n \"\"\"Leaky version of a Rectified Linear Unit.\r\n\r\n It allows a small gradient when the unit is not active:\r\n `f(x) = alpha * x for x < 0`,\r\n `f(x) = x for x >= 0`.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha: float >= 0. Negative slope coefficient.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, alpha=0.3, **kwargs):\r\n super(LeakyReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha = K.cast_to_floatx(alpha)\r\n\r\n def call(self, inputs):\r\n return K.relu(inputs, alpha=self.alpha)\r\n\r\n def get_config(self):\r\n config = {'alpha': float(self.alpha)}\r\n base_config = super(LeakyReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.PReLU')\r\nclass PReLU(Layer):\r\n \"\"\"Parametric Rectified Linear Unit.\r\n\r\n It follows:\r\n `f(x) = alpha * x for x < 0`,\r\n `f(x) = x for x >= 0`,\r\n where `alpha` is a learned array with the same shape as x.\r\n\r\n Input shape:\r\n Arbitrary. 
Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha_initializer: initializer function for the weights.\r\n alpha_regularizer: regularizer for the weights.\r\n alpha_constraint: constraint for the weights.\r\n shared_axes: the axes along which to share learnable\r\n parameters for the activation function.\r\n For example, if the incoming feature maps\r\n are from a 2D convolution\r\n with output shape `(batch, height, width, channels)`,\r\n and you wish to share parameters across space\r\n so that each filter only has one set of parameters,\r\n set `shared_axes=[1, 2]`.\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n alpha_initializer='zeros',\r\n alpha_regularizer=None,\r\n alpha_constraint=None,\r\n shared_axes=None,\r\n **kwargs):\r\n super(PReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha_initializer = initializers.get(alpha_initializer)\r\n self.alpha_regularizer = regularizers.get(alpha_regularizer)\r\n self.alpha_constraint = constraints.get(alpha_constraint)\r\n if shared_axes is None:\r\n self.shared_axes = None\r\n elif not isinstance(shared_axes, (list, tuple)):\r\n self.shared_axes = [shared_axes]\r\n else:\r\n self.shared_axes = list(shared_axes)\r\n\r\n @shape_type_conversion\r\n def build(self, input_shape):\r\n param_shape = list(input_shape[1:])\r\n self.param_broadcast = [False] * len(param_shape)\r\n if self.shared_axes is not None:\r\n for i in self.shared_axes:\r\n param_shape[i - 1] = 1\r\n self.param_broadcast[i - 1] = True\r\n self.alpha = self.add_weight(\r\n shape=param_shape,\r\n name='alpha',\r\n initializer=self.alpha_initializer,\r\n regularizer=self.alpha_regularizer,\r\n constraint=self.alpha_constraint)\r\n # Set input spec\r\n axes = {}\r\n if self.shared_axes:\r\n for i in range(1, len(input_shape)):\r\n if i not in self.shared_axes:\r\n axes[i] = input_shape[i]\r\n self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)\r\n self.built = True\r\n\r\n def call(self, inputs, mask=None):\r\n pos = K.relu(inputs)\r\n if K.backend() == 'theano':\r\n neg = (\r\n K.pattern_broadcast(self.alpha, self.param_broadcast) *\r\n (inputs - K.abs(inputs)) * 0.5)\r\n else:\r\n neg = -self.alpha * K.relu(-inputs)\r\n return pos + neg\r\n\r\n def get_config(self):\r\n config = {\r\n 'alpha_initializer': initializers.serialize(self.alpha_initializer),\r\n 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),\r\n 'alpha_constraint': constraints.serialize(self.alpha_constraint),\r\n 'shared_axes': self.shared_axes\r\n }\r\n base_config = super(PReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.ELU')\r\nclass ELU(Layer):\r\n \"\"\"Exponential Linear Unit.\r\n\r\n It follows:\r\n `f(x) = alpha * (exp(x) - 1.) for x < 0`,\r\n `f(x) = x for x >= 0`.\r\n\r\n Input shape:\r\n Arbitrary. 
Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha: scale for the negative factor.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, alpha=1.0, **kwargs):\r\n super(ELU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha = K.cast_to_floatx(alpha)\r\n\r\n def call(self, inputs):\r\n return K.elu(inputs, self.alpha)\r\n\r\n def get_config(self):\r\n config = {'alpha': float(self.alpha)}\r\n base_config = super(ELU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.ThresholdedReLU')\r\nclass ThresholdedReLU(Layer):\r\n \"\"\"Thresholded Rectified Linear Unit.\r\n\r\n It follows:\r\n `f(x) = x for x > theta`,\r\n `f(x) = 0 otherwise`.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n theta: float >= 0. Threshold location of activation.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, theta=1.0, **kwargs):\r\n super(ThresholdedReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.theta = K.cast_to_floatx(theta)\r\n\r\n def call(self, inputs, mask=None):\r\n return inputs * K.cast(K.greater(inputs, self.theta), K.floatx())\r\n\r\n def get_config(self):\r\n config = {'theta': float(self.theta)}\r\n base_config = super(ThresholdedReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.Softmax')\r\nclass Softmax(Layer):\r\n \"\"\"Softmax activation function.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n axis: Integer, axis along which the softmax normalization is applied.\r\n \"\"\"\r\n\r\n def __init__(self, axis=-1, **kwargs):\r\n super(Softmax, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.axis = axis\r\n\r\n def call(self, inputs):\r\n return activations.softmax(inputs, axis=self.axis)\r\n\r\n def get_config(self):\r\n config = {'axis': self.axis}\r\n base_config = super(Softmax, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n" ]
[ [ "tensorflow.python.keras._impl.keras.backend.greater", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras._impl.keras.backend.relu", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.keras._impl.keras.backend.cast_to_floatx", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.backend.pattern_broadcast", "tensorflow.python.keras._impl.keras.activations.softmax", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.keras._impl.keras.backend.abs", "tensorflow.python.keras._impl.keras.backend.elu", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.backend.backend", "tensorflow.python.keras._impl.keras.backend.floatx", "tensorflow.python.keras._impl.keras.constraints.get" ] ]
WangY0906/mmdetection-for-study
[ "c89703006a2a5250f4d1c71e0aad958d72526885" ]
[ "mmdet/models/detectors/cascade_rcnn.py" ]
[ "from __future__ import division\n\nimport torch\nimport torch.nn as nn\n\nfrom .base import BaseDetector\nfrom .test_mixins import RPNTestMixin\nfrom .. import builder\nfrom ..registry import DETECTORS\nfrom mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,\n merge_aug_masks)\n\n\[email protected]_module\nclass CascadeRCNN(BaseDetector, RPNTestMixin):\n\n def __init__(self,\n num_stages,\n backbone,\n neck=None,\n shared_head=None,\n rpn_head=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n super(CascadeRCNN, self).__init__()\n\n self.num_stages = num_stages\n self.backbone = builder.build_backbone(backbone)\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if rpn_head is not None:\n self.rpn_head = builder.build_head(rpn_head)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if bbox_head is not None:\n self.bbox_roi_extractor = nn.ModuleList()\n self.bbox_head = nn.ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(\n builder.build_roi_extractor(roi_extractor))\n self.bbox_head.append(builder.build_head(head))\n\n if mask_head is not None:\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(builder.build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = nn.ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n builder.build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n def init_weights(self, pretrained=None):\n super(CascadeRCNN, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n for i in range(self.num_stages):\n if self.with_bbox:\n self.bbox_roi_extractor[i].init_weights()\n self.bbox_head[i].init_weights()\n if self.with_mask:\n if not self.share_roi_extractor:\n self.mask_roi_extractor[i].init_weights()\n self.mask_head[i].init_weights()\n\n def extract_feat(self, img):\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_train(self,\n img,\n img_meta,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n 
proposals=None):\n x = self.extract_feat(img)\n\n losses = dict()\n\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,\n self.train_cfg.rpn)\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_meta, proposal_cfg)\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg.rcnn[i]\n lw = self.train_cfg.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(rcnn_train_cfg.assigner)\n bbox_sampler = build_sampler(\n rcnn_train_cfg.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_roi_extractor = self.bbox_roi_extractor[i]\n bbox_head = self.bbox_head[i]\n\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg)\n loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)\n for name, value in loss_bbox.items():\n losses['s{}.{}'.format(i, name)] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n mask_roi_extractor = self.mask_roi_extractor[i]\n pos_rois = bbox2roi(\n [res.pos_bboxes for res in sampling_results])\n mask_feats = mask_roi_extractor(\n x[:mask_roi_extractor.num_inputs], pos_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n # reuse positive bbox feats\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_feats[pos_inds]\n mask_head = self.mask_head[i]\n mask_pred = mask_head(mask_feats)\n mask_targets = mask_head.get_target(sampling_results, gt_masks,\n rcnn_train_cfg)\n pos_labels = torch.cat(\n [res.pos_gt_labels for res in sampling_results])\n loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)\n for name, value in loss_mask.items():\n losses['s{}.{}'.format(i, name)] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n roi_labels = bbox_targets[0] # bbox_targets is a tuple\n with torch.no_grad():\n proposal_list = bbox_head.refine_bboxes(\n rois, roi_labels, bbox_pred, pos_is_gts, img_meta)\n\n return losses\n\n def simple_test(self, img, img_meta, proposals=None, 
rescale=False):\n x = self.extract_feat(img)\n proposal_list = self.simple_test_rpn(\n x, img_meta, self.test_cfg.rpn) if proposals is None else proposals\n\n img_shape = img_meta[0]['img_shape']\n ori_shape = img_meta[0]['ori_shape']\n scale_factor = img_meta[0]['scale_factor']\n\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n ms_scores = []\n rcnn_test_cfg = self.test_cfg.rcnn\n\n rois = bbox2roi(proposal_list)\n for i in range(self.num_stages):\n bbox_roi_extractor = self.bbox_roi_extractor[i]\n bbox_head = self.bbox_head[i]\n\n bbox_feats = bbox_roi_extractor(\n x[:len(bbox_roi_extractor.featmap_strides)], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n\n cls_score, bbox_pred = bbox_head(bbox_feats)\n ms_scores.append(cls_score)\n\n if self.test_cfg.keep_all_stages:\n det_bboxes, det_labels = bbox_head.get_det_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n bbox_result = bbox2result(det_bboxes, det_labels,\n bbox_head.num_classes)\n ms_bbox_result['stage{}'.format(i)] = bbox_result\n\n if self.with_mask:\n mask_roi_extractor = self.mask_roi_extractor[i]\n mask_head = self.mask_head[i]\n if det_bboxes.shape[0] == 0:\n mask_classes = mask_head.num_classes - 1\n segm_result = [[] for _ in range(mask_classes)]\n else:\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n mask_feats = mask_roi_extractor(\n x[:len(mask_roi_extractor.featmap_strides)],\n mask_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats, i)\n mask_pred = mask_head(mask_feats)\n segm_result = mask_head.get_seg_masks(\n mask_pred, _bboxes, det_labels, rcnn_test_cfg,\n ori_shape, scale_factor, rescale)\n ms_segm_result['stage{}'.format(i)] = segm_result\n\n if i < self.num_stages - 1:\n bbox_label = cls_score.argmax(dim=1)\n rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,\n img_meta[0])\n\n cls_score = sum(ms_scores) / self.num_stages\n det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n bbox_result = bbox2result(det_bboxes, det_labels,\n self.bbox_head[-1].num_classes)\n ms_bbox_result['ensemble'] = bbox_result\n\n if self.with_mask:\n if det_bboxes.shape[0] == 0:\n mask_classes = self.mask_head[-1].num_classes - 1\n segm_result = [[] for _ in range(mask_classes)]\n else:\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n aug_masks = []\n for i in range(self.num_stages):\n mask_roi_extractor = self.mask_roi_extractor[i]\n mask_feats = mask_roi_extractor(\n x[:len(mask_roi_extractor.featmap_strides)], mask_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n mask_pred = self.mask_head[i](mask_feats)\n aug_masks.append(mask_pred.sigmoid().cpu().numpy())\n merged_masks = merge_aug_masks(aug_masks,\n [img_meta] * self.num_stages,\n self.test_cfg.rcnn)\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks, _bboxes, det_labels, rcnn_test_cfg,\n ori_shape, scale_factor, rescale)\n ms_segm_result['ensemble'] = segm_result\n\n if not self.test_cfg.keep_all_stages:\n if self.with_mask:\n results = (ms_bbox_result['ensemble'],\n ms_segm_result['ensemble'])\n else:\n results = ms_bbox_result['ensemble']\n else:\n if self.with_mask:\n results = {\n stage: (ms_bbox_result[stage], 
ms_segm_result[stage])\n for stage in ms_bbox_result\n }\n else:\n results = ms_bbox_result\n\n return results\n\n def aug_test(self, img, img_meta, proposals=None, rescale=False):\n raise NotImplementedError\n\n def show_result(self, data, result, img_norm_cfg, **kwargs):\n if self.with_mask:\n ms_bbox_result, ms_segm_result = result\n if isinstance(ms_bbox_result, dict):\n result = (ms_bbox_result['ensemble'],\n ms_segm_result['ensemble'])\n else:\n if isinstance(result, dict):\n result = result['ensemble']\n super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,\n **kwargs)\n" ]
[ [ "torch.ones", "torch.no_grad", "torch.nn.ModuleList", "torch.zeros", "torch.cat" ] ]
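The simple_test path in the record above ensembles the per-stage classification scores by plain averaging (cls_score = sum(ms_scores) / self.num_stages) before the final box decoding. A minimal numpy sketch of that averaging step, with made-up scores standing in for real stage outputs:

import numpy as np

# Hypothetical softmax scores from three cascade stages for 2 RoIs over 4 classes.
stage_scores = [
    np.array([[0.7, 0.1, 0.1, 0.1], [0.2, 0.5, 0.2, 0.1]]),
    np.array([[0.6, 0.2, 0.1, 0.1], [0.1, 0.6, 0.2, 0.1]]),
    np.array([[0.8, 0.1, 0.05, 0.05], [0.2, 0.4, 0.3, 0.1]]),
]

# Same arithmetic as `cls_score = sum(ms_scores) / self.num_stages` above.
ensembled = sum(stage_scores) / len(stage_scores)

# Per-RoI labels, mirroring `cls_score.argmax(dim=1)` in the stage-refinement step.
print(ensembled.argmax(axis=1))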
KevinKronk/multiclass-classification
[ "8a938e5dd3418caad24118f75fa11f2aab856b2f" ]
[ "multiclass_classification/cost.py" ]
[ "import numpy as np\nfrom scipy.special import expit\n\n\ndef log_cost(theta, x, y_i, hyper_p):\n \"\"\"\n Logistic regression cost function with regularization.\n\n Parameters\n ----------\n theta : array_like\n Shape (n+1,). Parameter values for function.\n\n x : array_like\n Shape (m, n+1). Features in model.\n\n y_i : array_like\n Shape (m,). Labels indicating membership in the current class i (1) or not (0).\n\n hyper_p : float\n Value of the hyperparameter for regularization.\n\n Returns\n -------\n cost : float\n Value of cost function at given parameters.\n \"\"\"\n\n size = y_i.size\n\n h = expit(x @ theta.T)\n\n first = -y_i * np.log(h)\n second = -(1 - y_i) * np.log(1 - h)\n # Exclude the bias term theta[0] from the regularization penalty.\n reg = (hyper_p / (2 * size)) * np.sum(np.power(theta[1:], 2))\n\n cost = (np.sum(first + second) / size) + reg\n return cost\n" ]
[ [ "numpy.sum", "numpy.log", "scipy.special.expit", "numpy.power" ] ]
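A quick usage sketch of log_cost on toy data, assuming the conventional setup where the first column of x is the bias column of ones (the numbers are illustrative only). With theta at zero every prediction is 0.5, so the cost comes out to ln 2 ≈ 0.693:

import numpy as np
from scipy.special import expit

def log_cost(theta, x, y_i, hyper_p):
    # Same computation as the record above, restated for a runnable demo.
    size = y_i.size
    h = expit(x @ theta.T)
    first = -y_i * np.log(h)
    second = -(1 - y_i) * np.log(1 - h)
    reg = (hyper_p / (2 * size)) * np.sum(np.power(theta[1:], 2))
    return (np.sum(first + second) / size) + reg

x = np.array([[1.0, 0.5], [1.0, -1.5], [1.0, 2.0]])  # bias column plus one feature
y_i = np.array([1.0, 0.0, 1.0])                      # in class i (1) / not in class i (0)
theta = np.zeros(2)
print(log_cost(theta, x, y_i, hyper_p=1.0))          # -> 0.6931...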
Swapnil8991/RLonASG
[ "d040fb5ac4431198b92544958d70924d5bec92ff" ]
[ "GameEnvs/ConnectFour/generateData.py" ]
[ "import numpy as np\n\nfrom C4Board import C4Board\nfrom random import seed, choice\nfrom os import urandom\nfrom time import time\nfrom itertools import cycle\nfrom sys import argv\n\n\ndef getTrainingData(noOfGames, dataGenFlag, inpTrainFile, outTrainFile):\n\t\n\tturnFlag = 0\n\tgameEndState = 0\n\t\n\ttempOutTrainList = [] # stores the expected output position for each move\n\tboardStates = [] # stores the board state at input\n\n\tinpTrainList = []\n\toutTrainList = []\n\n\tfor i in range(noOfGames):\n\t\tboardStates.append(b.board.tolist())\n\t\t# print(\"\\n First boardState: \\n\", boardStates)\n\t\temptyPositions = list(range(0, 7))\n\n\t\twhile b.count < 42:\n\t\t\tif b.count > 7:\n\t\t\t\tstatus, wSindex = b.checkWin()\n\t\t\t\tif status == 0:\n\t\t\t\t\tprint(f\"Game Draw! {b.getwStateSum()}\\n\")\n\t\t\t\t\tbreak\n\t\t\t\telif status == 1 and dataGenFlag == 1 and turnFlag == 1:\n\t\t\t\t\tprint(f\"Player X Wins! (wState[{wSindex}]: {b.wState[wSindex]}) {b.getwStateSum()}\\n\")\n\t\t\t\t\t# Keep only the winner's (X's) moves as training pairs.\n\t\t\t\t\tfor j in range(len(tempOutTrainList)):\n\t\t\t\t\t\tif j % 2 == 0:\n\t\t\t\t\t\t\toutTrainList.append(tempOutTrainList[j])\n\t\t\t\t\t\t\tinpTrainList.append(boardStates[j])\n\t\t\t\t\tbreak\n\t\t\t\telif status == 2:\n\t\t\t\t\tprint(f\"Player O Wins! (wState[{wSindex}]: {b.wState[wSindex]}) {b.getwStateSum()}\\n\")\n\t\t\t\t\tbreak\n\n\t\t\tcPChar = next(playerCharToggler)\n\t\t\tcPNum = next(playerNumToggler)\n\t\t\tif cPChar == 'X' and turnFlag == 0:\n\t\t\t\tturnFlag = 1\n\t\t\telif cPChar == 'O' and turnFlag == 0:\n\t\t\t\tturnFlag = 2\n\t\t\tposition = choice(emptyPositions)\n\t\t\t# print(f\"\\nPlayer {cPChar}: {position}\")\n\t\t\tb.makeMove(cPNum, position)\n\t\t\tprint(f\"\\nPlayer {cPChar}: \", end='', flush=True)\n\t\t\tboardStates.append(b.board.tolist())\n\t\t\t# print(\"\\nboardStates: \\n\", boardStates)\n\n\t\t\tzeroList = [0, 0, 0, 0, 0, 0, 0]\n\t\t\tzeroList[position] = 1\n\t\t\ttempOutTrainList.append(zeroList)\n\t\t\t# print(\"\\nExpected output by NN: \\n\", tempOutTrainList)\n\t\t\tb.printBoard()\n\t\tb.resetBoard()\n\t\t\n\tprint(\"\\n\\n inpTrainList: \\n\", len(inpTrainList))\n\tprint(\"\\n\\n outTrainList: \\n\", len(outTrainList))\n\n\txOutArray = np.array(outTrainList)\n\txInpArray = np.array(inpTrainList)\n\tnp.savetxt('__data__/' + outTrainFile, xOutArray, fmt='%d')\n\tnp.savetxt('__data__/' + inpTrainFile, xInpArray, fmt='%d')\n\n\t\nif __name__ == '__main__':\n\n\tif len(argv) != 5:\n\t\tprint(\"Provide no. of games, dataGenFlag (1|2), inpTrainFile, outTrainFile\")\n\telse:\n\t\tstartTime = time()\n\n\t\tb = C4Board()\n\t\tplayerCharToggler = cycle(['X', 'O']) # D-Char\n\t\tplayerNumToggler = cycle([3, -2]) # D-Val\n\t\tseed(urandom(128))\n\n\t\tgetTrainingData(int(argv[1]), int(argv[2]), argv[3], argv[4])\n\n\t\tprint(f\"Time taken: {time() - startTime}s\\n\")" ]
[ [ "numpy.array", "numpy.savetxt" ] ]
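Each training target in the record above is a one-hot vector over the seven Connect Four columns, built by flipping one entry of zeroList, and the collected arrays are dumped with np.savetxt. A self-contained sketch of that encoding; the output file name is a placeholder, and moves are sampled the same way choice(emptyPositions) does:

import numpy as np
from random import choice

targets = []
for _ in range(5):                 # five random moves, as in the self-play loop
    position = choice(range(7))    # random column index
    one_hot = [0] * 7
    one_hot[position] = 1          # same encoding as `zeroList` in the record
    targets.append(one_hot)

np.savetxt('targets_demo.txt', np.array(targets), fmt='%d')  # hypothetical output path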
alcunha/iwildcam2021ufam
[ "243c3e4b91d5756d1e7fcdf8ae75344a373d3b84" ]
[ "classification/eval_main.py" ]
[ "# Copyright 2021 Fagner Cunha\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Tool to evaluate classifiers.\n\nSet the environment variable PYTHONHASHSEED to a reproducible value\nbefore you start the python process to ensure that the model trains\nor infers with reproducibility.\n\"\"\"\nimport json\nimport os\nimport random\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\n\nfrom iwildcamlib import CategoryMap\nimport bags\nimport dataloader\nimport geoprior\nimport model_builder\n\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_name', default='efficientnet-b0',\n help=('Model name of the architecture'))\n\nflags.DEFINE_integer(\n 'input_size', default=224,\n help=('Input size of the model'))\n\nflags.DEFINE_bool(\n 'use_bags', default=False,\n help=('Use Balanced Group Softmax to train model'))\n\nflags.DEFINE_integer(\n 'empty_class_id', default=0,\n help=('Empty class id for balanced group softmax'))\n\nflags.DEFINE_bool(\n 'use_full_image', default=False,\n help=('Ignore bounding boxes and use full image'))\n\nflags.DEFINE_integer(\n 'batch_size', default=32,\n help=('Batch size used during training.'))\n\nflags.DEFINE_string(\n 'ckpt_dir', default=None,\n help=('Location of the model checkpoint files'))\n\nflags.DEFINE_string(\n 'annotations_json', default=None,\n help=('Path to json file containing the training annotations json for'\n ' the iWildCam2021 competition'))\n\nflags.DEFINE_string(\n 'train_dataset_split', default=None,\n help=('Path to json file containing the train/validation split based on'\n ' locations.'))\n\nflags.DEFINE_string(\n 'test_info_json', default=None,\n help=('Path to json file containing the test information json for'\n ' the iWildCam2021 competition'))\n\nflags.DEFINE_string(\n 'dataset_dir', default=None,\n help=('Path to directory containing test images.'))\n\nflags.DEFINE_string(\n 'megadetector_results_json', default=None,\n help=('Path to json file containing megadetector results.'))\n\nflags.DEFINE_integer(\n 'log_frequence', default=500,\n help=('Log prediction every n steps'))\n\nflags.DEFINE_string(\n 'geo_prior_ckpt_dir', default=None,\n help=('Location of the checkpoint files for the geo prior model'))\n\nflags.DEFINE_integer(\n 'geo_prior_input_size', default=6,\n help=('Input size for the geo prior model'))\n\nflags.DEFINE_bool(\n 'use_bn_geo_prior', default=False,\n help=('Include Batch Normalization in the geo prior model'))\n\nflags.DEFINE_integer(\n 'embed_dim', default=256,\n help=('Embedding dimension for geo prior model'))\n\nif 'random_seed' not in list(FLAGS):\n flags.DEFINE_integer(\n 'random_seed', default=42,\n help=('Random seed for reproducible 
experiments'))\n\nflags.mark_flag_as_required('ckpt_dir')\nflags.mark_flag_as_required('annotations_json')\nflags.mark_flag_as_required('test_info_json')\nflags.mark_flag_as_required('dataset_dir')\nflags.mark_flag_as_required('megadetector_results_json')\n\ndef load_train_validation_split():\n if FLAGS.train_dataset_split is None:\n return None, None\n\n with tf.io.gfile.GFile(FLAGS.train_dataset_split, 'r') as json_file:\n json_data = json.load(json_file)\n\n return json_data['train'], json_data['validation']\n\ndef _load_model(num_classes, bal_group_softmax=None):\n model = model_builder.create(model_name=FLAGS.model_name,\n num_classes=num_classes,\n input_size=FLAGS.input_size,\n unfreeze_layers=0,\n bags=bal_group_softmax)\n checkpoint_path = os.path.join(FLAGS.ckpt_dir, \"ckp\")\n model.load_weights(checkpoint_path)\n\n if bal_group_softmax is not None:\n model = bal_group_softmax.create_prediction_model(model)\n\n return model\n\ndef _load_geo_prior_model(num_classes):\n if FLAGS.geo_prior_ckpt_dir is not None:\n rand_sample_generator = dataloader.RandSpatioTemporalGenerator()\n\n geo_prior_model = geoprior.FCNet(\n num_inputs=FLAGS.geo_prior_input_size,\n embed_dim=FLAGS.embed_dim,\n num_classes=num_classes,\n use_bn=FLAGS.use_bn_geo_prior,\n rand_sample_generator=rand_sample_generator)\n\n checkpoint_path = os.path.join(FLAGS.geo_prior_ckpt_dir, \"ckp\")\n geo_prior_model.load_weights(checkpoint_path)\n\n return geo_prior_model\n else:\n return None\n\ndef _build_input_data(category_map):\n include_geo_data = FLAGS.geo_prior_ckpt_dir is not None\n\n input_data = dataloader.JsonWBBoxInputProcessor(\n dataset_json=FLAGS.test_info_json,\n dataset_dir=FLAGS.dataset_dir,\n megadetector_results_json=FLAGS.megadetector_results_json,\n batch_size=FLAGS.batch_size,\n batch_drop_remainder=False,\n category_map=category_map,\n is_training=False,\n output_size=FLAGS.input_size,\n crop_mode='full' if FLAGS.use_full_image else 'bbox',\n provide_validity_info_output=include_geo_data,\n provide_coord_date_encoded_input=include_geo_data,\n provide_instance_id=True,\n seed=FLAGS.random_seed)\n\n return input_data.make_source_dataset()\n\ndef mix_predictions(cnn_preds, prior_preds, valid):\n valid = tf.expand_dims(valid, axis=-1)\n return cnn_preds*prior_preds*valid + (1 - valid)*cnn_preds\n\ndef predict_w_geo_prior(batch, metadata, model, geo_prior_model):\n cnn_input = batch[:-1]\n prior_input = batch[-1]\n label, valid, _ = metadata\n\n cnn_preds = model(cnn_input, training=False)\n prior_preds = geo_prior_model(prior_input, training=False)\n preds = mix_predictions(cnn_preds, prior_preds, valid)\n\n return label, preds\n\ndef _decode_one_hot(one_hot_tensor):\n return tf.argmax(one_hot_tensor, axis=1).numpy()\n\ndef predict_classifier(model, geo_prior_model, dataset):\n labels = []\n predictions = []\n count = 0\n\n for batch, metadata in dataset:\n if geo_prior_model is not None:\n label, preds = predict_w_geo_prior(batch,\n metadata,\n model,\n geo_prior_model)\n else:\n preds = model(batch, training=False)\n label, _ = metadata\n\n labels += list(_decode_one_hot(label))\n predictions += list(_decode_one_hot(preds))\n\n if count % FLAGS.log_frequence == 0:\n tf.compat.v1.logging.info('Finished eval step %d' % count)\n count += 1\n\n return labels, predictions\n\ndef set_random_seeds():\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n tf.random.set_seed(FLAGS.random_seed)\n\ndef main(_):\n set_random_seeds()\n\n category_map = CategoryMap(FLAGS.annotations_json)\n 
train_loc, _ = load_train_validation_split()\n bal_group_softmax = bags.BalancedGroupSoftmax(\n FLAGS.annotations_json,\n category_map,\n FLAGS.empty_class_id,\n selected_locations=train_loc) if FLAGS.use_bags else None\n dataset, _ = _build_input_data(category_map)\n num_classes = category_map.get_num_classes()\n model = _load_model(num_classes, bal_group_softmax)\n geo_prior_model = _load_geo_prior_model(num_classes)\n\n labels, predictions = predict_classifier(model, geo_prior_model, dataset)\n\n accuracy = accuracy_score(labels, predictions)\n\n print(\"Accuracy: %s\" % accuracy)\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.io.gfile.GFile", "numpy.random.seed", "tensorflow.expand_dims", "sklearn.metrics.accuracy_score", "tensorflow.argmax", "tensorflow.random.set_seed", "tensorflow.compat.v1.logging.info" ] ]
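mix_predictions in the record above gates the geo prior with a per-example validity flag: where valid is 1 the CNN scores are multiplied by the prior, and where valid is 0 they pass through untouched. The same arithmetic in plain numpy, with toy scores rather than model outputs:

import numpy as np

cnn_preds = np.array([[0.6, 0.4], [0.3, 0.7]])    # hypothetical classifier scores
prior_preds = np.array([[0.9, 0.1], [0.5, 0.5]])  # hypothetical geo prior scores
valid = np.array([1.0, 0.0])[:, None]             # row 1 has no usable location data

# Mirrors cnn_preds*prior_preds*valid + (1 - valid)*cnn_preds in the record.
mixed = cnn_preds * prior_preds * valid + (1 - valid) * cnn_preds
print(mixed)  # row 0 is reweighted by the prior; row 1 is the raw CNN score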
wsuchy/estimator
[ "b22a912de2693322622d6f50e3a19e98fecac441" ]
[ "tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks_test.py" ]
[ "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for basic_session_run_hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport shutil\nimport tempfile\nimport time\n\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.summary import summary as summary_lib\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\nfrom tensorflow_estimator.python.estimator.hooks import basic_session_run_hooks\nfrom tensorflow_estimator.python.estimator.hooks import fake_summary_writer\n\n# Provide a realistic start time for unit tests where we need to mock out\n# calls to time.time().\nMOCK_START_TIME = 1484695987.209386\n\n\nclass MockCheckpointSaverListener(\n basic_session_run_hooks.CheckpointSaverListener):\n\n def __init__(self):\n self.begin_count = 0\n self.before_save_count = 0\n self.after_save_count = 0\n self.end_count = 0\n self.ask_for_stop = False\n\n def begin(self):\n self.begin_count += 1\n\n def before_save(self, session, global_step):\n self.before_save_count += 1\n\n def after_save(self, session, global_step):\n self.after_save_count += 1\n if self.ask_for_stop:\n return True\n\n def end(self, session, global_step):\n self.end_count += 1\n\n def get_counts(self):\n return {\n 'begin': self.begin_count,\n 'before_save': self.before_save_count,\n 'after_save': self.after_save_count,\n 'end': self.end_count\n }\n\n\n@test_util.deprecated_graph_mode_only\nclass SecondOrStepTimerTest(test.TestCase):\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SecondOrStepTimer()\n\n 
@test.mock.patch.object(time, 'time')\n def test_every_secs(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n\n mock_time.return_value += 1.0\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertTrue(timer.should_trigger_for_step(2))\n\n def test_every_steps(self):\n timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n self.assertFalse(timer.should_trigger_for_step(3))\n self.assertTrue(timer.should_trigger_for_step(4))\n\n def test_update_last_triggered_step(self):\n timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)\n self.assertEqual(None, elapsed_secs)\n self.assertEqual(None, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(4, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(2, elapsed_steps)\n\n\n@test_util.deprecated_graph_mode_only\nclass StopAtStepTest(test.TestCase):\n\n def test_raise_in_both_last_step_and_num_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)\n\n def test_stop_based_on_last_step(self):\n h = basic_session_run_hooks.StopAtStepHook(last_step=10)\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 9))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 10))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 11))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n def test_stop_based_on_num_step(self):\n h = basic_session_run_hooks.StopAtStepHook(num_steps=10)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 13))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 14))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 15))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 16))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n def 
test_stop_based_with_multiple_steps(self):\n h = basic_session_run_hooks.StopAtStepHook(num_steps=10)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 15))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n\n@test_util.deprecated_graph_mode_only\nclass LoggingTensorHookTest(test.TestCase):\n\n def setUp(self):\n # Mock out logging calls so we can verify whether correct tensors are being\n # monitored.\n self._actual_log = tf_logging.info\n self.logged_message = None\n\n def mock_log(*args, **kwargs):\n self.logged_message = args\n self._actual_log(*args, **kwargs)\n\n tf_logging.info = mock_log\n\n def tearDown(self):\n tf_logging.info = self._actual_log\n\n def test_illegal_args(self):\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n basic_session_run_hooks.LoggingTensorHook(\n tensors=['t'], every_n_iter=5, every_n_secs=5)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'])\n\n def test_print_at_end_only(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], at_end=True)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n self.logged_message = ''\n for _ in range(3):\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n hook.end(sess)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n def _validate_print_every_n_steps(self, sess, at_end):\n t = constant_op.constant(42.0, name='foo')\n\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_iter=10, at_end=at_end)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n for _ in range(3):\n self.logged_message = ''\n for _ in range(9):\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n # Add additional run to verify proper reset when called multiple times.\n self.logged_message = ''\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n self.logged_message = ''\n hook.end(sess)\n if at_end:\n self.assertRegexpMatches(str(self.logged_message), t.name)\n else:\n # assertNotRegexpMatches is not supported by 
python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n def test_print_every_n_steps(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n self._validate_print_every_n_steps(sess, at_end=False)\n # Verify proper reset.\n self._validate_print_every_n_steps(sess, at_end=False)\n\n def test_print_every_n_steps_and_end(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n self._validate_print_every_n_steps(sess, at_end=True)\n # Verify proper reset.\n self._validate_print_every_n_steps(sess, at_end=True)\n\n def test_print_first_step(self):\n # if it runs every iteration, first iteration has None duration.\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors={'foo': t}, every_n_iter=1)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), 'foo')\n # in first run, elapsed time is None.\n self.assertEqual(str(self.logged_message).find('sec'), -1)\n\n def _validate_print_every_n_secs(self, sess, at_end, mock_time):\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_secs=1.0, at_end=at_end)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n mock_time.return_value += 1.0\n\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n self.logged_message = ''\n hook.end(sess)\n if at_end:\n self.assertRegexpMatches(str(self.logged_message), t.name)\n else:\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n @test.mock.patch.object(time, 'time')\n def test_print_every_n_secs(self, mock_time):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_time.return_value = MOCK_START_TIME\n self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)\n # Verify proper reset.\n self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)\n\n @test.mock.patch.object(time, 'time')\n def test_print_every_n_secs_and_end(self, mock_time):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_time.return_value = MOCK_START_TIME\n self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)\n # Verify proper reset.\n self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)\n\n def test_print_formatter(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_iter=10,\n formatter=lambda items: 'qqq=%s' % items[t.name])\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n 
self.assertEqual(self.logged_message[0], 'qqq=42.0')\n\n\n@test_util.deprecated_graph_mode_only\nclass CheckpointSaverHookTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def tearDown(self):\n shutil.rmtree(self.model_dir, ignore_errors=True)\n\n def test_saves_when_saver_and_scaffold_both_missing(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=1)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_raise_when_saver_and_scaffold_both_present(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=10, save_steps=20)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(self.model_dir)\n\n def test_save_secs_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_secs_calls_listeners_at_begin_and_end(self):\n with self.graph.as_default():\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_secs=2,\n scaffold=self.scaffold,\n listeners=[listener])\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op) # hook runs here\n mon_sess.run(self.train_op) # hook won't run here, so it does at end\n hook.end(sess) # hook runs here\n self.assertEqual({\n 'begin': 1,\n 'before_save': 2,\n 'after_save': 2,\n 'end': 1\n }, listener.get_counts())\n\n def test_listener_with_monitored_session(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n scaffold=scaffold,\n listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n scaffold=scaffold,\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener_counts = listener.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, 
listener_counts)\n\n def test_listener_stops_training_in_after_save(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook], scaffold=scaffold,\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n self.assertFalse(sess.should_stop())\n sess.run(train_op)\n self.assertFalse(sess.should_stop())\n listener.ask_for_stop = True\n sess.run(train_op)\n self.assertTrue(sess.should_stop())\n\n def test_listener_with_default_saver(self):\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener_counts = listener.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, listener_counts)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n with monitored_session.SingularMonitoredSession(\n checkpoint_dir=self.model_dir) as sess2:\n global_step_saved_val = sess2.run(global_step)\n self.assertEqual(2, global_step_saved_val)\n\n def test_two_listeners_with_default_saver(self):\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener1 = MockCheckpointSaverListener()\n listener2 = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n listeners=[listener1, listener2])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener1_counts = listener1.get_counts()\n listener2_counts = listener2.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, listener1_counts)\n self.assertEqual(listener1_counts, listener2_counts)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n with monitored_session.SingularMonitoredSession(\n checkpoint_dir=self.model_dir) as sess2:\n global_step_saved_val = sess2.run(global_step)\n self.assertEqual(2, global_step_saved_val)\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saves_periodically(self, mock_time):\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n\n mock_time.return_value = MOCK_START_TIME\n mon_sess.run(self.train_op) # Saved.\n\n mock_time.return_value = MOCK_START_TIME 
+ 0.5\n mon_sess.run(self.train_op) # Not saved.\n\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n # Simulate 2.5 seconds of sleep.\n mock_time.return_value = MOCK_START_TIME + 2.5\n mon_sess.run(self.train_op) # Saved.\n\n mock_time.return_value = MOCK_START_TIME + 2.6\n mon_sess.run(self.train_op) # Not saved.\n\n mock_time.return_value = MOCK_START_TIME + 2.7\n mon_sess.run(self.train_op) # Not saved.\n\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n # Simulate 7.5 more seconds of sleep (10 seconds from start.\n mock_time.return_value = MOCK_START_TIME + 10\n mon_sess.run(self.train_op) # Saved.\n self.assertEqual(6,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_calls_listeners_periodically(self, mock_time):\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_secs=2,\n scaffold=self.scaffold,\n listeners=[listener])\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n\n mock_time.return_value = MOCK_START_TIME + 0.5\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 0.5\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 3.0\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 3.5\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 4.0\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 6.5\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 7.0\n mon_sess.run(self.train_op) # hook won't run here, so it does at end\n\n mock_time.return_value = MOCK_START_TIME + 7.5\n hook.end(sess) # hook runs here\n self.assertEqual({\n 'begin': 1,\n 'before_save': 4,\n 'after_save': 4,\n 'end': 1\n }, listener.get_counts())\n\n def test_save_steps_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # 
saved\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_saves_at_end(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n hook.end(sess)\n self.assertEqual(2,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_summary_writer_defs(self):\n fake_summary_writer.FakeSummaryWriter.install()\n writer_cache.FileWriterCache.clear()\n summary_writer = writer_cache.FileWriterCache.get(self.model_dir)\n\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n hook.after_create_session(sess, None)\n mon_sess.run(self.train_op)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.model_dir,\n expected_added_meta_graphs=[\n meta_graph.create_meta_graph_def(\n graph_def=self.graph.as_graph_def(add_shapes=True),\n saver_def=self.scaffold.saver.saver_def)\n ])\n\n fake_summary_writer.FakeSummaryWriter.uninstall()\n\n def test_save_checkpoint_before_first_train_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [hook])\n sess.run(self.scaffold.init_op)\n hook.after_create_session(sess, None)\n # Verifies that checkpoint is saved at step 0.\n self.assertEqual(0,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n # Verifies that no checkpoint is saved after one training step.\n mon_sess.run(self.train_op)\n self.assertEqual(0,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n # Verifies that checkpoint is saved after save_steps.\n mon_sess.run(self.train_op)\n self.assertEqual(2,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass CheckpointSaverHookMultiStepTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n self.steps_per_run = 5\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(self.steps_per_run)\n\n def tearDown(self):\n shutil.rmtree(self.model_dir, ignore_errors=True)\n\n def test_save_steps_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def 
test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n # Saved (step=5)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Not saved (step=10)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Saved (step=15)\n self.assertEqual(15,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Not saved (step=20)\n self.assertEqual(15,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Saved (step=25)\n self.assertEqual(25,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_steps_saves_at_end(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n hook.end(sess)\n self.assertEqual(10,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass ResourceCheckpointSaverHookTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n with variable_scope.variable_scope('foo', use_resource=True):\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass StepCounterHookTest(test.TestCase):\n\n def setUp(self):\n self.log_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.log_dir, ignore_errors=True)\n\n @test.mock.patch.object(time, 'time')\n def test_step_counter_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with 
ops.Graph().as_default() as g, session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=10)\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n with test.mock.patch.object(tf_logging, 'warning') as mock_log:\n for _ in range(30):\n mock_time.return_value += 0.01\n mon_sess.run(train_op)\n # logging.warning should not be called.\n self.assertIsNone(mock_log.call_args)\n hook.end(sess)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([11, 21], summary_writer.summaries.keys())\n for step in [11, 21]:\n summary_value = summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_step_counter_every_n_secs(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)\n\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op)\n mock_time.return_value += 0.2\n mon_sess.run(train_op)\n mock_time.return_value += 0.2\n mon_sess.run(train_op)\n hook.end(sess)\n\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertTrue(summary_writer.summaries, 'No summaries were created.')\n self.assertItemsEqual([2, 3], summary_writer.summaries.keys())\n for summary in summary_writer.summaries.values():\n summary_value = summary[0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n def test_global_step_name(self):\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n with variable_scope.variable_scope('bar'):\n variable_scope.get_variable(\n 'foo',\n initializer=0,\n trainable=False,\n collections=[\n ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES\n ])\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)\n\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op)\n mon_sess.run(train_op)\n hook.end(sess)\n\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertTrue(summary_writer.summaries, 'No summaries were created.')\n self.assertItemsEqual([2], summary_writer.summaries.keys())\n summary_value = summary_writer.summaries[2][0].value[0]\n self.assertEqual('bar/foo/sec', summary_value.tag)\n\n def 
test_log_warning_if_global_step_not_increased(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(0) # keep same.\n self.evaluate(variables_lib.global_variables_initializer())\n hook = basic_session_run_hooks.StepCounterHook(\n every_n_steps=1, every_n_secs=None)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op) # Run one step to record global step.\n with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:\n for _ in range(30):\n mon_sess.run(train_op)\n self.assertRegexpMatches(\n str(mock_log.call_args),\n 'global step.*has not been increased')\n hook.end(sess)\n\n def _setup_steps_per_run_test(self,\n every_n_steps,\n steps_per_run,\n graph,\n sess):\n training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(steps_per_run)\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(\n self.log_dir, graph)\n self.hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=self.summary_writer, every_n_steps=every_n_steps)\n self.hook._set_steps_per_run(steps_per_run)\n self.hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n self.mon_sess = monitored_session._HookedSession(sess, [self.hook])\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_less_than_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(10, 5, g, sess)\n\n # Logs at 15, 25\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys())\n for step in [15, 25]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_equal_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(5, 5, g, sess)\n\n # Logs at 10, 15, 20, 25\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([10, 15, 20, 25],\n self.summary_writer.summaries.keys())\n for step in [10, 15, 20, 25]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_greater_than_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(5, 10, g, sess)\n\n # Logs at 20, 30, 40, 50\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n 
self.assertItemsEqual([20, 30, 40, 50],\n self.summary_writer.summaries.keys())\n for step in [20, 30, 40, 50]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n\n@test_util.deprecated_graph_mode_only\nclass SummarySaverHookTest(test.TestCase):\n\n def setUp(self):\n test.TestCase.setUp(self)\n\n self.log_dir = 'log/dir'\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)\n\n var = variables_lib.Variable(0.0)\n tensor = state_ops.assign_add(var, 1.0)\n tensor2 = tensor * 2\n self.summary_op = summary_lib.scalar('my_summary', tensor)\n self.summary_op2 = summary_lib.scalar('my_summary2', tensor2)\n\n training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_raise_when_scaffold_and_summary_op_both_missing(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook()\n\n def test_raise_when_scaffold_and_summary_op_both_present(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n scaffold=monitored_session.Scaffold(), summary_op=self.summary_op)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n save_secs=10, save_steps=20, summary_writer=self.summary_writer)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n save_secs=None, save_steps=None, summary_writer=self.summary_writer)\n\n def test_save_steps(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 9: {\n 'my_summary': 2.0\n },\n 17: {\n 'my_summary': 3.0\n },\n 25: {\n 'my_summary': 4.0\n },\n })\n\n def test_multiple_summaries(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=[self.summary_op, self.summary_op2])\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(10):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0,\n 'my_summary2': 2.0\n },\n 9: {\n 'my_summary': 2.0,\n 'my_summary2': 4.0\n },\n })\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saving_once_every_step(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.SummarySaverHook(\n save_secs=0.5,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(4):\n mon_sess.run(self.train_op)\n mock_time.return_value += 0.5\n hook.end(sess)\n\n 
self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 2: {\n 'my_summary': 2.0\n },\n 3: {\n 'my_summary': 3.0\n },\n 4: {\n 'my_summary': 4.0\n },\n })\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saving_once_every_three_steps(self, mock_time):\n mock_time.return_value = 1484695987.209386\n hook = basic_session_run_hooks.SummarySaverHook(\n save_secs=9.,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(8):\n mon_sess.run(self.train_op)\n mock_time.return_value += 3.1\n hook.end(sess)\n\n # 24.8 seconds passed (3.1*8), it saves every 9 seconds starting from first:\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 4: {\n 'my_summary': 2.0\n },\n 7: {\n 'my_summary': 3.0\n },\n })\n\n\n@test_util.deprecated_graph_mode_only\nclass GlobalStepWaiterHookTest(test.TestCase):\n\n def test_not_wait_for_step_zero(self):\n with ops.Graph().as_default():\n training_util.get_or_create_global_step()\n hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)\n hook.begin()\n with session_lib.Session() as sess:\n # Before run should return without waiting gstep increment.\n hook.before_run(\n session_run_hook.SessionRunContext(\n original_args=None, session=sess))\n\n @test.mock.patch.object(time, 'sleep')\n def test_wait_for_step(self, mock_sleep):\n with ops.Graph().as_default():\n gstep = training_util.get_or_create_global_step()\n hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)\n hook.begin()\n\n with session_lib.Session() as sess:\n # Mock out calls to time.sleep() to update the global step.\n\n class Context(object):\n counter = 0\n\n def mock_sleep_side_effect(seconds):\n del seconds # argument is ignored\n Context.counter += 1\n if Context.counter == 1:\n # The first time sleep() is called, we update the global_step from\n # 0 to 500.\n sess.run(state_ops.assign(gstep, 500))\n elif Context.counter == 2:\n # The second time sleep() is called, we update the global_step from\n # 500 to 1100.\n sess.run(state_ops.assign(gstep, 1100))\n else:\n raise AssertionError(\n 'Expected before_run() to terminate after the second call to '\n 'time.sleep()')\n\n mock_sleep.side_effect = mock_sleep_side_effect\n\n # Run the mocked-out interaction with the hook.\n self.evaluate(variables_lib.global_variables_initializer())\n run_context = session_run_hook.SessionRunContext(\n original_args=None, session=sess)\n hook.before_run(run_context)\n self.assertEqual(Context.counter, 2)\n\n\n@test_util.deprecated_graph_mode_only\nclass FinalOpsHookTest(test.TestCase):\n\n def test_final_ops_is_scalar_tensor(self):\n with ops.Graph().as_default():\n expected_value = 4\n final_ops = constant_op.constant(expected_value)\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() as session:\n hook.end(session)\n self.assertEqual(expected_value,\n hook.final_ops_values)\n\n def test_final_ops_is_tensor(self):\n with ops.Graph().as_default():\n expected_values = [1, 6, 3, 5, 2, 4]\n final_ops = constant_op.constant(expected_values)\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() 
as session:\n hook.end(session)\n self.assertListEqual(expected_values,\n hook.final_ops_values.tolist())\n\n def test_final_ops_triggers_out_of_range_error(self):\n with ops.Graph().as_default():\n dataset = dataset_ops.Dataset.range(1)\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n read_ops = iterator.get_next()\n final_ops = read_ops\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() as session:\n session.run(read_ops)\n with test.mock.patch.object(tf_logging, 'warning') as mock_log:\n with self.assertRaisesRegexp(errors.OutOfRangeError,\n 'End of sequence'):\n hook.end(session)\n self.assertRegexpMatches(\n str(mock_log.call_args),\n 'dependency back to some input source')\n\n def test_final_ops_with_dictionary(self):\n with ops.Graph().as_default():\n expected_values = [4, -3]\n final_ops = array_ops.placeholder(dtype=dtypes.float32)\n final_ops_feed_dict = {final_ops: expected_values}\n\n hook = basic_session_run_hooks.FinalOpsHook(\n final_ops, final_ops_feed_dict)\n hook.begin()\n\n with session_lib.Session() as session:\n hook.end(session)\n self.assertListEqual(expected_values,\n hook.final_ops_values.tolist())\n\n\n@test_util.deprecated_graph_mode_only\nclass ResourceSummarySaverHookTest(test.TestCase):\n\n def setUp(self):\n test.TestCase.setUp(self)\n\n self.log_dir = 'log/dir'\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)\n\n var = variable_scope.get_variable('var', initializer=0.0, use_resource=True)\n tensor = state_ops.assign_add(var, 1.0)\n self.summary_op = summary_lib.scalar('my_summary', tensor)\n\n with variable_scope.variable_scope('foo', use_resource=True):\n training_util.create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_save_steps(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 9: {\n 'my_summary': 2.0\n },\n 17: {\n 'my_summary': 3.0\n },\n 25: {\n 'my_summary': 4.0\n },\n })\n\n\n@test_util.deprecated_graph_mode_only\nclass FeedFnHookTest(test.TestCase):\n\n def test_feeding_placeholder(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n x = array_ops.placeholder(dtype=dtypes.float32)\n y = x + 1\n hook = basic_session_run_hooks.FeedFnHook(\n feed_fn=lambda: {x: 1.0})\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.assertEqual(mon_sess.run(y), 2)\n\n\n@test_util.deprecated_graph_mode_only\nclass ProfilerHookTest(test.TestCase):\n\n def setUp(self):\n super(ProfilerHookTest, self).setUp()\n self.output_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n self.filepattern = os.path.join(self.output_dir, 'timeline-*.json')\n with self.graph.as_default():\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = state_ops.assign_add(self.global_step, 1)\n\n def tearDown(self):\n super(ProfilerHookTest, self).tearDown()\n shutil.rmtree(self.output_dir, ignore_errors=True)\n\n def _count_timeline_files(self):\n return len(gfile.Glob(self.filepattern))\n\n def 
test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None)\n\n def test_save_secs_does_not_save_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_secs=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op)\n self.assertEqual(0, self._count_timeline_files())\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saves_periodically(self, mock_time):\n # Pick a fixed start time.\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.ProfilerHook(\n save_secs=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n # Simulate 2.5 seconds of sleep.\n mock_time.return_value = MOCK_START_TIME + 2.5\n sess.run(self.train_op) # Saved.\n self.assertEqual(1, self._count_timeline_files())\n\n # Pretend some small amount of time has passed.\n mock_time.return_value = MOCK_START_TIME + 2.6\n sess.run(self.train_op) # Not saved.\n # Edge test just before we should save the timeline.\n mock_time.return_value = MOCK_START_TIME + 4.4\n sess.run(self.train_op) # Not saved.\n self.assertEqual(1, self._count_timeline_files())\n\n mock_time.return_value = MOCK_START_TIME + 4.5\n sess.run(self.train_op) # Saved.\n self.assertEqual(2, self._count_timeline_files())\n\n def test_save_steps_does_not_save_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=1, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n self.assertEqual(0, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n sess.run(self.train_op) # Saved.\n self.assertEqual(1, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(1, self._count_timeline_files())\n sess.run(self.train_op) # Saved.\n self.assertEqual(2, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(2, self._count_timeline_files())\n\n def test_run_metadata_saves(self):\n writer_cache.FileWriterCache.clear()\n fake_summary_writer.FakeSummaryWriter.install()\n fake_writer = writer_cache.FileWriterCache.get(self.output_dir)\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=1, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n sess.run(self.train_op) # Saved.\n self.assertEqual(\n list(fake_writer._added_run_metadata.keys()), ['step_2'])\n fake_summary_writer.FakeSummaryWriter.uninstall()\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.summary.summary.scalar", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator", "tensorflow.python.training.training_util.create_global_step", "tensorflow.python.training.session_run_hook.SessionRunContext", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.get", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.framework.ops.Graph", "tensorflow.python.platform.test.TestCase.setUp", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.gfile.Glob", "tensorflow.python.training.monitored_session.SingularMonitoredSession", "tensorflow.python.client.session.Session", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.training.training_util._increment_global_step", "tensorflow.python.training.checkpoint_utils.load_variable", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.platform.test.main", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.platform.test.mock.patch.object" ] ]
clementpoiret/sparseml
[ "8442a6ef8ba11fb02f5e51472dd68b72438539b9" ]
[ "tests/sparseml/pytorch/models/classification/test_mobilenet.py" ]
[ "# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom typing import Union\n\nimport pytest\nimport torch\n\nfrom sparseml.pytorch.models import ModelRegistry, mobilenet\nfrom tests.sparseml.pytorch.models.utils import compare_model\n\n\[email protected](\n os.getenv(\"NM_ML_SKIP_PYTORCH_TESTS\", False),\n reason=\"Skipping pytorch tests\",\n)\[email protected](\n os.getenv(\"NM_ML_SKIP_MODEL_TESTS\", False),\n reason=\"Skipping model tests\",\n)\[email protected](\n \"key,pretrained,test_input\",\n [\n (\"mobilenet\", False, True),\n (\"mobilenet\", True, False),\n (\"mobilenet\", \"base\", False),\n (\"mobilenet\", \"pruned-conservative\", False),\n (\"mobilenet\", \"pruned-moderate\", False),\n ],\n)\ndef test_mobilenets(key: str, pretrained: Union[bool, str], test_input: bool):\n model = ModelRegistry.create(key, pretrained)\n diff_model = mobilenet()\n\n if pretrained:\n compare_model(model, diff_model, same=False)\n match_model = ModelRegistry.create(key, pretrained)\n compare_model(model, match_model, same=True)\n\n if test_input:\n input_shape = ModelRegistry.input_shape(key)\n batch = torch.randn(1, *input_shape)\n out = model(batch)\n assert isinstance(out, tuple)\n for tens in out:\n assert tens.shape[0] == 1\n assert tens.shape[1] == 1000\n" ]
[ [ "torch.randn" ] ]
Totoketchup/Adaptive-MultiSpeaker-Separation
[ "8e7e869b8050643a777e315d1ddac577a8dc85ff" ]
[ "models/SC_V2.py" ]
[ "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom utils.ops import BLSTM, Conv1D, Reshape, Normalize, f_props, scope, log10\nfrom models.network import Separator\n\nclass L41ModelV2(Separator):\n\n\tdef __init__(self, graph=None, **kwargs):\n\t\tkwargs['mask_a'] = 1.0\n\t\tkwargs['mask_b'] = -1.0\n\n\t\tsuper(L41ModelV2, self).__init__(graph, **kwargs)\n\n\t\twith self.graph.as_default():\n\t\t\t# Define the speaker vectors to use during training\n\t\t\tself.speaker_vectors =tf.Variable(tf.truncated_normal(\n\t\t\t\t\t\t\t\t [self.num_speakers, self.embedding_size],\n\t\t\t\t\t\t\t\t stddev=tf.sqrt(2/float(self.embedding_size))), name='speaker_centroids')\n\t\tself.init_separator()\n\n\t@scope\n\tdef prediction(self):\n\t\t# L41 network\n\t\tshape = tf.shape(self.X)\n\n\t\tself.true_masks = 1.0 + self.y\n\n\t\tX_in = tf.identity(self.X)\n\t\t\n\n\t\tlayers = [BLSTM(self.layer_size, name='BLSTM_'+str(i), drop_val=self.rdropout) for i in range(self.nb_layers)]\n\n\t\tlayers_sp = [\n\t\t\tConv1D([1, self.layer_size, self.embedding_size*self.F]),\n\t\t\tReshape([self.B, shape[1], self.F, self.embedding_size]),\n\t\t]\n\n\t\tlayers += layers_sp\n\n\t\ty = f_props(layers, X_in)\n\t\t\n\t\treturn y\n\n\t@scope\n\tdef cost(self):\n\t\t\"\"\"\n\t\tConstruct the cost function op for the negative sampling cost\n\t\t\"\"\"\n\n\t\tif self.loss_with_silence:\n\t\t\tmax_ = tf.reduce_max(tf.abs(self.X), [1, 2], keep_dims=True)\n\t\t\tlog_compare = log10(tf.divide(max_, tf.abs(self.X)))\n\t\t\tmask = tf.cast(log_compare < self.threshold_silence_loss, tf.float32)\n\t\t\ttf.summary.image('separator/silence_mask', tf.expand_dims(mask,3), max_outputs=1)\n\t\t\ty_a_b = self.y * tf.expand_dims(mask, 3)\n\t\t\ty_0_1 = (self.y + 1.0)/2.0 * tf.expand_dims(mask, 3)\n\t\telse:\n\t\t\ty_a_b = self.y\n\t\t\ty_0_1 = (self.y + 1.0)/2.0 \n\n\n\t\ttf.summary.image('mask/true/1', tf.abs(tf.expand_dims(y_0_1[:,:,:,0],3)))\n\t\ttf.summary.image('mask/true/2', tf.abs(tf.expand_dims(y_0_1[:,:,:,1],3)))\n\n\n\t\t# Get the embedded T-F vectors from the network\n\t\tembedding = self.prediction # [B, T, F, E]\n\n\t\tembedding_broad = tf.expand_dims(embedding, 4) # [B, T, F, E, 1]\n\t\ty_broad = tf.expand_dims(y_0_1, 3) # [B, T, F, 1, S] \n\t\tv_mean = tf.reduce_sum(embedding_broad * y_broad, [1,2]) / ( 1e-12 + tf.expand_dims(tf.reduce_sum(y_0_1, [1,2]), 1))# [B, E, S]\n\t\t\n\t\t#\n\t\t# Reconstruction loss\n\t\t#\n\n\t\twith tf.name_scope('reconstruction_loss'):\n\n\t\t\tv_mean_broad = tf.expand_dims(v_mean, 1) # [B, 1, E, S]\n\t\t\tv_mean_broad = tf.expand_dims(v_mean_broad, 1) # [B, 1, 1, E, S]\n\n\t\t\tassignments = tf.reduce_sum(v_mean_broad * embedding_broad, 3) # [B, T, F, S]\n\n\t\t\tassignments = tf.nn.sigmoid(assignments) # [B, T, F, S]\n\n\t\t\tmasked_input = tf.expand_dims(self.X_input, 3) * assignments\n\n\t\t\t# X_non_mix [B, T, F, S]\t\t\t\n\t\t\tcost_recons = tf.reduce_mean(tf.square(self.X_non_mix - masked_input), axis=[1, 2])\n\t\t\tcost_recons = tf.reduce_mean(cost_recons, axis=-1) # Mean among all speakers [B, S]\n\t\t\tcost_recons = tf.reduce_mean(cost_recons)\n\t\t\ttf.summary.scalar('value', cost_recons)\n\n\t\t#\n\t\t# Constrast loss\n\t\t#\n\t\twith tf.name_scope('source_contrastive_loss'):\n\n\t\t\tspeaker_vectors = tf.nn.l2_normalize(self.speaker_vectors, 1)\n\t\t\tembedding = tf.nn.l2_normalize(embedding, -1)\n\n\t\t\tI = tf.expand_dims(self.I, axis=2) # [B, S, 1]\n\t\t\t# Gathering the speaker_vectors [|S|, E]\n\t\t\tVspeakers = tf.gather_nd(speaker_vectors, I) # [B, S, E]\n\t\t\t\n\t\t\t# 
Expand the dimensions in preparation for broadcasting\n\t\t\tVspeakers_broad = tf.expand_dims(Vspeakers, 1)\n\t\t\tVspeakers_broad = tf.expand_dims(Vspeakers_broad, 1) # [B, 1, 1, S, E]\n\t\t\tembedding_broad = tf.expand_dims(embedding, 3)\n\n\t\t\t# Compute the dot product between the embedding vectors and speaker\n\t\t\t# vectors\n\t\t\tdot = tf.reduce_sum(Vspeakers_broad * embedding_broad, 4)\n\n\t\t\t# Compute the cost for every element\n\n\t\t\tsc_cost = -tf.log(tf.nn.sigmoid(y_a_b * dot))\n\n\t\t\tsc_cost = tf.reduce_mean(sc_cost, 3) # Average the cost over all speakers in the input\n\t\t\tsc_cost = tf.reduce_mean(sc_cost, 0)\t# Average the cost over all batches\n\t\t\tsc_cost = tf.reduce_mean(sc_cost) \n\t\t\ttf.summary.scalar('value', sc_cost)\n\n\t\tcost = sc_cost + cost_recons\n\t\ttf.summary.scalar('total', cost)\n\n\t\treturn cost" ]
[ [ "tensorflow.nn.sigmoid", "tensorflow.summary.scalar", "tensorflow.shape", "tensorflow.gather_nd", "tensorflow.expand_dims", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.name_scope", "tensorflow.abs", "tensorflow.square", "tensorflow.identity", "tensorflow.reduce_sum", "tensorflow.nn.l2_normalize" ] ]
Cury30/Anomaly_detection
[ "fad172f6d9cc8dad73a79bc89290c578d67e2b35" ]
[ "create_bg.py" ]
[ "import cv2\nimport os\nimport numpy as np\nimport argparse\nimport uuid\nimport sys\nimport scipy.spatial\nimport matplotlib.pyplot as plt\n\n\nmodel_path = str(sys.argv[1])\nROADMASKDIR = model_path + \"/RoadMask/\"\nMINUTEMASKDIR = model_path + \"/MinuteMask/\"\n#INPUTVIDEOPATH = os.environ['AICITYVIDEOPATH'] + \"/test-data/\"\nINPUTVIDEOPATH = model_path + \"/Dataset/\"\ndarktexfile=open(\"dark.txt\",\"w\")\ndarkthreshold=290000\nvideo_amount = len(next(os.walk(INPUTVIDEOPATH))[2]) + 1\n\n\n\nprint(\"Using Input Video Path : \"+INPUTVIDEOPATH)\n\ndef unsharp_mask(image, kernel_size=(7, 7), sigma=1.0, amount=1.0, threshold=0):\n blurred = cv2.GaussianBlur(image, kernel_size, sigma)\n sharpened = float(amount + 1) * image - float(amount) * blurred\n sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))\n sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))\n sharpened = sharpened.round().astype(np.uint8)\n if threshold > 0:\n low_contrast_mask = np.absolute(image - blurred) < threshold\n np.copyto(sharpened, image, where=low_contrast_mask)\n return sharpened\n\ndef apply_filter(frame):\n frame = cv2.GaussianBlur(frame, (3, 3), 0)\n ret, frame = cv2.threshold(frame, 220, 255, cv2.THRESH_BINARY)\n return frame\n\ndef mkdir_ifndef(dirname):\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n\ndef create_bg(vidnum): \n mkdir_ifndef(ROADMASKDIR)\n mkdir_ifndef(MINUTEMASKDIR)\n print(INPUTVIDEOPATH+str(vidnum)+\".mp4\") #modificacion\n cap = cv2.VideoCapture(INPUTVIDEOPATH+str(vidnum)+\".mp4\")\n vh = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n vw = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(\"VH: {}, VW: {}\".format(vh,vw)) #modificacion\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"Length: {}\".format(length)) #modificacion\n weight=255.0/length\n vroi = 255 * np.ones((vw, vh), dtype=np.uint8)\n vroi2 = 255 * np.ones((vw, vh), dtype=np.uint8)\n bs = cv2.createBackgroundSubtractorMOG2(detectShadows=False)\n bs.setBackgroundRatio(0.6)\n bs.setHistory(256)\n bs.setNMixtures(4)\n bs.setVarInit(15)\n bs.setVarThreshold(25)\n cmpx_reduction_frames = 256\n learn_rate=0.007\n cmpx_reduction_factor = 1 - np.exp(256 * np.log(0.995))\n masksum = np.zeros((vw, vh), np.float32) \n (rAvg, gAvg, bAvg) = (None, None, None)\n maskcount=0\n total=0\n while True:\n ret, frame = cap.read()\n frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)\n if not ret:\n break\n if frame_num == bs.getHistory():\n learn_rate = 0.005\n bs.setComplexityReductionThreshold(cmpx_reduction_factor)\n frame = cv2.bitwise_and(frame, frame, mask=vroi)\n fg_img = bs.apply(frame, learningRate=learn_rate)\n bg_img = bs.getBackgroundImage()\n ret, fg_img = cv2.threshold(fg_img, 192, 255, cv2.THRESH_BINARY)\n fg_mask = apply_filter(fg_img)\n fg_mask2 = fg_mask.copy()\n fg_mask = cv2.bitwise_and(fg_mask, fg_mask, mask=vroi2)\n sharpened_image = unsharp_mask(bg_img)\n kernel = np.ones((5,5), np.uint8) \n img_erosion = cv2.erode(fg_mask, kernel, iterations=3) \n img_dilation = cv2.dilate(img_erosion, kernel, iterations=3)\n opening = cv2.morphologyEx(img_dilation, cv2.MORPH_OPEN, kernel)\n masksum=masksum+(opening*weight)\n (B, G, R) = cv2.split(sharpened_image.astype(\"float\"))\n if rAvg is None:\n rAvg = R\n bAvg = B\n gAvg = G\n else:\n rAvg = ((total * rAvg) + (1 * R)) / (total + 1.0)\n gAvg = ((total * gAvg) + (1 * G)) / (total + 1.0)\n bAvg = ((total * bAvg) + (1 * B)) / (total + 1.0)\n total+=1\n if(frame_num%(30*60)==0):\n maskcount+=1\n mkdir_ifndef(MINUTEMASKDIR+str(vidnum))\n total=0\n avg = 
cv2.merge([bAvg, gAvg, rAvg]).astype(\"uint8\")\n cv2.imwrite(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\",avg)\n (rAvg, gAvg, bAvg) = (None, None, None)\n if(maskcount==1):\n img=plt.imread(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\")\n intensity = img.sum(axis=2)\n pixelsum=0\n for row in intensity:\n pixelsum+=sum(row)\n if(pixelsum < darkthreshold):\n darktexfile.write(str(vidnum)+\"\\n\")\n else:\n if(frame_num%(length/4)==0): #This part is just because of the limit on google collab. Shuldnt be here\n maskcount+=1\n mkdir_ifndef(MINUTEMASKDIR+str(vidnum))\n total=0\n avg = cv2.merge([bAvg, gAvg, rAvg]).astype(\"uint8\")\n cv2.imwrite(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\",avg)\n (rAvg, gAvg, bAvg) = (None, None, None)\n if(maskcount==1):\n img=plt.imread(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\")\n intensity = img.sum(axis=2)\n pixelsum=0\n for row in intensity:\n pixelsum+=sum(row)\n if(pixelsum < darkthreshold):\n darktexfile.write(str(vidnum)+\"\\n\")\n\n masksum=apply_filter(masksum) \n cv2.imwrite(ROADMASKDIR+str(vidnum)+\".png\",masksum)\n cap.release()\n\ndef find_freeze():\n out = open(\"freeze.txt\",'w')\n for i in range(1,video_amount):\n count = 1\n videoPath = INPUTVIDEOPATH + \"%d.mp4\"%(i)\n cap = cv2.VideoCapture(videoPath)\n ret, frame2 = cap.read()\n start = -1\n consec = 0\n while(cap.isOpened()):\n frame1 = frame2\n ret, frame2 = cap.read()\n if not ret:\n break\n count +=1\n difference = cv2.subtract(frame1, frame2)\n b, g, r = cv2.split(difference)\n if cv2.countNonZero(b) <= 3000 and cv2.countNonZero(g) <= 3000 and cv2.countNonZero(r) <= 3000:\n if(start == -1):\n start = count - 1\n consec = 0\n elif(start != -1):\n consec += 1\n if(consec > 10):\n if(count - start - consec > 120):\n out.write(\"%d %d %d\\n\"%(i, start, count-1-consec))\n start = -1\n consec = 0\n if(start != - 1 and start != count -1):\n start = - 1\n out.close()\n\nif __name__ == \"__main__\":\n for i in range(1,video_amount):\n create_bg(i)\n find_freeze()\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.copyto", "numpy.log", "numpy.absolute" ] ]
spencerking/qiskit-experiments
[ "11a254b010afe35933aaabac70de12b5b5a244bf" ]
[ "qiskit_experiments/library/quantum_volume/qv_analysis.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nQuantum Volume analysis class.\n\"\"\"\n\nimport math\n\nimport warnings\nfrom typing import Optional\nimport numpy as np\n\nfrom qiskit_experiments.framework import BaseAnalysis, AnalysisResultData, FitVal\nfrom qiskit_experiments.curve_analysis import plot_scatter, plot_errorbar\n\n\nclass QuantumVolumeAnalysis(BaseAnalysis):\n r\"\"\"A class to analyze quantum volume experiments.\n\n # section: overview\n Calculate the quantum volume of the analysed system.\n The quantum volume is determined by the largest successful circuit depth.\n A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence\n level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran.\n we assume the error (standard deviation) of the heavy output probability is due to a\n binomial distribution. The standard deviation for binomial distribution is\n :math:`\\sqrt{(np(1-p))}`, where :math:`n` is the number of trials and :math:`p`\n is the success probability.\n \"\"\"\n\n # pylint: disable = arguments-differ\n def _run_analysis(\n self,\n experiment_data,\n plot: bool = True,\n ax: Optional[\"matplotlib.pyplot.AxesSubplot\"] = None,\n ):\n \"\"\"Run analysis on circuit data.\n\n Args:\n experiment_data (ExperimentData): the experiment data to analyze.\n plot (bool): If True generate a plot of fitted data.\n ax (AxesSubplot): Optional, matplotlib axis to add plot to.\n\n Returns:\n tuple: A pair ``(result_data figures)`` where\n ``result_data`` is a list of\n :class:`AnalysisResultData` objects, and ``figures`` may be\n None, a single figure, or a list of figures.\n \"\"\"\n depth = experiment_data.experiment.num_qubits\n data = experiment_data.data()\n num_trials = len(data)\n heavy_output_prob_exp = []\n\n for data_trial in data:\n heavy_output = self._calc_ideal_heavy_output(\n data_trial[\"metadata\"][\"ideal_probabilities\"], data_trial[\"metadata\"][\"depth\"]\n )\n heavy_output_prob_exp.append(\n self._calc_exp_heavy_output_probability(data_trial, heavy_output)\n )\n\n hop_result, qv_result = self._calc_quantum_volume(heavy_output_prob_exp, depth, num_trials)\n\n if plot:\n ax = self._format_plot(hop_result, ax=ax)\n figures = [ax.get_figure()]\n else:\n figures = None\n return [hop_result, qv_result], figures\n\n @staticmethod\n def _calc_ideal_heavy_output(probabilities_vector, depth):\n \"\"\"\n Calculate the bit strings of the heavy output for the ideal simulation\n\n Args:\n ideal_data (dict): the simulation result of the ideal circuit\n\n Returns:\n list: the bit strings of the heavy output\n \"\"\"\n\n format_spec = \"{0:0%db}\" % depth\n # Keys are bit strings and values are probabilities of observing those strings\n all_output_prob_ideal = {\n format_spec.format(b): float(np.real(probabilities_vector[b]))\n for b in range(2 ** depth)\n }\n\n median_probabilities = float(np.real(np.median(probabilities_vector)))\n heavy_strings = list(\n filter(\n lambda x: all_output_prob_ideal[x] > median_probabilities,\n list(all_output_prob_ideal.keys()),\n )\n )\n return 
heavy_strings\n\n @staticmethod\n def _calc_exp_heavy_output_probability(data, heavy_outputs):\n \"\"\"\n Calculate the probability of measuring heavy output string in the data\n\n Args:\n data (dict): the result of the circuit exectution\n heavy_outputs (list): the bit strings of the heavy output from the ideal simulation\n\n Returns:\n int: heavy output probability\n \"\"\"\n circ_shots = sum(data[\"counts\"].values())\n\n # Calculate the number of heavy output counts in the experiment\n heavy_output_counts = sum([data[\"counts\"].get(value, 0) for value in heavy_outputs])\n\n # Calculate the experimental heavy output probability\n return heavy_output_counts / circ_shots\n\n @staticmethod\n def _calc_z_value(mean, sigma):\n \"\"\"Calculate z value using mean and sigma.\n\n Args:\n mean (float): mean\n sigma (float): standard deviation\n\n Returns:\n float: z_value in standard normal distibution.\n \"\"\"\n\n if sigma == 0:\n # Assign a small value for sigma if sigma = 0\n sigma = 1e-10\n warnings.warn(\"Standard deviation sigma should not be zero.\")\n\n z_value = (mean - 2 / 3) / sigma\n\n return z_value\n\n @staticmethod\n def _calc_confidence_level(z_value):\n \"\"\"Calculate confidence level using z value.\n\n Accumulative probability for standard normal distribution\n in [-z, +infinity] is 1/2 (1 + erf(z/sqrt(2))),\n where z = (X - mu)/sigma = (hmean - 2/3)/sigma\n\n Args:\n z_value (float): z value in in standard normal distibution.\n\n Returns:\n float: confidence level in decimal (not percentage).\n \"\"\"\n\n confidence_level = 0.5 * (1 + math.erf(z_value / 2 ** 0.5))\n\n return confidence_level\n\n def _calc_quantum_volume(self, heavy_output_prob_exp, depth, trials):\n \"\"\"\n Calc the quantum volume of the analysed system.\n quantum volume is determined by the largest successful depth.\n A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence\n level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran.\n we assume the error (standard deviation) of the heavy output probability is due to a\n binomial distribution. 
standard deviation for binomial distribution is sqrt(np(1-p)),\n where n is the number of trials and p is the success probability.\n\n Returns:\n dict: quantum volume calculations -\n the quantum volume,\n whether the results passed the threshold,\n the confidence of the result,\n the heavy output probability for each trial,\n the mean heavy output probability,\n the error of the heavy output probability,\n the depth of the circuit,\n the number of trials ran\n \"\"\"\n quantum_volume = 1\n success = False\n\n mean_hop = np.mean(heavy_output_prob_exp)\n sigma_hop = (mean_hop * ((1.0 - mean_hop) / trials)) ** 0.5\n z = 2\n threshold = 2 / 3 + z * sigma_hop\n z_value = self._calc_z_value(mean_hop, sigma_hop)\n confidence_level = self._calc_confidence_level(z_value)\n if confidence_level > 0.977:\n quality = \"good\"\n else:\n quality = \"bad\"\n\n # Must have at least 100 trials\n if trials < 100:\n warnings.warn(\"Must use at least 100 trials to consider Quantum Volume as successful.\")\n\n if mean_hop > threshold and trials >= 100:\n quantum_volume = 2 ** depth\n success = True\n\n hop_result = AnalysisResultData(\n \"mean_HOP\",\n value=FitVal(mean_hop, sigma_hop),\n quality=quality,\n extra={\n \"HOPs\": heavy_output_prob_exp,\n \"two_sigma\": 2 * sigma_hop,\n \"depth\": depth,\n \"trials\": trials,\n },\n )\n\n qv_result = AnalysisResultData(\n \"quantum_volume\",\n value=quantum_volume,\n quality=quality,\n extra={\n \"success\": success,\n \"confidence\": confidence_level,\n \"depth\": depth,\n \"trials\": trials,\n },\n )\n return hop_result, qv_result\n\n @staticmethod\n def _format_plot(\n hop_result: AnalysisResultData, ax: Optional[\"matplotlib.pyplot.AxesSubplot\"] = None\n ):\n \"\"\"Format the QV plot\n\n Args:\n hop_result: the heavy output probability analysis result.\n ax: matplotlib axis to add plot to.\n\n Returns:\n AxesSubPlot: the matplotlib axes containing the plot.\n \"\"\"\n trials = hop_result.extra[\"trials\"]\n heavy_probs = hop_result.extra[\"HOPs\"]\n trial_list = np.arange(1, trials + 1) # x data\n\n hop_accumulative = np.cumsum(heavy_probs) / trial_list\n two_sigma = 2 * (hop_accumulative * (1 - hop_accumulative) / trial_list) ** 0.5\n\n # Plot inidivual HOP as scatter\n ax = plot_scatter(\n trial_list,\n heavy_probs,\n ax=ax,\n s=3,\n zorder=3,\n label=\"Individual HOP\",\n )\n # Plot accumulative HOP\n ax.plot(trial_list, hop_accumulative, color=\"r\", label=\"Cumulative HOP\")\n\n # Plot two-sigma shaded area\n ax = plot_errorbar(\n trial_list,\n hop_accumulative,\n two_sigma,\n ax=ax,\n fmt=\"none\",\n ecolor=\"lightgray\",\n elinewidth=20,\n capsize=0,\n alpha=0.5,\n label=\"2$\\\\sigma$\",\n )\n # Plot 2/3 success threshold\n ax.axhline(2 / 3, color=\"k\", linestyle=\"dashed\", linewidth=1, label=\"Threshold\")\n\n ax.set_ylim(\n max(hop_accumulative[-1] - 4 * two_sigma[-1], 0),\n min(hop_accumulative[-1] + 4 * two_sigma[-1], 1),\n )\n\n ax.set_xlabel(\"Number of Trials\", fontsize=14)\n ax.set_ylabel(\"Heavy Output Probability\", fontsize=14)\n\n ax.set_title(\n \"Quantum Volume experiment for depth \"\n + str(hop_result.extra[\"depth\"])\n + \" - accumulative hop\",\n fontsize=14,\n )\n\n # Re-arrange legend order\n handles, labels = ax.get_legend_handles_labels()\n handles = [handles[1], handles[2], handles[0], handles[3]]\n labels = [labels[1], labels[2], labels[0], labels[3]]\n ax.legend(handles, labels)\n return ax\n" ]
[ [ "numpy.cumsum", "numpy.median", "numpy.arange", "numpy.real", "numpy.mean" ] ]
houcharlie/federated-legacy
[ "cb10a9cdcea33288f8113e7445782d21c8c65f81" ]
[ "tensorflow_federated/python/core/utils/encoding_utils_test.py" ]
[ "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import test\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.backends.native import execution_contexts\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.templates.measured_process import MeasuredProcess\nfrom tensorflow_federated.python.core.utils import encoding_utils\nfrom tensorflow_federated.python.core.utils.computation_utils import StatefulAggregateFn\nfrom tensorflow_federated.python.core.utils.computation_utils import StatefulBroadcastFn\nfrom tensorflow_model_optimization.python.core.internal import tensor_encoding as te\n\n_bad_encoder_named_parameters = [('float', 1.0), ('string', 'str'),\n ('object', object),\n ('encoder', te.encoders.identity())]\n\n\nclass EncodedBroadcastTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_broadcast method.\"\"\"\n\n def test_build_encoded_broadcast_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_broadcast()'):\n encoding_utils.build_encoded_broadcast(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_broadcast(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n broadcast_fn = encoding_utils.build_encoded_broadcast(value, encoder)\n state_type = broadcast_fn._initialize_fn.type_signature.result\n broadcast_signature = computations.federated_computation(\n broadcast_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type,\n placements.SERVER)).type_signature\n\n self.assertIsInstance(broadcast_fn, StatefulBroadcastFn)\n self.assertEqual(state_type, broadcast_signature.result[0].member)\n self.assertEqual(placements.SERVER, 
broadcast_signature.result[0].placement)\n self.assertEqual(value_type, broadcast_signature.result[1].member)\n self.assertEqual(placements.CLIENTS,\n broadcast_signature.result[1].placement)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_broadcast_raises_bad_encoder(self, bad_encoder):\n value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast(value, bad_encoder)\n\n def test_build_encoded_broadcast_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = te.encoders.as_simple_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast(value, incompatible_encoder)\n\n def test_build_encoded_broadcast_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_broadcast(value, encoder)\n\n\nclass EncodedBroadcastProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_broadcast_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_broadcast_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n broadcast_process = encoding_utils.build_encoded_broadcast_process(\n value_type, encoder)\n state_type = broadcast_process._initialize_fn.type_signature.result\n broadcast_signature = broadcast_process._next_fn.type_signature\n\n self.assertIsInstance(broadcast_process, MeasuredProcess)\n self.assertEqual(state_type, broadcast_signature.result[0])\n self.assertEqual(placements.SERVER, broadcast_signature.result[0].placement)\n self.assertEqual(value_type, broadcast_signature.result[1].member)\n self.assertEqual(placements.CLIENTS,\n broadcast_signature.result[1].placement)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_broadcast_process_raises_bad_encoder(\n self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast_process(value_type, bad_encoder)\n\n def test_build_encoded_broadcast_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_simple_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast_process(value_type,\n incompatible_encoder)\n\n def test_build_encoded_broadcast_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with 
self.assertRaises(ValueError):\n encoding_utils.build_encoded_broadcast_process(value_type, encoder)\n\n\nclass EncodedSumTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_sum method.\"\"\"\n\n def test_build_encoded_sum_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_sum()'):\n encoding_utils.build_encoded_sum(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_sum(self, value_constructor, encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_fn = encoding_utils.build_encoded_sum(value, encoder)\n state_type = gather_fn._initialize_fn.type_signature.result\n gather_signature = computations.federated_computation(\n gather_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32),\n placements.CLIENTS)).type_signature\n\n self.assertIsInstance(gather_fn, StatefulAggregateFn)\n self.assertEqual(state_type, gather_signature.result[0].member)\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_sum(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n gather_fn = encoding_utils.build_encoded_sum(value, encoder)\n initial_state = gather_fn.initialize()\n\n @computations.federated_computation(\n computation_types.FederatedType(\n gather_fn._initialize_fn.type_signature.result, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS))\n def call_gather(state, value):\n return gather_fn(state, value)\n\n _, value_sum = call_gather(initial_state, [value, value])\n self.assertAllClose(2 * value, value_sum)\n\n _, value_sum = call_gather(initial_state, [value, -value])\n self.assertAllClose(0 * value, value_sum)\n\n _, value_sum = call_gather(initial_state, [value, 2 * value])\n self.assertAllClose(3 * value, value_sum)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_sum_raises_bad_encoder(self, bad_encoder):\n value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum(value, bad_encoder)\n\n def test_build_encoded_sum_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = 
te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum(value, incompatible_encoder)\n\n def test_build_encoded_sum_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_sum(value, encoder)\n\n\nclass EncodedSumProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_sum_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_sum_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_process = encoding_utils.build_encoded_sum_process(\n value_type, encoder)\n state_type = gather_process._initialize_fn.type_signature.result\n gather_signature = gather_process._next_fn.type_signature\n\n self.assertIsInstance(gather_process, MeasuredProcess)\n self.assertEqual(state_type, gather_signature.result[0])\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_sum_process(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n value_type = type_conversions.type_from_tensors(value)\n gather_process = encoding_utils.build_encoded_sum_process(\n value_type, encoder)\n initial_state = gather_process.initialize()\n call_gather = gather_process._next_fn\n\n output = call_gather(initial_state, [value, value])\n self.assertAllClose(2 * value, output['result'])\n\n output = call_gather(initial_state, [value, -value])\n self.assertAllClose(0 * value, output['result'])\n\n output = call_gather(initial_state, [value, 2 * value])\n self.assertAllClose(3 * value, output['result'])\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_sum_process_raises_bad_encoder(self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum_process(value_type, bad_encoder)\n\n def test_build_encoded_sum_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum_process(value_type, incompatible_encoder)\n\n def test_build_encoded_sum_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = 
te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_sum_process(value_type, encoder)\n\n\nclass EncodedMeanTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_mean method.\"\"\"\n\n def test_build_encoded_mean_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_mean()'):\n encoding_utils.build_encoded_mean(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_mean(self, value_constructor, encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_fn = encoding_utils.build_encoded_mean(value, encoder)\n state_type = gather_fn._initialize_fn.type_signature.result\n gather_signature = computations.federated_computation(\n gather_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32),\n placements.CLIENTS)).type_signature\n\n self.assertIsInstance(gather_fn, StatefulAggregateFn)\n self.assertEqual(state_type, gather_signature.result[0].member)\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_mean(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n gather_fn = encoding_utils.build_encoded_mean(value, encoder)\n initial_state = gather_fn.initialize()\n\n @computations.federated_computation(\n computation_types.FederatedType(\n gather_fn._initialize_fn.type_signature.result, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32), placements.CLIENTS))\n def call_gather(state, value, weight):\n return gather_fn(state, value, weight)\n\n _, value_mean = call_gather(initial_state, [value, value], [1.0, 1.0])\n self.assertAllClose(1 * value, value_mean)\n\n _, value_mean = call_gather(initial_state, [value, value], [0.3, 0.7])\n self.assertAllClose(1 * value, value_mean)\n\n _, value_mean = call_gather(initial_state, [value, 2 * value], [1.0, 2.0])\n self.assertAllClose(5 / 3 * value, value_mean)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_mean_raises_bad_encoder(self, bad_encoder):\n 
value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean(value, bad_encoder)\n\n def test_build_encoded_mean_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean(value, incompatible_encoder)\n\n def test_build_encoded_mean_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_mean(value, encoder)\n\n\nclass EncodedMeanProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_mean_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_mean_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_process = encoding_utils.build_encoded_mean_process(\n value_type, encoder)\n state_type = gather_process._initialize_fn.type_signature.result\n gather_signature = gather_process._next_fn.type_signature\n\n self.assertIsInstance(gather_process, MeasuredProcess)\n self.assertEqual(state_type, gather_signature.result[0])\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_mean_process(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n value_type = type_conversions.type_from_tensors(value)\n gather_process = encoding_utils.build_encoded_mean_process(\n value_type, encoder)\n initial_state = gather_process.initialize()\n call_gather = gather_process._next_fn\n\n output = call_gather(initial_state, [value, value], [1.0, 1.0])\n self.assertAllClose(1 * value, output['result'])\n\n output = call_gather(initial_state, [value, value], [0.3, 0.7])\n self.assertAllClose(1 * value, output['result'])\n\n output = call_gather(initial_state, [value, 2 * value], [1.0, 2.0])\n self.assertAllClose(5 / 3 * value, output['result'])\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_mean_process_raises_bad_encoder(self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean_process(value_type, bad_encoder)\n\n def test_build_encoded_mean_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n 
encoding_utils.build_encoded_mean_process(value_type,\n incompatible_encoder)\n\n def test_build_encoded_mean_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_mean_process(value_type, encoder)\n\n\nclass EncodingUtilsTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for utilities for building StatefulFns.\"\"\"\n\n @parameterized.named_parameters(\n ('identity', te.encoders.identity),\n ('uniform', lambda: te.encoders.uniform_quantization(8)),\n ('hadamard', lambda: te.encoders.hadamard_quantization(8)),\n (\n 'one_over_n',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n te.testing.PlusOneOverNEncodingStage()).make()),\n (\n 'state_update',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n StateUpdateTensorsEncodingStage()).make()),\n )\n def test_build_encode_decode_tf_computations_for_broadcast(\n self, encoder_constructor):\n value_spec = tf.TensorSpec((20,), tf.float32)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n\n _, state_type = encoding_utils._build_initial_state_tf_computation(encoder)\n value_type = computation_types.to_type(value_spec)\n encode_fn, decode_fn = (\n encoding_utils._build_encode_decode_tf_computations_for_broadcast(\n state_type, value_type, encoder))\n\n self.assertEqual(state_type, encode_fn.type_signature.parameter[0])\n self.assertEqual(state_type, encode_fn.type_signature.result[0])\n # Output of encode should be the input to decode.\n self.assertEqual(encode_fn.type_signature.result[1],\n decode_fn.type_signature.parameter)\n # Decode should return the same type as input to encode - value_type.\n self.assertEqual(value_type, encode_fn.type_signature.parameter[1])\n self.assertEqual(value_type, decode_fn.type_signature.result)\n\n @parameterized.named_parameters(\n ('identity', te.encoders.identity),\n ('uniform', lambda: te.encoders.uniform_quantization(8)),\n ('hadamard', lambda: te.encoders.hadamard_quantization(8)),\n (\n 'one_over_n',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n te.testing.PlusOneOverNEncodingStage()).make()),\n (\n 'state_update',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n StateUpdateTensorsEncodingStage()).make()),\n )\n def test_build_tf_computations_for_sum(self, encoder_constructor):\n # Tests that the partial computations have matching relevant input-output\n # signatures.\n value_spec = tf.TensorSpec((20,), tf.float32)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n\n _, state_type = encoding_utils._build_initial_state_tf_computation(encoder)\n value_type = computation_types.to_type(value_spec)\n nest_encoder = encoding_utils._build_tf_computations_for_gather(\n state_type, value_type, encoder)\n\n self.assertEqual(state_type,\n nest_encoder.get_params_fn.type_signature.parameter)\n encode_params_type = nest_encoder.get_params_fn.type_signature.result[0]\n decode_before_sum_params_type = nest_encoder.get_params_fn.type_signature.result[\n 1]\n decode_after_sum_params_type = nest_encoder.get_params_fn.type_signature.result[\n 2]\n\n self.assertEqual(value_type,\n nest_encoder.encode_fn.type_signature.parameter[0])\n self.assertEqual(encode_params_type,\n 
nest_encoder.encode_fn.type_signature.parameter[1])\n self.assertEqual(decode_before_sum_params_type,\n nest_encoder.encode_fn.type_signature.parameter[2])\n state_update_tensors_type = nest_encoder.encode_fn.type_signature.result[2]\n\n accumulator_type = nest_encoder.zero_fn.type_signature.result\n self.assertEqual(state_update_tensors_type,\n accumulator_type.state_update_tensors)\n\n self.assertEqual(accumulator_type,\n nest_encoder.accumulate_fn.type_signature.parameter[0])\n self.assertEqual(nest_encoder.encode_fn.type_signature.result,\n nest_encoder.accumulate_fn.type_signature.parameter[1])\n self.assertEqual(accumulator_type,\n nest_encoder.accumulate_fn.type_signature.result)\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.parameter[0])\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.parameter[1])\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.result)\n self.assertEqual(accumulator_type,\n nest_encoder.report_fn.type_signature.parameter)\n self.assertEqual(accumulator_type,\n nest_encoder.report_fn.type_signature.result)\n\n self.assertEqual(\n accumulator_type.values,\n nest_encoder.decode_after_sum_fn.type_signature.parameter[0])\n self.assertEqual(\n decode_after_sum_params_type,\n nest_encoder.decode_after_sum_fn.type_signature.parameter[1])\n self.assertEqual(value_type,\n nest_encoder.decode_after_sum_fn.type_signature.result)\n\n self.assertEqual(state_type,\n nest_encoder.update_state_fn.type_signature.parameter[0])\n self.assertEqual(state_update_tensors_type,\n nest_encoder.update_state_fn.type_signature.parameter[1])\n self.assertEqual(state_type,\n nest_encoder.update_state_fn.type_signature.result)\n\n\[email protected]_style_adaptive_encoding_stage\nclass StateUpdateTensorsEncodingStage(te.core.AdaptiveEncodingStageInterface):\n \"\"\"Test encoding stage using supported state aggregation modes.\n\n This implementation does not use `encoding_stage.StateAggregationMode.STACK`\n which is currently not supported by the implementation.\n \"\"\"\n\n ENCODED_VALUES_KEY = 'state_update_tensors_identity'\n SUM_STATE_UPDATE_KEY = 'state_update_tensors_update_sum'\n MIN_STATE_UPDATE_KEY = 'state_update_tensors_update_min'\n MAX_STATE_UPDATE_KEY = 'state_update_tensors_update_max'\n LAST_SUM_STATE_KEY = 'state_update_tensors_state_sum'\n LAST_MIN_STATE_KEY = 'state_update_tensors_state_min'\n LAST_MAX_STATE_KEY = 'state_update_tensors_state_max'\n\n @property\n def name(self):\n \"\"\"See base class.\"\"\"\n return 'state_update_tensors'\n\n @property\n def compressible_tensors_keys(self):\n \"\"\"See base class.\"\"\"\n return [self.ENCODED_VALUES_KEY]\n\n @property\n def commutes_with_sum(self):\n \"\"\"See base class.\"\"\"\n return True\n\n @property\n def decode_needs_input_shape(self):\n \"\"\"See base class.\"\"\"\n return False\n\n @property\n def state_update_aggregation_modes(self):\n \"\"\"See base class.\"\"\"\n return {\n self.SUM_STATE_UPDATE_KEY: te.core.StateAggregationMode.SUM,\n self.MIN_STATE_UPDATE_KEY: te.core.StateAggregationMode.MIN,\n self.MAX_STATE_UPDATE_KEY: te.core.StateAggregationMode.MAX,\n }\n\n def initial_state(self):\n \"\"\"See base class.\"\"\"\n return {\n self.LAST_SUM_STATE_KEY: tf.constant(0.0),\n self.LAST_MIN_STATE_KEY: tf.constant(0.0),\n self.LAST_MAX_STATE_KEY: tf.constant(0.0),\n }\n\n def update_state(self, state, state_update_tensors):\n \"\"\"See base class.\"\"\"\n del state # Unused.\n return {\n self.LAST_SUM_STATE_KEY:\n 
tf.reduce_sum(state_update_tensors[self.SUM_STATE_UPDATE_KEY]),\n self.LAST_MIN_STATE_KEY:\n tf.reduce_min(state_update_tensors[self.MIN_STATE_UPDATE_KEY]),\n self.LAST_MAX_STATE_KEY:\n tf.reduce_max(state_update_tensors[self.MAX_STATE_UPDATE_KEY])\n }\n\n def get_params(self, state):\n \"\"\"See base class.\"\"\"\n del state # Unused.\n return {}, {}\n\n def encode(self, x, encode_params):\n \"\"\"See base class.\"\"\"\n del encode_params # Unused.\n x = tf.identity(x)\n return {\n self.ENCODED_VALUES_KEY: x\n }, {\n self.SUM_STATE_UPDATE_KEY: tf.reduce_sum(x),\n self.MIN_STATE_UPDATE_KEY: tf.reduce_min(x),\n self.MAX_STATE_UPDATE_KEY: tf.reduce_max(x),\n }\n\n def decode(self,\n encoded_tensors,\n decode_params,\n num_summands=None,\n shape=None):\n \"\"\"See base class.\"\"\"\n del decode_params, num_summands, shape # Unused.\n return tf.identity(encoded_tensors[self.ENCODED_VALUES_KEY])\n\n\nif __name__ == '__main__':\n execution_contexts.set_local_execution_context()\n test.main()\n" ]
[ [ "tensorflow.reduce_min", "tensorflow.reduce_max", "tensorflow.identity", "numpy.random.rand", "tensorflow.TensorSpec", "numpy.array", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.dtypes.as_dtype" ] ]
stefantaubert/waveglow
[ "5169ec751343a3e3008209a1a2f055e71a65908a" ]
[ "src/waveglow/core/inference.py" ]
[ "import datetime\nfrom dataclasses import dataclass\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport imageio\nimport numpy as np\nimport torch\nfrom audio_utils import get_duration_s, normalize_wav\nfrom audio_utils.audio import concatenate_audios\nfrom audio_utils.mel import TacotronSTFT, plot_melspec_np\nfrom general_utils.generic_list import GenericList\nfrom image_utils import (calculate_structual_similarity_np,\n make_same_width_by_filling_white)\nfrom mcd import get_mcd_between_mel_spectograms\nfrom pandas import DataFrame\nfrom tqdm import tqdm\nfrom waveglow.core.model_checkpoint import CheckpointWaveglow\nfrom waveglow.core.synthesizer import InferenceResult, Synthesizer\nfrom waveglow.globals import MCD_NO_OF_COEFFS_PER_FRAME\nfrom waveglow.utils import cosine_dist_mels\n\n\n@dataclass\nclass InferMelEntry():\n identifier: str\n mel: np.ndarray\n mel_path: Path\n sr: int\n\n\n@dataclass\nclass InferenceEntry():\n entry: InferMelEntry = None\n inference_result: InferenceResult = None\n seed: int = None\n inferred_duration_s: float = None\n iteration: int = None\n mel_original_frames: int = None\n mel_inferred_frames: int = None\n mcd_dtw: float = None\n mcd_dtw_penalty: int = None\n mcd_dtw_frames: int = None\n mcd: float = None\n mcd_penalty: int = None\n mcd_frames: int = None\n structural_similarity: float = None\n cosine_similarity: float = None\n denoiser_strength: float = None\n sigma: float = None\n train_name: str = None\n mel_path: Path = None\n\n\nclass InferenceEntries(GenericList[InferenceEntry]):\n pass\n\n\ndef get_df(entries: InferenceEntries) -> DataFrame:\n if len(entries) == 0:\n return DataFrame()\n\n data = [\n {\n \"Id\": entry.entry.identifier,\n \"Timepoint\": f\"{entry.inference_result.timepoint:%Y/%m/%d %H:%M:%S}\",\n \"Iteration\": entry.iteration,\n \"Seed\": entry.seed,\n \"Sigma\": entry.sigma,\n \"Denoiser strength\": entry.denoiser_strength,\n \"Inference duration (s)\": entry.inference_result.inference_duration_s,\n \"Denoising duration (s)\": entry.inference_result.denoising_duration_s,\n \"Overamplified?\": entry.inference_result.was_overamplified,\n \"Inferred wav duration (s)\": entry.inferred_duration_s,\n \"# Frames original mel\": entry.mel_original_frames,\n \"# Frames inferred mel\": entry.mel_inferred_frames,\n \"# Difference frames\": entry.mel_inferred_frames - entry.mel_original_frames,\n \"Sampling rate (Hz)\": entry.inference_result.sampling_rate,\n \"Train name\": entry.train_name,\n \"Mel path\": str(entry.entry.mel_path),\n \"Mel sampling rate\": str(entry.entry.sr),\n }\n for entry in entries.items()\n ]\n\n df = DataFrame(\n data=[x.values() for x in data],\n columns=data[0].keys(),\n )\n\n return df\n\n\n@dataclass\nclass InferenceEntryOutput():\n identifier: int = None\n mel_orig: np.ndarray = None\n mel_orig_img: np.ndarray = None\n orig_sr: int = None\n inferred_sr: int = None\n mel_inferred_denoised: np.ndarray = None\n mel_inferred_denoised_img: np.ndarray = None\n wav_inferred_denoised: np.ndarray = None\n mel_denoised_diff_img: np.ndarray = None\n wav_inferred: np.ndarray = None\n\n\ndef mel_to_torch(mel: np.ndarray) -> np.ndarray:\n res = torch.FloatTensor(mel)\n res = res.cuda()\n return res\n\n\ndef infer(mel_entries: List[InferMelEntry], checkpoint: CheckpointWaveglow, custom_hparams: Optional[Dict[str, str]], denoiser_strength: float, sigma: float, sentence_pause_s: float, save_callback: Callable[[InferenceEntryOutput], None], concatenate: 
bool, seed: int, train_name: str, logger: Logger) -> Tuple[InferenceEntries, Tuple[Optional[np.ndarray], int]]:\n inference_entries = InferenceEntries()\n\n if len(mel_entries) == 0:\n logger.info(\"Nothing to synthesize!\")\n return inference_entries, (None, 0) # keep the declared tuple return shape\n\n synth = Synthesizer(\n checkpoint=checkpoint,\n custom_hparams=custom_hparams,\n logger=logger\n )\n\n # Check mels have the same sampling rate as trained waveglow model\n for mel_entry in mel_entries:\n assert mel_entry.sr == synth.hparams.sampling_rate\n\n taco_stft = TacotronSTFT(synth.hparams, logger=logger)\n mels_torch = []\n mels_torch_prepared = []\n for mel_entry in mel_entries:\n mel_torch = mel_to_torch(mel_entry.mel)\n mels_torch.append(mel_torch)\n mel_var = torch.autograd.Variable(mel_torch)\n mel_var = mel_var.cuda()\n mel_var = mel_var.unsqueeze(0)\n mels_torch_prepared.append(mel_var)\n\n inference_results = synth.infer_all(\n mels_torch_prepared, sigma, denoiser_strength, seed=seed)\n\n complete_wav_denoised: Optional[np.ndarray] = None\n if concatenate:\n if len(inference_results) >= 1:\n logger.info(\"Concatenating audios...\")\n complete_wav_denoised = concatenate_audios(\n [x.wav_denoised for x in inference_results], sentence_pause_s, synth.hparams.sampling_rate)\n complete_wav_denoised = normalize_wav(complete_wav_denoised)\n logger.info(\"Done.\")\n\n inference_result: InferenceResult\n mel_entry: InferMelEntry\n for mel_entry, inference_result in tqdm(zip(mel_entries, inference_results)):\n wav_inferred_denoised_normalized = normalize_wav(inference_result.wav_denoised)\n\n val_entry = InferenceEntry(\n entry=mel_entry,\n inference_result=inference_result,\n iteration=checkpoint.iteration,\n inferred_duration_s=get_duration_s(\n inference_result.wav_denoised, inference_result.sampling_rate),\n denoiser_strength=denoiser_strength,\n sigma=sigma,\n seed=seed,\n train_name=train_name,\n )\n\n mel_orig = mel_entry.mel\n\n wav_inferred_denoised_normalized_tensor = torch.FloatTensor(wav_inferred_denoised_normalized)\n mel_inferred_denoised = taco_stft.get_mel_tensor(wav_inferred_denoised_normalized_tensor)\n mel_inferred_denoised = mel_inferred_denoised.numpy()\n\n validation_entry_output = InferenceEntryOutput(\n identifier=mel_entry.identifier,\n mel_orig=mel_orig,\n inferred_sr=inference_result.sampling_rate,\n mel_inferred_denoised=mel_inferred_denoised,\n wav_inferred_denoised=wav_inferred_denoised_normalized,\n orig_sr=mel_entry.sr,\n wav_inferred=normalize_wav(inference_result.wav),\n mel_denoised_diff_img=None,\n mel_inferred_denoised_img=None,\n mel_orig_img=None,\n )\n\n mcd_dtw, penalty_dtw, final_frame_number_dtw = get_mcd_between_mel_spectograms(\n mel_1=mel_orig,\n mel_2=mel_inferred_denoised,\n n_mfcc=MCD_NO_OF_COEFFS_PER_FRAME,\n take_log=False,\n use_dtw=True,\n )\n\n val_entry.mel_original_frames = mel_orig.shape[1]\n val_entry.mel_inferred_frames = mel_inferred_denoised.shape[1]\n val_entry.mcd_dtw = mcd_dtw\n val_entry.mcd_dtw_penalty = penalty_dtw\n val_entry.mcd_dtw_frames = final_frame_number_dtw\n\n mcd, penalty, final_frame_number = get_mcd_between_mel_spectograms(\n mel_1=mel_orig,\n mel_2=mel_inferred_denoised,\n n_mfcc=MCD_NO_OF_COEFFS_PER_FRAME,\n take_log=False,\n use_dtw=False,\n )\n\n val_entry.mcd = mcd\n val_entry.mcd_penalty = penalty\n val_entry.mcd_frames = final_frame_number\n\n cosine_similarity = cosine_dist_mels(mel_orig, mel_inferred_denoised)\n val_entry.cosine_similarity = cosine_similarity\n\n mel_original_img_raw, mel_original_img = 
plot_melspec_np(mel_orig)\n mel_inferred_denoised_img_raw, mel_inferred_denoised_img = plot_melspec_np(\n mel_inferred_denoised)\n\n validation_entry_output.mel_orig_img = mel_original_img\n validation_entry_output.mel_inferred_denoised_img = mel_inferred_denoised_img\n\n mel_original_img_raw_same_dim, mel_inferred_denoised_img_raw_same_dim = make_same_width_by_filling_white(\n img_a=mel_original_img_raw,\n img_b=mel_inferred_denoised_img_raw,\n )\n\n mel_original_img_same_dim, mel_inferred_denoised_img_same_dim = make_same_width_by_filling_white(\n img_a=mel_original_img,\n img_b=mel_inferred_denoised_img,\n )\n\n structural_similarity_raw, mel_difference_denoised_img_raw = calculate_structual_similarity_np(\n img_a=mel_original_img_raw_same_dim,\n img_b=mel_inferred_denoised_img_raw_same_dim,\n )\n val_entry.structural_similarity = structural_similarity_raw\n\n structural_similarity, mel_denoised_diff_img = calculate_structual_similarity_np(\n img_a=mel_original_img_same_dim,\n img_b=mel_inferred_denoised_img_same_dim,\n )\n validation_entry_output.mel_denoised_diff_img = mel_denoised_diff_img\n\n imageio.imsave(\"/tmp/mel_original_img_raw.png\", mel_original_img_raw)\n imageio.imsave(\"/tmp/mel_inferred_img_raw.png\", mel_inferred_denoised_img_raw)\n imageio.imsave(\"/tmp/mel_difference_denoised_img_raw.png\", mel_difference_denoised_img_raw)\n\n # logger.info(val_entry)\n logger.info(f\"Current: {val_entry.entry.identifier}\")\n logger.info(f\"MCD DTW: {val_entry.mcd_dtw}\")\n logger.info(f\"MCD DTW penalty: {val_entry.mcd_dtw_penalty}\")\n logger.info(f\"MCD DTW frames: {val_entry.mcd_dtw_frames}\")\n\n logger.info(f\"MCD: {val_entry.mcd}\")\n logger.info(f\"MCD penalty: {val_entry.mcd_penalty}\")\n logger.info(f\"MCD frames: {val_entry.mcd_frames}\")\n\n # logger.info(f\"MCD DTW V2: {val_entry.mcd_dtw_v2}\")\n logger.info(f\"Structural Similarity: {val_entry.structural_similarity}\")\n logger.info(f\"Cosine Similarity: {val_entry.cosine_similarity}\")\n save_callback(validation_entry_output)\n inference_entries.append(val_entry)\n\n return inference_entries, (complete_wav_denoised, synth.hparams.sampling_rate)\n" ]
[ [ "torch.autograd.Variable", "torch.FloatTensor", "pandas.DataFrame" ] ]
Noczio/VoorSpelling
[ "51e30ab3f3b2e346c6eb56578818020e142a3adb" ]
[ "AppVoor/resources/backend_scripts/parameter_search.py" ]
[ "from abc import ABC, abstractmethod\nfrom typing import Any\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GridSearchCV\nfrom skopt import BayesSearchCV\nfrom skopt.space import Real, Integer, Categorical\n\nfrom resources.backend_scripts.switcher import Switch\n\nNpArray = np.ndarray\nDataFrame = pd.DataFrame\n\n\nclass ParameterSearch(ABC):\n\n @abstractmethod\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n pass\n\n\nclass BayesianSearch(ParameterSearch):\n\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n clf = BayesSearchCV(estimator=model, search_spaces=parameters, cv=n_folds_validation,\n verbose=10, scoring=score_type)\n clf.fit(x, y)\n best_params = clf.best_params_\n best_score = clf.best_score_\n return best_params, best_score\n\n\nclass GridSearch(ParameterSearch):\n\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n clf = GridSearchCV(estimator=model, param_grid=parameters, cv=n_folds_validation,\n verbose=10, scoring=score_type)\n clf.fit(x, y)\n best_params = clf.best_params_\n best_score = clf.best_score_\n return best_params, best_score\n\n\nclass ParameterSearchPossibilities(Switch):\n\n @staticmethod\n def BS() -> BayesianSearch:\n return BayesianSearch()\n\n @staticmethod\n def GS() -> GridSearch:\n return GridSearch()\n\n @staticmethod\n def BayesianSearch() -> BayesianSearch:\n return BayesianSearch()\n\n @staticmethod\n def GridSearch() -> GridSearch:\n return GridSearch()\n\n\nclass BayesianSearchParametersPossibilities(Switch):\n\n @staticmethod\n def LinearSVC() -> dict:\n return {'C': Real(1, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'dual': Categorical([False]),\n 'penalty': Categorical(['l1', 'l2']),\n 'intercept_scaling': Real(1, 50, prior='log-uniform')}\n\n @staticmethod\n def SVC() -> dict:\n return {'C': Real(1, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'gamma': Categorical(['scale', 'auto']),\n 'kernel': Categorical(['rbf', 'sigmoid'])}\n\n @staticmethod\n def KNeighborsClassifier() -> dict:\n return {'n_neighbors': Integer(1, 40),\n 'weights': Categorical(['uniform', 'distance']),\n 'leaf_size': Integer(30, 100),\n 'p': Integer(1, 30),\n 'algorithm': Categorical(['auto', 'ball_tree', 'kd_tree', 'brute'])}\n\n @staticmethod\n def GaussianNB() -> dict:\n return {'var_smoothing': Real(0.000000001, 100, prior='log-uniform')}\n\n @staticmethod\n def LinearSVR() -> dict:\n return {'epsilon': Real(0, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'C': Real(1, 30, prior='log-uniform'),\n 'loss': Categorical(['epsilon_insensitive', 'squared_epsilon_insensitive']),\n 'dual': Categorical([False])}\n\n @staticmethod\n def SVR() -> dict:\n return {'gamma': Categorical(['scale', 'auto']),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'C': Real(1, 30, prior='log-uniform'),\n 'epsilon': Real(0.1, 30, prior='log-uniform'),\n 'kernel': Categorical(['rbf', 'sigmoid'])}\n\n @staticmethod\n def Lasso() -> dict:\n return {'alpha': Real(1, 40, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'selection': Categorical(['cyclic', 'random']),\n 'positive': Categorical([True, False])}\n\n @staticmethod\n def SGDClassifier() -> dict:\n return {'penalty': 
Categorical(['l2', 'l1', 'elasticnet']),\n 'alpha': Real(0.0001, 40, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def AffinityPropagation() -> dict:\n return {'damping': Real(0.5, 1, prior='log-uniform'),\n 'convergence_iter': Integer(15, 100),\n 'affinity': Categorical(['euclidean', 'precomputed']),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def KMeans() -> dict:\n return {'n_clusters': Integer(1, 50),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'random_state': Integer(0, 1000),\n 'algorithm': Categorical(['auto', 'full', 'elkan'])}\n\n @staticmethod\n def MiniBatchKMeans() -> dict:\n return {'n_clusters': Integer(1, 50),\n 'tol': Real(0, 1, prior='log-uniform'),\n 'batch_size': Integer(100, 512),\n 'reassignment_ratio': Real(0.01, 5, prior='log-uniform'),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def MeanShift() -> dict:\n return {'bin_seeding': Categorical([True, False]),\n 'cluster_all': Categorical([True, False]),\n 'min_bin_freq': Integer(1, 30)}\n\n\nclass GridSearchParametersPossibilities(Switch):\n\n @staticmethod\n def LinearSVC() -> dict:\n return {'C': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'dual': (False,),\n 'penalty': ('l1', 'l2'),\n 'intercept_scaling': np.arange(1, 22, 5)}\n\n @staticmethod\n def SVC() -> dict:\n return {'C': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'gamma': ('scale', 'auto'),\n 'kernel': ('rbf', 'sigmoid')}\n\n @staticmethod\n def KNeighborsClassifier() -> dict:\n return {'n_neighbors': np.arange(1, 32, 5),\n 'weights': ('uniform', 'distance'),\n 'leaf_size': (30, 50, 70, 100),\n 'p': (1, 2, 3, 5, 10, 15),\n 'algorithm': ('auto', 'ball_tree', 'kd_tree', 'brute')}\n\n @staticmethod\n def GaussianNB() -> dict:\n return {'var_smoothing': [0.000000001, 0.00000001, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] +\n list(np.arange(1, 101, 1))}\n\n @staticmethod\n def LinearSVR() -> dict:\n return {'epsilon': np.arange(0, 22, 3),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'C': np.arange(1, 32, 5),\n 'loss': ('epsilon_insensitive', 'squared_epsilon_insensitive'),\n 'dual': (False,)}\n\n @staticmethod\n def SVR() -> dict:\n return {'gamma': ('scale', 'auto'),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'C': np.arange(1, 32, 5),\n 'epsilon': (0.1, 1, 2, 3, 4, 5),\n 'kernel': ('rbf', 'sigmoid')}\n\n @staticmethod\n def Lasso() -> dict:\n return {'alpha': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'selection': ('cyclic', 'random'),\n 'positive': (True, False)}\n\n @staticmethod\n def SGDClassifier() -> dict:\n return {'penalty': ('l2', 'l1', 'elasticnet'),\n 'alpha': (0.0001, 0.01, 1, 2, 3, 4, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'random_state': np.arange(0, 2500, 500)}\n\n @staticmethod\n def AffinityPropagation() -> dict:\n return {'damping': np.arange(0.5, 1.1, 0.1),\n 'convergence_iter': (15, 30, 40, 50),\n 'affinity': ('euclidean', 'precomputed'),\n 'random_state': np.arange(0, 2500, 500)}\n\n @staticmethod\n def KMeans() -> dict:\n return {'n_clusters': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 
'random_state': np.arange(0, 2500, 500),\n 'algorithm': ('auto', 'full', 'elkan')}\n\n @staticmethod\n def MiniBatchKMeans() -> dict:\n return {'n_clusters': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n 'batch_size': np.arange(100, 600, 100),\n 'reassignment_ratio': (0.01, 0.1, 1, 3, 5),\n 'random_state': np.arange(0, 2500, 500)}\n\n @staticmethod\n def MeanShift() -> dict:\n return {'bin_seeding': (True, False),\n 'cluster_all': (True, False),\n 'min_bin_freq': np.arange(1, 32, 1)}\n\n\nclass ParameterSearchCreator:\n\n @staticmethod\n def create_parameter_selector(selection_type: str) -> ParameterSearch:\n try:\n parameter_search_name = selection_type.replace(\" \", \"\")\n parameter_search_method = ParameterSearchPossibilities.case(parameter_search_name)\n return parameter_search_method\n except AttributeError: # 'except():' caught nothing, so the fallback below never ran\n available_types = ParameterSearchCreator.get_available_types()\n types_as_string = \", \".join(available_types)\n raise AttributeError(f\"Parameter value is wrong. \"\n f\"It should be any of the following: {types_as_string}\")\n\n @staticmethod\n def get_available_types() -> tuple:\n available_types = [func for func in dir(ParameterSearchPossibilities)\n if callable(getattr(ParameterSearchPossibilities, func)) and not\n (func.startswith(\"__\") or func == \"case\")]\n return tuple(available_types)\n" ]
[ [ "numpy.arange", "sklearn.model_selection.GridSearchCV" ] ]
svenaoki/zenml
[ "b94dff83f0e7c8ab29e99d6b42a0c906a3512b63" ]
[ "examples/not_so_quickstart/steps/torch_steps.py" ]
[ "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom zenml.steps import step\n\nfrom .params import TrainerConfig\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Net(nn.Module):\n \"\"\"Straightforward NN for classification.\"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.flat_network = nn.Sequential(\n nn.Flatten(),\n nn.Linear(784, 10),\n )\n # fully connected layer, output 10 classes\n self.out = nn.Linear(10, 10)\n\n def forward(self, x):\n x = self.flat_network(x)\n x = self.out(x)\n output = self.out(x)\n return output\n\n\ndef get_data_loader_from_np(X: np.ndarray, y: np.ndarray) -> DataLoader:\n \"\"\"Returns a torch Dataloader from two np arrays.\"\"\"\n tensor_x = torch.Tensor(X) # transform to torch tensor\n tensor_y = torch.Tensor(y).type(torch.LongTensor)\n\n torch_dataset = TensorDataset(tensor_x, tensor_y) # create your dataset\n torch_dataloader = DataLoader(torch_dataset) # create your dataloader\n return torch_dataloader\n\n\n@step\ndef torch_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> nn.Module:\n \"\"\"Train a neural net from scratch to recognize MNIST digits return our\n model or the learner\"\"\"\n train_loader = get_data_loader_from_np(X_train, y_train)\n\n model = Net().to(DEVICE)\n optimizer = optim.Adadelta(model.parameters(), lr=config.lr)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=config.gamma)\n for epoch in range(1, config.epochs + 1):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(DEVICE), target.to(DEVICE)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n return model\n\n\n@step\ndef torch_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: nn.Module,\n) -> float:\n \"\"\"Calculate the loss for the model for each epoch in a graph\"\"\"\n model.eval()\n test_loader = get_data_loader_from_np(X_test, y_test)\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(DEVICE), target.to(DEVICE)\n output = model(data)\n test_loss += F.nll_loss(\n output, target, reduction=\"sum\"\n ).item() # sum up batch loss\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print(\n \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n return correct / len(test_loader.dataset)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.nn.Flatten", "torch.nn.functional.nll_loss", "torch.no_grad", "torch.cuda.is_available", "torch.optim.lr_scheduler.StepLR", "torch.utils.data.TensorDataset", "torch.Tensor" ] ]
gotgenes/CpGHMMExample
[ "d55e02ee930da040808e278bd1216ddc25116d0f" ]
[ "CpGHMMExample/gencpgdata.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Copyright (c) 2012 Christopher D. Lasher\n#\n# This software is released under the MIT License. Please see\n# LICENSE.txt for details.\n\n\n\"\"\"Generates a random sequence with CpG islands.\n\nThis script produces three outfiles:\n\n* a FASTA format sequence file\n\n* a file containing the start and end positions of CpG islands\n\n* a file containing the parameters of the transitions\n\"\"\"\n\nimport argparse\nimport bisect\nimport random\nimport textwrap\n\nimport numpy as np\n\nimport logging\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\nSTREAM_HANDLER = logging.StreamHandler()\nSTREAM_HANDLER.setLevel(logging.INFO)\nLOGGER.addHandler(STREAM_HANDLER)\nFORMATTER = logging.Formatter('%(message)s')\nSTREAM_HANDLER.setFormatter(FORMATTER)\n\nALPHABET = 'ACGT'\n\n# Rows are ACGT, columns are ACGT\n_CPG_CPG_PROBABILITIES = np.array([\n [\n 0.1,\n 0.4,\n 0.4,\n 0.1\n ],\n [\n 0.05,\n 0.45,\n 0.45,\n 0.05\n ],\n [\n 0.05,\n 0.45,\n 0.45,\n 0.05\n ],\n [\n 0.1,\n 0.4,\n 0.4,\n 0.1\n ],\n])\n\n# Rows are ACGT, columns are ACGT\n_NORMAL_NORMAL_PROBABILITIES = np.array([\n [\n 0.25,\n 0.25,\n 0.25,\n 0.25\n ],\n [\n 0.15,\n 0.35,\n 0.35,\n 0.15\n ],\n [\n 0.15,\n 0.35,\n 0.35,\n 0.15\n ],\n [\n 0.25,\n 0.25,\n 0.25,\n 0.25\n ],\n])\n\n_CPG_TO_NORMAL_TRANSITION_PROB = 0.005\n_NORMAL_TO_CPG_TRANSITION_PROB = 0.0025\n\n_CPG_PROBABILITIES = np.concatenate(\n (\n (1 - _CPG_TO_NORMAL_TRANSITION_PROB) * _CPG_CPG_PROBABILITIES,\n _CPG_TO_NORMAL_TRANSITION_PROB * _NORMAL_NORMAL_PROBABILITIES\n ),\n 1\n)\n_NORMAL_PROBABILITIES = np.concatenate(\n (\n _NORMAL_TO_CPG_TRANSITION_PROB * _CPG_CPG_PROBABILITIES,\n (1 - _NORMAL_TO_CPG_TRANSITION_PROB) * _NORMAL_NORMAL_PROBABILITIES\n ),\n 1\n)\n\nTRANSITION_PROBABILITIES = np.concatenate(\n (_CPG_PROBABILITIES, _NORMAL_PROBABILITIES))\n\nTRANSITION_CUMSUMS = TRANSITION_PROBABILITIES.cumsum(1).tolist()\nfor row in TRANSITION_CUMSUMS:\n row[-1] = 1.0\n\n\ndef generate_sequence(length):\n \"\"\"Generates the random sequence, including CpG islands.\n\n :param length: length of the sequence to generate\n :returns: a randomly generated sequence, and a list of start and end\n positions of CpG sites within the sequence\n\n \"\"\"\n sequence = []\n cpg_sites = []\n cpg_start = None\n in_cpg = False\n start = random.randrange(len(TRANSITION_CUMSUMS))\n sequence.append(ALPHABET[start % 4])\n if start < 4:\n in_cpg = True\n cpg_start = start\n prev_index = start\n for x in range(1, length):\n random_value = random.random()\n transition_index = bisect.bisect_left(\n TRANSITION_CUMSUMS[prev_index], random_value)\n sequence.append(ALPHABET[transition_index % 4])\n if transition_index < 4:\n if not in_cpg:\n cpg_start = x\n in_cpg = True\n else:\n if in_cpg:\n cpg_sites.append((cpg_start, x - 1))\n in_cpg = False\n prev_index = transition_index\n\n if in_cpg:\n cpg_sites.append((cpg_start, length - 1))\n\n return ''.join(sequence), cpg_sites\n\n\ndef wrap_sequence(sequence, width=50):\n return '\\n'.join(sequence[i:i+width] for i in\n xrange(0, len(sequence), width))\n\n\ndef output_sequence(outfileh, sequence):\n \"\"\"Writes the sequence to the outfile in FASTA format.\n\n :param outfileh: @todo\n :param sequence: @todo\n :returns: @todo\n\n \"\"\"\n outfileh.write('>testcpg\\n')\n formatted_sequence = wrap_sequence(sequence, 50)\n outfileh.write(formatted_sequence)\n\n\ndef output_sites(outfileh, sites):\n \"\"\"Writes the CpG start and end positions to a CSV-format file.\n\n :param outfileh: @todo\n 
:param sites: @todo\n :returns: @todo\n\n \"\"\"\n outlines = (\"{},{}\\n\".format(start, end) for (start, end) in sites)\n outfileh.writelines(outlines)\n\n\ndef make_cli_parser():\n \"\"\"Creates the command-line interface.\n\n :returns: an :py:class:`argparse.ArgumentParser` instance\n\n \"\"\"\n cli_parser = argparse.ArgumentParser(description=__doc__)\n cli_parser.add_argument(\n 'length', type=int, help=\"length of sequence to generate\")\n return cli_parser\n\n\ndef main(argv=None):\n cli_parser = make_cli_parser()\n args = cli_parser.parse_args(argv)\n LOGGER.info(\"Generating random CpG sequence.\")\n sequence, cpg_sites = generate_sequence(args.length)\n LOGGER.info(\"Writing sequence to test_cpg_sequence.fasta\")\n with open('test_cpg_sequence.fasta', 'w') as fasta_outfile:\n output_sequence(fasta_outfile, sequence)\n LOGGER.info(\"Writing CpG site positions to test_cpg_sites.csv\")\n with open('test_cpg_sites.csv', 'w') as positions_outfile:\n output_sites(positions_outfile, cpg_sites)\n LOGGER.info(\"Writing transition probabilities to \"\n \"test_cpg_transitions.csv\")\n np.savetxt('test_cpg_transitions.csv', TRANSITION_PROBABILITIES,\n delimiter=',')\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.array", "numpy.savetxt", "numpy.concatenate" ] ]
wangyusu/pymatgen
[ "a90af2fe71eff15134ca33c6e58f07caba425ae9" ]
[ "pymatgen/symmetry/tests/test_analyzer.py" ]
[ "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport unittest\nfrom pathlib import Path\nimport os\nimport numpy as np\n\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.core.sites import PeriodicSite\nfrom pymatgen.core.structure import Molecule, Structure\nfrom pymatgen.io.cif import CifParser\nfrom pymatgen.io.vasp.inputs import Poscar\nfrom pymatgen.io.vasp.outputs import Vasprun\nfrom pymatgen.symmetry.analyzer import (\n PointGroupAnalyzer,\n SpacegroupAnalyzer,\n cluster_sites,\n iterative_symmetrize,\n)\nfrom pymatgen.util.testing import PymatgenTest\n\n\ntest_dir_mol = os.path.join(PymatgenTest.TEST_FILES_DIR, \"molecules\")\n\n\nclass SpacegroupAnalyzerTest(PymatgenTest):\n def setUp(self):\n p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR\"))\n self.structure = p.structure\n self.sg = SpacegroupAnalyzer(self.structure, 0.001)\n self.disordered_structure = self.get_structure(\"Li10GeP2S12\")\n self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001)\n s = p.structure.copy()\n site = s[0]\n del s[0]\n s.append(site.species, site.frac_coords)\n self.sg3 = SpacegroupAnalyzer(s, 0.001)\n graphite = self.get_structure(\"Graphite\")\n graphite.add_site_property(\"magmom\", [0.1] * len(graphite))\n self.sg4 = SpacegroupAnalyzer(graphite, 0.001)\n self.structure4 = graphite\n\n def test_primitive(self):\n s = Structure.from_spacegroup(\"Fm-3m\", np.eye(3) * 3, [\"Cu\"], [[0, 0, 0]])\n a = SpacegroupAnalyzer(s)\n self.assertEqual(len(s), 4)\n self.assertEqual(len(a.find_primitive()), 1)\n\n def test_is_laue(self):\n s = Structure.from_spacegroup(\"Fm-3m\", np.eye(3) * 3, [\"Cu\"], [[0, 0, 0]])\n a = SpacegroupAnalyzer(s)\n self.assertTrue(a.is_laue())\n\n def test_magnetic(self):\n lfp = PymatgenTest.get_structure(\"LiFePO4\")\n sg = SpacegroupAnalyzer(lfp, 0.1)\n self.assertEqual(sg.get_space_group_symbol(), \"Pnma\")\n magmoms = [0] * len(lfp)\n magmoms[4] = 1\n magmoms[5] = -1\n magmoms[6] = 1\n magmoms[7] = -1\n lfp.add_site_property(\"magmom\", magmoms)\n sg = SpacegroupAnalyzer(lfp, 0.1)\n self.assertEqual(sg.get_space_group_symbol(), \"Pnma\")\n\n def test_get_space_symbol(self):\n self.assertEqual(self.sg.get_space_group_symbol(), \"Pnma\")\n self.assertEqual(self.disordered_sg.get_space_group_symbol(), \"P4_2/nmc\")\n self.assertEqual(self.sg3.get_space_group_symbol(), \"Pnma\")\n self.assertEqual(self.sg4.get_space_group_symbol(), \"P6_3/mmc\")\n\n def test_get_space_number(self):\n self.assertEqual(self.sg.get_space_group_number(), 62)\n self.assertEqual(self.disordered_sg.get_space_group_number(), 137)\n self.assertEqual(self.sg4.get_space_group_number(), 194)\n\n def test_get_hall(self):\n self.assertEqual(self.sg.get_hall(), \"-P 2ac 2n\")\n self.assertEqual(self.disordered_sg.get_hall(), \"P 4n 2n -1n\")\n\n def test_get_pointgroup(self):\n self.assertEqual(self.sg.get_point_group_symbol(), \"mmm\")\n self.assertEqual(self.disordered_sg.get_point_group_symbol(), \"4/mmm\")\n\n def test_get_symmetry_operations(self):\n\n for sg, structure in [(self.sg, self.structure), (self.sg4, self.structure4)]:\n\n pgops = sg.get_point_group_operations()\n fracsymmops = sg.get_symmetry_operations()\n symmops = sg.get_symmetry_operations(True)\n latt = structure.lattice\n for fop, op, pgop in zip(fracsymmops, symmops, pgops):\n # translation vector values should all be 0 or 0.5\n t = fop.translation_vector * 2\n self.assertArrayAlmostEqual(t - np.round(t), 0)\n\n 
self.assertArrayAlmostEqual(fop.rotation_matrix, pgop.rotation_matrix)\n for site in structure:\n newfrac = fop.operate(site.frac_coords)\n newcart = op.operate(site.coords)\n self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))\n found = False\n newsite = PeriodicSite(site.species, newcart, latt, coords_are_cartesian=True)\n for testsite in structure:\n if newsite.is_periodic_image(testsite, 1e-3):\n found = True\n break\n self.assertTrue(found)\n\n # Make sure this works for any position, not just the atomic\n # ones.\n random_fcoord = np.random.uniform(size=(3))\n random_ccoord = latt.get_cartesian_coords(random_fcoord)\n newfrac = fop.operate(random_fcoord)\n newcart = op.operate(random_ccoord)\n self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))\n\n def test_get_symmetry_dataset(self):\n ds = self.sg.get_symmetry_dataset()\n self.assertEqual(ds[\"international\"], \"Pnma\")\n\n def test_get_crystal_system(self):\n crystal_system = self.sg.get_crystal_system()\n self.assertEqual(\"orthorhombic\", crystal_system)\n self.assertEqual(\"tetragonal\", self.disordered_sg.get_crystal_system())\n\n def test_get_refined_structure(self):\n for a in self.sg.get_refined_structure().lattice.angles:\n self.assertEqual(a, 90)\n refined = self.disordered_sg.get_refined_structure()\n for a in refined.lattice.angles:\n self.assertEqual(a, 90)\n self.assertEqual(refined.lattice.a, refined.lattice.b)\n s = self.get_structure(\"Li2O\")\n sg = SpacegroupAnalyzer(s, 0.01)\n self.assertEqual(sg.get_refined_structure().num_sites, 4 * s.num_sites)\n\n def test_get_symmetrized_structure(self):\n symm_struct = self.sg.get_symmetrized_structure()\n for a in symm_struct.lattice.angles:\n self.assertEqual(a, 90)\n self.assertEqual(len(symm_struct.equivalent_sites), 5)\n\n symm_struct = self.disordered_sg.get_symmetrized_structure()\n self.assertEqual(len(symm_struct.equivalent_sites), 8)\n self.assertEqual([len(i) for i in symm_struct.equivalent_sites], [16, 4, 8, 4, 2, 8, 8, 8])\n s1 = symm_struct.equivalent_sites[1][1]\n s2 = symm_struct[symm_struct.equivalent_indices[1][1]]\n self.assertEqual(s1, s2)\n self.assertEqual(self.sg4.get_symmetrized_structure()[0].magmom, 0.1)\n self.assertEqual(symm_struct.wyckoff_symbols[0], \"16h\")\n # self.assertEqual(symm_struct[0].wyckoff, \"16h\")\n\n # Check copying\n self.assertEqual(symm_struct.copy(), symm_struct)\n d = symm_struct.as_dict()\n from pymatgen.symmetry.structure import SymmetrizedStructure\n\n ss = SymmetrizedStructure.from_dict(d)\n self.assertEqual(ss.wyckoff_symbols[0], \"16h\")\n self.assertIn(\"SymmetrizedStructure\", ss.__str__())\n\n def test_find_primitive(self):\n \"\"\"\n F m -3 m Li2O testing of converting to primitive cell\n \"\"\"\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"Li2O.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure)\n primitive_structure = s.find_primitive()\n self.assertEqual(primitive_structure.formula, \"Li2 O1\")\n # This isn't what is expected. 
All the angles should be 60\n self.assertAlmostEqual(primitive_structure.lattice.alpha, 60)\n self.assertAlmostEqual(primitive_structure.lattice.beta, 60)\n self.assertAlmostEqual(primitive_structure.lattice.gamma, 60)\n self.assertAlmostEqual(primitive_structure.lattice.volume, structure.lattice.volume / 4.0)\n\n def test_get_ir_reciprocal_mesh(self):\n grid = self.sg.get_ir_reciprocal_mesh()\n self.assertEqual(len(grid), 216)\n self.assertAlmostEqual(grid[1][0][0], 0.1)\n self.assertAlmostEqual(grid[1][0][1], 0.0)\n self.assertAlmostEqual(grid[1][0][2], 0.0)\n self.assertAlmostEqual(grid[1][1], 2)\n\n def test_get_conventional_standard_structure(self):\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"bcc_1927.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 9.1980270633769461)\n self.assertAlmostEqual(conv.lattice.b, 9.1980270633769461)\n self.assertAlmostEqual(conv.lattice.c, 9.1980270633769461)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"btet_1915.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 5.0615106678044235)\n self.assertAlmostEqual(conv.lattice.b, 5.0615106678044235)\n self.assertAlmostEqual(conv.lattice.c, 4.2327080177761687)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orci_1010.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 2.9542233922299999)\n self.assertAlmostEqual(conv.lattice.b, 4.6330325651443296)\n self.assertAlmostEqual(conv.lattice.c, 5.373703587040775)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orcc_1003.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 4.1430033493799998)\n self.assertAlmostEqual(conv.lattice.b, 31.437979757624728)\n self.assertAlmostEqual(conv.lattice.c, 3.99648651)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orac_632475.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 3.1790663399999999)\n self.assertAlmostEqual(conv.lattice.b, 9.9032878699999998)\n self.assertAlmostEqual(conv.lattice.c, 3.5372412099999999)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"monoc_1028.cif\"))\n structure = 
parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 117.53832420192903)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 14.033435583000625)\n self.assertAlmostEqual(conv.lattice.b, 3.96052850731)\n self.assertAlmostEqual(conv.lattice.c, 6.8743926325200002)\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"hex_1170.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 120)\n self.assertAlmostEqual(conv.lattice.a, 3.699919902005897)\n self.assertAlmostEqual(conv.lattice.b, 3.699919902005897)\n self.assertAlmostEqual(conv.lattice.c, 6.9779585500000003)\n\n structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"tric_684654.json\"))\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 74.09581916308757)\n self.assertAlmostEqual(conv.lattice.beta, 75.72817279281173)\n self.assertAlmostEqual(conv.lattice.gamma, 63.63234318667333)\n self.assertAlmostEqual(conv.lattice.a, 3.741372924048738)\n self.assertAlmostEqual(conv.lattice.b, 3.9883228679270686)\n self.assertAlmostEqual(conv.lattice.c, 7.288495840048958)\n\n def test_get_primitive_standard_structure(self):\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"bcc_1927.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.beta, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.gamma, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.a, 7.9657251015812145)\n self.assertAlmostEqual(prim.lattice.b, 7.9657251015812145)\n self.assertAlmostEqual(prim.lattice.c, 7.9657251015812145)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"btet_1915.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 105.015053349)\n self.assertAlmostEqual(prim.lattice.beta, 105.015053349)\n self.assertAlmostEqual(prim.lattice.gamma, 118.80658411899999)\n self.assertAlmostEqual(prim.lattice.a, 4.1579321075608791)\n self.assertAlmostEqual(prim.lattice.b, 4.1579321075608791)\n self.assertAlmostEqual(prim.lattice.c, 4.1579321075608791)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orci_1010.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 134.78923546600001)\n self.assertAlmostEqual(prim.lattice.beta, 105.856239333)\n self.assertAlmostEqual(prim.lattice.gamma, 91.276341676000001)\n self.assertAlmostEqual(prim.lattice.a, 3.8428217771014852)\n self.assertAlmostEqual(prim.lattice.b, 3.8428217771014852)\n self.assertAlmostEqual(prim.lattice.c, 3.8428217771014852)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orcc_1003.cif\"))\n structure = 
parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 164.985257335)\n self.assertAlmostEqual(prim.lattice.a, 15.854897098324196)\n self.assertAlmostEqual(prim.lattice.b, 15.854897098324196)\n self.assertAlmostEqual(prim.lattice.c, 3.99648651)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orac_632475.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 144.40557588533386)\n self.assertAlmostEqual(prim.lattice.a, 5.2005185662155391)\n self.assertAlmostEqual(prim.lattice.b, 5.2005185662155391)\n self.assertAlmostEqual(prim.lattice.c, 3.5372412099999999)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"monoc_1028.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 63.579155761999999)\n self.assertAlmostEqual(prim.lattice.beta, 116.42084423747779)\n self.assertAlmostEqual(prim.lattice.gamma, 148.47965136208569)\n self.assertAlmostEqual(prim.lattice.a, 7.2908007159612325)\n self.assertAlmostEqual(prim.lattice.b, 7.2908007159612325)\n self.assertAlmostEqual(prim.lattice.c, 6.8743926325200002)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"hex_1170.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 120)\n self.assertAlmostEqual(prim.lattice.a, 3.699919902005897)\n self.assertAlmostEqual(prim.lattice.b, 3.699919902005897)\n self.assertAlmostEqual(prim.lattice.c, 6.9779585500000003)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"rhomb_3478_conv.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.beta, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.gamma, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.a, 5.9352627428399982)\n self.assertAlmostEqual(prim.lattice.b, 5.9352627428399982)\n self.assertAlmostEqual(prim.lattice.c, 5.9352627428399982)\n\n def test_tricky_structure(self):\n # for some reason this structure kills spglib1.9\n # 1.7 can't find symmetry either, but at least doesn't kill python\n s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR.tricky_symmetry\"))\n sa = SpacegroupAnalyzer(s, 0.1)\n sa.get_space_group_symbol()\n sa.get_space_group_number()\n sa.get_point_group_symbol()\n sa.get_crystal_system()\n sa.get_hall()\n\n\nclass SpacegroupTest(unittest.TestCase):\n def setUp(self):\n p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR\"))\n self.structure = p.structure\n self.sg1 = SpacegroupAnalyzer(self.structure, 0.001).get_space_group_operations()\n\n def test_are_symmetrically_equivalent(self):\n sites1 = 
[self.structure[i] for i in [0, 1]]\n sites2 = [self.structure[i] for i in [2, 3]]\n self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))\n\n sites1 = [self.structure[i] for i in [0, 1]]\n sites2 = [self.structure[i] for i in [0, 2]]\n self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))\n\n\nH2O2 = Molecule(\n [\"O\", \"O\", \"H\", \"H\"],\n [\n [0, 0.727403, -0.050147],\n [0, -0.727403, -0.050147],\n [0.83459, 0.897642, 0.401175],\n [-0.83459, -0.897642, 0.401175],\n ],\n)\n\nC2H2F2Br2 = Molecule(\n [\"C\", \"C\", \"F\", \"Br\", \"H\", \"F\", \"H\", \"Br\"],\n [\n [-0.752000, 0.001000, -0.141000],\n [0.752000, -0.001000, 0.141000],\n [-1.158000, 0.991000, 0.070000],\n [-1.240000, -0.737000, 0.496000],\n [-0.924000, -0.249000, -1.188000],\n [1.158000, -0.991000, -0.070000],\n [0.924000, 0.249000, 1.188000],\n [1.240000, 0.737000, -0.496000],\n ],\n)\n\nH2O = Molecule(\n [\"H\", \"O\", \"H\"],\n [[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]],\n)\n\nC2H4 = Molecule(\n [\"C\", \"C\", \"H\", \"H\", \"H\", \"H\"],\n [\n [0.0000, 0.0000, 0.6695],\n [0.0000, 0.0000, -0.6695],\n [0.0000, 0.9289, 1.2321],\n [0.0000, -0.9289, 1.2321],\n [0.0000, 0.9289, -1.2321],\n [0.0000, -0.9289, -1.2321],\n ],\n)\n\nNH3 = Molecule(\n [\"N\", \"H\", \"H\", \"H\"],\n [\n [0.0000, 0.0000, 0.0000],\n [0.0000, -0.9377, -0.3816],\n [0.8121, 0.4689, -0.3816],\n [-0.8121, 0.4689, -0.3816],\n ],\n)\n\nBF3 = Molecule(\n [\"B\", \"F\", \"F\", \"F\"],\n [\n [0.0000, 0.0000, 0.0000],\n [0.0000, -0.9377, 0.00],\n [0.8121, 0.4689, 0],\n [-0.8121, 0.4689, 0],\n ],\n)\n\nCH4 = Molecule(\n [\"C\", \"H\", \"H\", \"H\", \"H\"],\n [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000],\n ],\n)\n\nPF6 = Molecule(\n [\"P\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\"],\n [[0, 0, 0], [0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], [-1, 0, 0]],\n)\n\n\nclass PointGroupAnalyzerTest(PymatgenTest):\n def test_spherical(self):\n a = PointGroupAnalyzer(CH4)\n self.assertEqual(a.sch_symbol, \"Td\")\n self.assertEqual(len(a.get_pointgroup()), 24)\n a = PointGroupAnalyzer(PF6)\n self.assertEqual(a.sch_symbol, \"Oh\")\n self.assertEqual(len(a.get_pointgroup()), 48)\n m = Molecule.from_file(os.path.join(test_dir_mol, \"c60.xyz\"))\n a = PointGroupAnalyzer(m)\n self.assertEqual(a.sch_symbol, \"Ih\")\n\n cube_species = [\"C\", \"C\", \"C\", \"C\", \"C\", \"C\", \"C\", \"C\"]\n cube_coords = [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [1, 1, 0],\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1],\n ]\n\n m = Molecule(cube_species, cube_coords)\n a = PointGroupAnalyzer(m, 0.1)\n self.assertEqual(a.sch_symbol, \"Oh\")\n\n def test_tricky(self):\n m = Molecule.from_file(os.path.join(test_dir_mol, \"dh.xyz\"))\n a = PointGroupAnalyzer(m, 0.1)\n self.assertEqual(a.sch_symbol, \"D*h\")\n\n def test_linear(self):\n coords = [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [0, 0.000000, -1.08],\n ]\n mol = Molecule([\"C\", \"H\", \"H\"], coords)\n a = PointGroupAnalyzer(mol)\n self.assertEqual(a.sch_symbol, \"D*h\")\n mol = Molecule([\"C\", \"H\", \"N\"], coords)\n a = PointGroupAnalyzer(mol)\n self.assertEqual(a.sch_symbol, \"C*v\")\n\n def test_asym_top(self):\n coords = [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, 
-0.363000],\n ]\n mol = Molecule([\"C\", \"H\", \"F\", \"Br\", \"Cl\"], coords)\n a = PointGroupAnalyzer(mol)\n\n self.assertEqual(a.sch_symbol, \"C1\")\n self.assertEqual(len(a.get_pointgroup()), 1)\n coords = [\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000],\n ]\n cs_mol = Molecule([\"H\", \"F\", \"Cl\", \"Cl\"], coords)\n a = PointGroupAnalyzer(cs_mol)\n self.assertEqual(a.sch_symbol, \"Cs\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n a = PointGroupAnalyzer(C2H2F2Br2)\n self.assertEqual(a.sch_symbol, \"Ci\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n\n def test_cyclic(self):\n a = PointGroupAnalyzer(H2O2)\n self.assertEqual(a.sch_symbol, \"C2\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n a = PointGroupAnalyzer(H2O)\n self.assertEqual(a.sch_symbol, \"C2v\")\n self.assertEqual(len(a.get_pointgroup()), 4)\n a = PointGroupAnalyzer(NH3)\n self.assertEqual(a.sch_symbol, \"C3v\")\n self.assertEqual(len(a.get_pointgroup()), 6)\n cs2 = Molecule.from_file(os.path.join(test_dir_mol, \"Carbon_Disulfide.xyz\"))\n a = PointGroupAnalyzer(cs2, eigen_tolerance=0.001)\n self.assertEqual(a.sch_symbol, \"C2v\")\n\n def test_dihedral(self):\n a = PointGroupAnalyzer(C2H4)\n self.assertEqual(a.sch_symbol, \"D2h\")\n self.assertEqual(len(a.get_pointgroup()), 8)\n a = PointGroupAnalyzer(BF3)\n self.assertEqual(a.sch_symbol, \"D3h\")\n self.assertEqual(len(a.get_pointgroup()), 12)\n m = Molecule.from_file(os.path.join(test_dir_mol, \"b12h12.xyz\"))\n a = PointGroupAnalyzer(m)\n self.assertEqual(a.sch_symbol, \"Ih\")\n\n def test_symmetrize_molecule1(self):\n np.random.seed(77)\n distortion = np.random.randn(len(C2H4), 3) / 10\n dist_mol = Molecule(C2H4.species, C2H4.cart_coords + distortion)\n\n eq = iterative_symmetrize(dist_mol, max_n=100, epsilon=1e-7)\n sym_mol, eq_sets, ops = eq[\"sym_mol\"], eq[\"eq_sets\"], eq[\"sym_ops\"]\n\n self.assertTrue({0, 1} in eq_sets.values())\n self.assertTrue({2, 3, 4, 5} in eq_sets.values())\n\n coords = sym_mol.cart_coords\n for i, eq_set in eq_sets.items():\n for j in eq_set:\n rotated = np.dot(ops[i][j], coords[i])\n self.assertTrue(np.allclose(np.dot(ops[i][j], coords[i]), coords[j]))\n\n def test_symmetrize_molecule2(self):\n np.random.seed(77)\n distortion = np.random.randn(len(C2H2F2Br2), 3) / 20\n dist_mol = Molecule(C2H2F2Br2.species, C2H2F2Br2.cart_coords + distortion)\n PA1 = PointGroupAnalyzer(C2H2F2Br2, tolerance=0.1)\n self.assertTrue(PA1.get_pointgroup().sch_symbol == \"Ci\")\n PA2 = PointGroupAnalyzer(dist_mol, tolerance=0.1)\n self.assertTrue(PA2.get_pointgroup().sch_symbol == \"C1\")\n eq = iterative_symmetrize(dist_mol, tolerance=0.3)\n PA3 = PointGroupAnalyzer(eq[\"sym_mol\"], tolerance=0.1)\n self.assertTrue(PA3.get_pointgroup().sch_symbol == \"Ci\")\n\n def test_get_kpoint_weights(self):\n for name in [\"SrTiO3\", \"LiFePO4\", \"Graphite\"]:\n s = PymatgenTest.get_structure(name)\n a = SpacegroupAnalyzer(s)\n ir_mesh = a.get_ir_reciprocal_mesh((4, 4, 4))\n weights = [i[1] for i in ir_mesh]\n weights = np.array(weights) / sum(weights)\n for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])):\n self.assertAlmostEqual(i, w)\n\n for name in [\"SrTiO3\", \"LiFePO4\", \"Graphite\"]:\n s = PymatgenTest.get_structure(name)\n a = SpacegroupAnalyzer(s)\n ir_mesh = a.get_ir_reciprocal_mesh((1, 2, 3))\n weights = [i[1] for i in ir_mesh]\n weights = np.array(weights) / sum(weights)\n for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in 
ir_mesh])):\n self.assertAlmostEqual(i, w)\n\n v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, \"vasprun.xml\"))\n a = SpacegroupAnalyzer(v.final_structure)\n wts = a.get_kpoint_weights(v.actual_kpoints)\n\n for w1, w2 in zip(v.actual_kpoints_weights, wts):\n self.assertAlmostEqual(w1, w2)\n\n kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]]\n self.assertRaises(ValueError, a.get_kpoint_weights, kpts)\n\n\nclass FuncTest(unittest.TestCase):\n def test_cluster_sites(self):\n o, c = cluster_sites(CH4, 0.1)\n self.assertEqual(o.specie.symbol, \"C\")\n self.assertEqual(len(c), 1)\n o, c = cluster_sites(C2H2F2Br2.get_centered_molecule(), 0.1)\n self.assertIsNone(o)\n self.assertEqual(len(c), 4)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.uniform", "numpy.eye", "numpy.random.seed", "numpy.array", "numpy.dot", "numpy.round" ] ]
QPC-database/multimodal-affinities
[ "c3298e8db56a8b41110cc5681852f9f15d6deaa6" ]
[ "multimodal_affinities/visualization/image_utils.py" ]
[ "import cv2\nimport numpy as np\nfrom PIL import Image\nimport random\nimport string\nimport os\n\nclass ImageUtils(object):\n @staticmethod\n def read_image_for_bokeh(image_path, resize_height=None):\n # Open image, and make sure it's RGB*A*\n image = Image.open(image_path).convert('RGBA')\n print(\"image: {}\".format(image))\n if resize_height:\n image = ImageUtils.resize_image_by_height(image, resize_height)\n\n image_width, image_height = image.size\n # Create an array representation for the image `img`, and an 8-bit \"4\n # layer/RGBA\" version of it `view`.\n img = np.empty((image_height, image_width), dtype=np.uint32)\n view = img.view(dtype=np.uint8).reshape((image_height, image_width, 4))\n # Copy the RGBA image into view, flipping it so it comes right-side up\n # with a lower-left origin\n view[:, :, :] = np.flipud(np.asarray(image))\n print(\"input image width x height {}x{}\".format(image_width, image_height))\n return view, (image_width, image_height)\n\n @staticmethod\n def resize_image_by_height(pil_image, dst_height):\n src_width, src_height = pil_image.size\n factor = float(src_height) / dst_height\n dst_width = int(src_width / factor)\n pil_image.thumbnail((dst_width, dst_height), Image.ANTIALIAS)\n return pil_image\n\n\ndef resize_image(img, output_dimensions):\n '''\n resizes an img to output dimensions in x and y while preserving aspect ratio.\n pads (or cuts) along vertical direction if needed\n :param img:\n :param output_dimensions:\n :return:\n '''\n\n image_width = output_dimensions[0]\n image_height = output_dimensions[1]\n img_shape = img.shape\n num_pad_x = image_width - img.shape[1]\n pad_both_x_and_y = True\n if pad_both_x_and_y and num_pad_x > 0:\n num_pad_l = int(float(num_pad_x) / 2)\n num_pad_r = int(num_pad_x) - num_pad_l\n img = cv2.copyMakeBorder(img, 0, 0, num_pad_l, num_pad_r, cv2.BORDER_WRAP)\n elif not pad_both_x_and_y or num_pad_x < 0:\n resize_factor = float(img_shape[1]) / image_width\n img = cv2.resize(img, (int(img_shape[1] / resize_factor),\n int(img_shape[0] / resize_factor)))\n\n num_pad_y = image_height - img.shape[0]\n if num_pad_y > 0:\n num_pad_t = int(float(num_pad_y) / 2)\n num_pad_b = int(num_pad_y) - num_pad_t\n img = cv2.copyMakeBorder(img, num_pad_t, num_pad_b, 0, 0, cv2.BORDER_WRAP)\n elif num_pad_y < 0:\n num_pad_t = int(float(-num_pad_y) / 2)\n num_pad_b = int(-num_pad_y) - num_pad_t\n img = img[num_pad_t:-num_pad_b,:,:]\n\n # # debugging crops\n # random_filename = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))\n # cv2.imwrite(os.path.join(output_directory, random_filename + '.jpg'), img)\n return img" ]
[ [ "numpy.asarray", "numpy.empty" ] ]
RegentLee/master_research
[ "ee8e45abc890c7103c1c9917954c5958b48782f6" ]
[ "util/my_util.py" ]
[ "import numpy as np\n\n#############################################\n# variable #\n#############################################\nval = False\nx = 0\ny = 0\n\n\n#############################################\n# function #\n#############################################\ndef RMSD(A, B):\n mse = np.sum(np.power(A - B, 2)/B.size)\n return np.sqrt(mse)\n\ndef MAE(A, B):\n A = 59.2/2*(A + 1)\n B = 59.2/2*(B + 1)\n mae = np.sum(np.abs(A - B))/B.size\n return mae\n\n'''def DALI(A, B): not used\n \"\"\"Citation:\n Holm, Liisa. \n \"DALI and the persistence of protein shape.\" \n Protein Science 29.1 (2020): 128-140.\n APPENDIX I: SCORES USED IN DALI\n \"\"\"\n DALI_score = 0.2*len(B)\n A = 10*((A + 1)*3)\n B = 10*((B + 1)*3)\n for i in range(len(B)):\n for j in range(i + 1, len(B)):\n DALI_score += 2*(0.2 - 2*np.abs(A[i][j] - B[i][j])/(A[i][j] + B[i][j]))*np.exp(-((A[i][j] + B[i][j])/(2*20))**2)\n m_L = 7.95 + 0.71*len(B) - 0.000259*len(B)**2 - 0.00000192*len(B)**3\n Z_score = (DALI_score - m_L)/(0.5*m_L)\n return Z_score'''\n\n" ]
[ [ "numpy.sqrt", "numpy.power", "numpy.abs" ] ]
GregDMeyer/dynamite
[ "440f0c3674bf12a835b8ad4b3c10c303c2d28265" ]
[ "dynamite/operators.py" ]
[ "\"\"\"\nThis module provides the building blocks for Hamiltonians, and\ndefines their built-in behavior and operations.\n\"\"\"\n\nimport numpy as np\n\nfrom . import config, validate, msc_tools\nfrom .computations import evolve, eigsolve\nfrom .subspaces import Full\nfrom .states import State\n\nclass Operator:\n \"\"\"\n A class representing a quantum operator.\n\n This class generally won't be directly instantiated by the user, but is returned by the\n other functions in this module.\n \"\"\"\n\n def __init__(self):\n\n self._L = config.L\n self._max_spin_idx = None\n self._mats = {}\n self._msc = None\n self._is_reduced = False\n self._shell = config.shell\n\n self._subspaces = []\n\n self._tex = r'\\[\\text{operator}\\]'\n self._string = '[operator]'\n self._brackets = ''\n\n def copy(self):\n \"\"\"\n Return a copy of the operator.\n Copy will not have its PETSc matrix already built,\n even if the operator being copied does.\n\n Returns\n -------\n Operator\n A copy of the operator\n \"\"\"\n rtn = Operator()\n rtn.msc = self.msc.copy()\n rtn.is_reduced = self.is_reduced\n rtn.shell = self.shell\n\n if self._subspaces:\n for left, right in self.get_subspace_list():\n rtn.add_subspace(left, right)\n\n rtn.tex = self.tex\n rtn.string = self.string\n rtn.brackets = self.brackets\n\n return rtn\n\n ### computations\n\n def evolve(self, state, t, **kwargs):\n r\"\"\"\n Evolve a state under the Hamiltonian. If the Hamiltonian's chain length has not\n been set, attempts to set it based on the state's length.\n\n This method wraps :meth:`dynamite.computations.evolve` (see that documentation\n for a full description of the method's functionality).\n\n Parameters\n ----------\n state : dynamite.states.State\n The initial state.\n\n t : float\n The time :math:`t` for which to evolve the state (can be negative or complex).\n\n **kwargs :\n Any further keyword arguments are passed to the underlying call to\n :meth:`dynamite.computations.evolve`. See that documentation for a\n detailed description of possible arguments.\n\n Returns\n -------\n dynamite.states.State\n The result vector :math:`\\Psi_f`.\n \"\"\"\n\n if self.L is None:\n self.L = state.L\n\n return evolve(self, state, t, **kwargs)\n\n def eigsolve(self, **kwargs):\n \"\"\"\n Find eigenvalues (and eigenvectors if requested) of the Hamiltonian. This class\n method is a wrapper on :meth:`dynamite.computations.eigsolve`. Any keyword\n arguments are passed to that function; see its documentation for details.\n\n By default, finds one (or possibly a few) eigenvalues with the smallest real\n values (i.e. the ground state).\n\n .. 
note:: The spin chain length ``L`` must be set before calling ``eigsolve``.\n\n Returns\n -------\n numpy.array or tuple(numpy.array, list(dynamite.states.State))\n Either a 1D numpy array of eigenvalues, or a pair containing that array\n and a list of the corresponding eigenvectors.\n \"\"\"\n return eigsolve(self, **kwargs)\n\n ### properties\n\n @property\n def max_spin_idx(self):\n '''\n Read-only property giving the largest spin index on which this operator\n has support.\n '''\n # save this so we don't recompute it every time.\n # cleared when MSC changes\n\n if self._max_spin_idx is None:\n self._max_spin_idx = msc_tools.max_spin_idx(self.msc)\n\n return self._max_spin_idx\n\n @property\n def L(self):\n \"\"\"\n Property representing the length of the spin chain.\n If L hasn't been set, defaults to the size of support of the operator (from site 0).\n \"\"\"\n return self._L\n\n @L.setter\n def L(self, value):\n L = validate.L(value)\n if L < self.max_spin_idx + 1:\n raise ValueError('Cannot set L smaller than one plus the largest spin index'\n 'on which the operator has support (max_spin_idx = %d)' %\n (self.max_spin_idx))\n for left, right in self.get_subspace_list():\n left.L = L\n right.L = L\n self._L = L\n\n def get_length(self):\n '''\n Returns the length of the spin chain for this operator. It is defined by the\n property :meth:`Operator.L` if it has been set by the user. Otherwise, the\n number of sites on which the operator has support is returned by default.\n '''\n if self.L is None:\n return self.max_spin_idx + 1\n else:\n return self.L\n\n @property\n def dim(self):\n \"\"\"\n Read-only attribute returning the dimensions of the matrix.\n \"\"\"\n return self.left_subspace.get_dimension(), self.right_subspace.get_dimension()\n\n @property\n def nnz(self):\n \"\"\"\n The number of nonzero elements per row of the sparse matrix.\n \"\"\"\n return msc_tools.nnz(self.msc)\n\n @property\n def msc_size(self):\n \"\"\"\n The number of elements in the MSC representation of the matrix.\n \"\"\"\n return len(self.msc)\n\n @property\n def density(self):\n \"\"\"\n The density of the sparse matrix---that is, the number of non-zero\n elements per row divided by the length of a row.\n\n .. note::\n This quantity is not always well defined when using a subspace, since\n it can vary by row. In that case, the returned quantity will be an upper bound.\n \"\"\"\n return self.nnz/self.dim[1]\n\n @property\n def shell(self):\n \"\"\"\n Switch whether to use shell matrices or not. For a description of shell\n matrices and their benefits, see the documentation.\n\n .. note::\n Changing this value after the matrix has been built will invoke a call\n to :meth:`Operator.destroy_mat`.\n \"\"\"\n return self._shell\n\n @shell.setter\n def shell(self,value):\n value = validate.shell(value)\n if value != self._shell:\n self.destroy_mat()\n self._shell = value\n\n @property\n def left_subspace(self):\n \"\"\"\n Get the default left subspace for this operator. This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n space = self.get_subspace_list()[-1][0]\n space.L = self.get_length()\n return space\n\n @property\n def right_subspace(self):\n \"\"\"\n Get the default right subspace for this operator. 
This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n space = self.get_subspace_list()[-1][1]\n space.L = self.get_length()\n return space\n\n @property\n def subspace(self):\n \"\"\"\n Get the default subspace for this operator. This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n if self.left_subspace != self.right_subspace:\n raise ValueError(\"Left and right subspaces are different for this operator. \"\n \"use Operator.left_subspace and Operator.right_subspace to \"\n \"access them individually.\")\n return self.left_subspace\n\n @subspace.setter\n def subspace(self, value):\n self.add_subspace(value, value)\n\n def add_subspace(self, left, right=None):\n '''\n Add a pair of subspaces that this operator is compatible with.\n\n Parameters\n ----------\n\n left : dynamite.subspaces.Subspace\n A subspace the operator can map to (or multiply from the left)\n\n right : dynamite.subspaces.Subspace, optional\n A subspace the operator can map from (or multiply to the right). If omitted,\n the left subspace is reused for the right.\n '''\n if right is None:\n right = left\n\n left = validate.subspace(left)\n right = validate.subspace(right)\n\n left.L = self.get_length()\n right.L = self.get_length()\n\n if (left, right) not in self.get_subspace_list():\n self.get_subspace_list().append((left, right))\n\n def get_subspace_list(self):\n '''\n Return a list of the subspaces that have been registered for this operator.\n '''\n if not self._subspaces:\n if config.subspace is not None:\n self._subspaces = [(config.subspace, config.subspace)]\n else:\n self._subspaces = [(Full(), Full())]\n\n for left, right in self._subspaces:\n left.L = self.get_length()\n right.L = self.get_length()\n return self._subspaces\n\n ### text representations\n\n # TODO: perhaps encapsulate the string/tex methods into their own class\n\n @property\n def string(self):\n '''\n A text string that will be used to represent the object in printed expressions.\n '''\n return self._string\n\n @string.setter\n def string(self, value):\n self._string = value\n\n @property\n def tex(self):\n '''\n A LaTeX expression corresponding to the object. Can be set to any valid TeX.\n '''\n return self._tex\n\n @tex.setter\n def tex(self, value):\n self._tex = value\n\n @property\n def brackets(self):\n '''\n Which kind of brackets to surround the expression with. Options are\n ``'()'``, ``'[]'``, or ``''``, where the empty string means no brackets.\n '''\n return self._brackets\n\n @brackets.setter\n def brackets(self, value):\n value = validate.brackets(value)\n self._brackets = value\n\n @classmethod\n def _with_brackets(cls, string, brackets, tex=False):\n '''\n Put the given brackets around the string. If tex = True, the brackets\n have \\left and \\right appended to them.\n\n Parameters\n ----------\n string : str\n The string to put brackets around\n\n brackets : str\n The set of brackets. 
Should be either ``'[]'``, ``'()'``, or ``''``\n for no brackets.\n\n tex : bool, optional\n Whether to prepend ``\\left`` and ``\\right`` to the brackets.\n\n Returns\n -------\n str\n The result\n '''\n if not brackets:\n return string\n if tex:\n brackets = [x+y for x,y in zip([r'\\left',r'\\right'], brackets)]\n return string.join(brackets)\n\n def with_brackets(self, which):\n '''\n Return a string or tex representation of the object, surrounded by brackets\n if necessary. Useful for building larger expressions.\n\n Parameters\n ----------\n\n which : str\n Whether to return a normal string or tex. Options are ``'string'`` or ``'tex'``.\n '''\n if which == 'tex':\n strng = self.tex\n elif which == 'string':\n strng = self.string\n else:\n raise ValueError(\"which must be either 'string' or 'tex'.\")\n\n return self._with_brackets(strng, self._brackets, which == 'tex')\n\n def __str__(self):\n return self.string\n\n def __repr__(self):\n rtn = 'dynamite.Operator on {size} spins:\\n'.format(size = self.get_length())\n rtn += self.string\n return rtn\n\n def table(self):\n '''\n Return a string containing an ASCII table of the coefficients and terms\n that make up this operator.\n\n The table is generated directly from the MSC representation, so it is\n expanded and simplified to the same form no matter how the operator was\n built.\n\n Call :meth:`Operator.reduce_msc` first for a more compact table.\n '''\n return msc_tools.table(self.msc, self.get_length())\n\n def get_latex(self):\n '''\n Return a clean LaTeX representation of the operator.\n '''\n return self.tex.replace('{IDX', '{')\n\n def _repr_latex_(self):\n return '$' + self.get_latex() + '$'\n\n ### save to disk\n\n def serialize(self):\n '''\n Serialize the operator's MSC representation into a string of bytes.\n The byte string ONLY contains the MSC representation and the spin chain\n length. It does not save any other information, such as subspaces etc.\n\n Returns\n -------\n bytes\n The byte string containing the serialized object.\n\n '''\n return msc_tools.serialize(self.msc)\n\n def save(self, filename):\n \"\"\"\n Save the MSC representation of the operator to disk.\n Can be loaded again through :class:`Load`.\n\n .. note::\n If one calls this method in parallel, one MUST call :meth:`dynamite.config.initialize`\n first, or all processes will try to simultaneously write to the same file!\n\n Parameters\n ----------\n filename : str\n The path to the file to save the operator in.\n \"\"\"\n\n if config.initialized:\n from petsc4py import PETSc\n do_save = PETSc.COMM_WORLD.rank == 0\n else:\n # this should be the case when not running under MPI\n do_save = True\n\n # only process 0 should save\n if do_save:\n with open(filename, mode='wb') as f:\n f.write(self.serialize())\n\n if config.initialized:\n PETSc.COMM_WORLD.barrier()\n\n ### interface with PETSc\n\n def get_mat(self, subspaces=None, diag_entries=False):\n \"\"\"\n Get the PETSc matrix corresponding to this operator, building it if necessary.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The subspace pair to get the matrix for. If the matrix is already built for this\n pair, it will be reused. If this option is omitted, the last subspace added with\n :meth:`Operator.add_subspace` will be used, or the Full space by default.\n\n diag_entries : bool, optional\n Ensure that the sparse matrix has all diagonal elements filled,\n even if they are zero. Some PETSc functions fail if the\n diagonal elements do not exist. 
Currently a dummy argument; diagonal\n entries are always included.\n\n Returns\n -------\n petsc4py.PETSc.Mat\n The PETSc matrix corresponding to the operator.\n \"\"\"\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n if subspaces not in self._mats:\n self.build_mat(subspaces, diag_entries=diag_entries)\n\n return self._mats[subspaces]\n\n def build_mat(self, subspaces=None, diag_entries=False):\n \"\"\"\n Build the PETSc matrix, destroying any matrix that has already been built, and\n store it internally. This function does not return the matrix--see\n :meth:`Operator.get_mat` for that functionality. This function is rarely needed\n by the end user, since it is called automatically whenever the underlying matrix\n needs to be built or rebuilt.\n \"\"\"\n\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n if subspaces not in self.get_subspace_list():\n raise ValueError('Attempted to build matrix for a subspace that has not '\n 'been added to the operator.')\n\n config.initialize()\n from ._backend import bpetsc\n\n self.reduce_msc()\n term_array = self.msc\n\n # TODO: keep track of diag_entries\n diag_entries = True\n if term_array[0]['masks'] != 0:\n term_array = np.hstack([np.array([(0,0,0)], dtype=term_array.dtype), term_array])\n\n masks, indices = np.unique(term_array['masks'], return_index=True)\n\n # need to add the last index\n mask_offsets = np.ndarray((indices.size+1,), dtype=term_array.dtype['masks'])\n mask_offsets[:-1] = indices\n mask_offsets[-1] = term_array.shape[0]\n\n if not msc_tools.is_hermitian(term_array):\n raise ValueError('Building non-Hermitian matrices currently not supported.')\n\n mat = bpetsc.build_mat(\n L = self.get_length(),\n masks = np.ascontiguousarray(masks),\n mask_offsets = np.ascontiguousarray(mask_offsets),\n signs = np.ascontiguousarray(term_array['signs']),\n coeffs = np.ascontiguousarray(term_array['coeffs']),\n left_type = subspaces[0].to_enum(),\n left_data = subspaces[0].get_cdata(),\n right_type = subspaces[1].to_enum(),\n right_data = subspaces[1].get_cdata(),\n shell = self.shell,\n gpu = config.gpu\n )\n\n self._mats[subspaces] = mat\n\n def destroy_mat(self, subspaces=None):\n \"\"\"\n Destroy the PETSc matrix, freeing the corresponding memory. If the PETSc\n matrix does not exist (has not been built or has already been destroyed),\n the function has no effect.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n Destroy only the matrix for a particular pair of subspaces.\n \"\"\"\n if subspaces is not None:\n to_destroy = [subspaces]\n else:\n to_destroy = list(self._mats.keys())\n\n for k in to_destroy:\n mat = self._mats.pop(k, None)\n if mat is not None:\n mat.destroy()\n\n def create_states(self):\n '''\n Return a bra and ket compatible with this matrix.\n\n Returns\n -------\n tuple\n The two states\n '''\n bra = State(self.get_length(), self.left_subspace)\n ket = State(self.get_length(), self.right_subspace)\n return (bra, ket)\n\n ### mask, sign, coefficient representation of operators\n\n @property\n def msc(self):\n '''\n The (mask, sign, coefficient) representation of the operator. 
This\n representation is used internally by dynamite.\n '''\n return self._msc\n\n @msc.setter\n def msc(self, value):\n value = validate.msc(value)\n self._max_spin_idx = None\n self.is_reduced = False\n self._msc = value\n\n def reduce_msc(self):\n '''\n Combine and sort terms in the MSC representation, compressing it and\n preparing it for use in the backend.\n '''\n self.msc = msc_tools.combine_and_sort(self.msc)\n self.is_reduced = True\n\n @property\n def is_reduced(self):\n '''\n Whether :meth:`Operators.reduce_msc` has been called. Can also be set manually to avoid\n calling that function, if you are sure that the terms are sorted already.\n '''\n return self._is_reduced\n\n @is_reduced.setter\n def is_reduced(self, value):\n self._is_reduced = value\n\n def get_shifted_msc(self, shift, wrap_idx = None):\n '''\n Get the MSC representation of the operator, with all terms translated along\n the spin chain (away from zero) by ``shift`` spins.\n\n Parameters\n ----------\n shift : int\n Shift the whole operator along the spin chain by ``shift`` spins.\n\n wrap : bool\n The site at which to begin wrapping around to the beginning of the spin chain.\n e.g. takes a site index ``i`` to ``i % wrap_idx``. If ``None``, do not wrap.\n\n Returns\n -------\n numpy.ndarray\n A numpy array containing the representation.\n '''\n return msc_tools.shift(self.msc, shift, wrap_idx)\n\n ### interface to numpy\n\n def to_numpy(self, subspaces=None, sparse=True):\n '''\n Get a SciPy sparse matrix or dense numpy array representing the operator.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The subspaces for which to get the matrix. If this option is omitted,\n the last subspace added with :meth:`Operator.add_subspace` will be used,\n or the Full space by default.\n\n sparse : bool, optional\n Whether to return a sparse matrix or a dense array.\n\n Returns\n -------\n np.ndarray(dtype = np.complex128)\n The array\n '''\n\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n ary = msc_tools.msc_to_numpy(self.msc,\n (subspaces[0].get_dimension(),\n subspaces[1].get_dimension()),\n subspaces[0].idx_to_state,\n subspaces[1].state_to_idx,\n sparse)\n\n return ary\n\n def spy(self, subspaces=None, max_size=1024):\n '''\n Use matplotlib to show the nonzero structure of the matrix.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The pair of subspaces for which to plot the matrix. Defaults to the most\n recent added with the Operator.add_subspace method, or otherwise\n config.subspace.\n\n max_size : int, optional\n The maximum matrix dimension for which this function can be called.\n Calling it for too large a matrix will not be informative and probably run\n out of memory, so this is a small safeguard.\n '''\n # TODO: should subspaces really be passed as an argument like that? or should we somehow\n # reference subspaces from the list, like with an index?\n\n if any(dim > max_size for dim in self.dim):\n raise ValueError('Matrix too big to spy. 
Either build a smaller operator, or adjust '\n                             'the maximum spy size with the argument \"max_size\"')\n\n        from matplotlib import pyplot as plt\n        plt.figure()\n        # np.float was removed in NumPy 1.24; the builtin float is equivalent here\n        normalized = np.array((self.to_numpy(subspaces=subspaces) != 0).toarray(), dtype = float)\n        transformed = np.log(normalized + 1E-9)\n        plt.imshow(transformed, cmap='Greys')\n        plt.show()\n\n    ### unary and binary operations\n\n    def __add__(self, x):\n        if not isinstance(x, Operator):\n            x = x*identity()\n        return self._op_add(x)\n\n    def __radd__(self,x):\n        if not isinstance(x, Operator):\n            x = x*identity()\n        return x + self\n\n    def __sub__(self, x):\n        return self + -x\n\n    def __neg__(self):\n        return -1*self\n\n    def __mul__(self, x):\n        if isinstance(x, Operator):\n            return self._op_mul(x)\n        elif isinstance(x, State):\n            return self._vec_mul(x)\n        else:\n            return self._num_mul(x)\n\n    def __rmul__(self, x):\n        if isinstance(x, State):\n            # raise, not return: handing back an exception object would fail silently\n            raise TypeError('Left vector-matrix multiplication not currently '\n                            'supported.')\n        else:\n            return self._num_mul(x)\n\n    def __eq__(self, x):\n        if isinstance(x, Operator):\n            self.reduce_msc()\n            x.reduce_msc()\n            return np.array_equal(self.msc, x.msc)\n        else:\n            raise TypeError('Equality not supported for types %s and %s'\n                            % (str(type(self)), str(type(x))))\n\n    def _op_add(self, o):\n        rtn = self.copy()\n        rtn.msc = msc_tools.msc_sum([self.msc, o.msc])\n        rtn.tex = self.tex + ' + ' + o.tex\n        rtn.string = self.string + ' + ' + o.string\n        rtn.brackets = '()'\n        return rtn\n\n    def _op_mul(self, o):\n        rtn = self.copy()\n        rtn.msc = msc_tools.msc_product([self.msc, o.msc])\n        rtn.string = self.with_brackets('string') + '*' + o.with_brackets('string')\n        rtn.tex = self.with_brackets('tex') + o.with_brackets('tex')\n        rtn.brackets = ''\n        return rtn\n\n    def dot(self, x, result = None):\n        r'''\n        Compute the matrix-vector product :math:`\\vec{y} = A\\vec{x}`\n\n        Parameters\n        ----------\n        x : dynamite.states.State\n            The input state x.\n\n        result : dynamite.states.State, optional\n            A state in which to store the result. If omitted, a new State object\n            is created.\n\n        Returns\n        -------\n        dynamite.states.State\n            The result\n        '''\n        right_subspace = x.subspace\n        right_match = [(left, right) for left, right in self.get_subspace_list()\n                       if right == right_subspace]\n        if not right_match:\n            raise ValueError('No operator subspace found that matches input vector subspace. '\n                             'Try adding the subspace with the Operator.add_subspace method.')\n\n        if result is None:\n            if len(right_match) != 1:\n                raise ValueError('Ambiguous subspace for result vector. Pass a state '\n                                 'with the desired subspace as the \"result\" option to '\n                                 'Operator.dot.')\n            left_subspace = right_match[0][0]\n            result = State(L=left_subspace.L,\n                           subspace=left_subspace)\n        else:\n            left_subspace = result.subspace\n\n        if (left_subspace, right_subspace) not in right_match:\n            raise ValueError('Subspaces of matrix and result vector do not match.')\n\n        self.get_mat(subspaces=(left_subspace, right_subspace)).mult(x.vec, result.vec)\n        return result\n\n    def _vec_mul(self, x):\n        return self.dot(x)\n\n    def scale(self, x):\n        '''\n        Scale an operator by a numerical value without making a copy.
This is more\n efficient than just doing x*Operator.\n\n Parameters\n ----------\n x : numeric type\n The coefficient to scale by\n '''\n try:\n self.msc['coeffs'] *= x\n except (ValueError,TypeError):\n raise ValueError('Error attempting to multiply operator by type \"%s\"' % str(type(x)))\n\n self.string = '{:.3f}*'.format(x) + self.with_brackets('string')\n self.tex = '{:.3f}*'.format(x) + self.with_brackets('tex')\n self.brackets = ''\n return self\n\n def _num_mul(self, x):\n rtn = self.copy()\n rtn.scale(x)\n return rtn\n\ndef load_from_file(filename):\n '''\n Load the operator in file ``filename`` and return the corresponding object.\n\n Parameters\n ----------\n filename : str\n The path of the file to load.\n\n Returns\n -------\n dynamite.operators.Load\n The operator as a dynamite object.\n '''\n with open(filename, 'rb') as f:\n bytestring = f.read()\n op = from_bytes(bytestring)\n return op\n\ndef from_bytes(data):\n \"\"\"\n Load operator from a byte string generated with the :meth:`Operator.serialize`\n method.\n\n Parameters\n ----------\n data : bytes\n The byte string containing the serialized object.\n\n Returns\n -------\n Operator\n The operator.\n \"\"\"\n o = Operator()\n msc = msc_tools.deserialize(data)\n o.msc = msc\n o.string = '[operator from bytes]'\n o.tex = r'\\left[\\text{operator from bytes}\\right]'\n return o\n\ndef op_sum(terms, nshow = 3):\n r\"\"\"\n A sum of several operators. This object can be used in a couple ways.\n All of the following return the exact same object,\n :math:`\\sigma^x_0 + \\sigma^y_0`\\:\n\n .. code:: python\n\n sigmax() + sigmay()\n op_sum([sigmax(), sigmay()])\n op_sum(s() for s in [sigmax, sigmay])\n\n Parameters\n ----------\n terms : list\n A list of operators to sum\n\n nshow : int, optional\n The number of terms to show in the string representations before adding\n an ellipsis.\n \"\"\"\n\n o = Operator()\n msc_terms = []\n strings = []\n texs = []\n\n iterterms = iter(terms)\n\n done = False\n for n,t in enumerate(iterterms):\n msc_terms.append(t.msc)\n strings.append(t.string)\n texs.append(t.tex)\n if n >= nshow:\n break\n else:\n done = True\n\n if not done:\n strings[-1] = '...'\n texs[-1] = r'\\cdots'\n msc_terms.append(msc_tools.msc_sum(t.msc for t in iterterms))\n\n o.msc = msc_tools.msc_sum(msc_terms)\n o.string = ' + '.join(strings)\n o.tex = ' + '.join(texs)\n o.brackets = '()'\n return o\n\ndef op_product(terms):\n \"\"\"\n A product of several operators. Called in same way as :meth:`op_sum`.\n For example:\n\n .. code:: python\n\n >>> sigmax() * sigmay() == op_product([sigmax(), sigmay()])\n True\n\n Parameters\n ----------\n terms : list\n A list of operators to multiply\n \"\"\"\n\n # from a practical standpoint, there doesn't seem to ever be a use case\n # for taking the product of a huge number of terms. 
So we assume the number\n # of terms is O(1) in this implementation.\n\n msc_terms = []\n strings = []\n texs = []\n for t in terms:\n msc_terms.append(t.msc)\n strings.append(t.with_brackets(which='string'))\n texs.append(t.with_brackets(which='tex'))\n\n if msc_terms:\n o = Operator()\n o.msc = msc_tools.msc_product(msc_terms)\n o.string = '*'.join(strings)\n o.tex = ''.join(texs)\n o.brackets = ''\n else:\n o = identity()\n\n return o\n\ndef index_sum(op, size = None, start = 0, boundary = 'open'):\n \"\"\"\n Duplicate the operator onto adjacent sites in the spin chain, and sum the resulting\n operators.\n In most cases, ``op`` should have support on site 0 (and possibly others).\n\n See the examples for more information.\n\n Parameters\n ----------\n op : Operator\n The operator to translate along the spin chain.\n\n size : int, optional\n The size of the support of the resulting operator. For open boundary conditions,\n the number of terms in the sum may be smaller than this. If not provided, defaults\n to the value of :attr:`Operator.L`.\n\n start : int, optional\n The site for the first operator in the sum.\n\n boundary : str, optional\n Whether to use 'open' or 'closed' boundary conditions. When ``op`` has support\n on more than one site, this determines whether the last few terms of the sum should\n wrap around to the beginning of the spin chain.\n \"\"\"\n\n if size is None:\n if op.L is None:\n raise ValueError('Must specify index_sum size with either the \"size\" argument '\n 'or by setting Operator.L (possibly through config.L).')\n else:\n size = op.L\n\n if boundary == 'open':\n stop = start + size - op.max_spin_idx\n if stop <= start:\n raise ValueError(\"requested size %d for sum operator's support smaller than \"\n \"summand's support %d; impossible to satisfy\" % \\\n (size, op.max_spin_idx))\n wrap_idx = None\n\n elif boundary == 'closed':\n stop = start + size\n wrap_idx = stop\n if start != 0:\n raise ValueError('cannot set start != 0 for closed boundary conditions.')\n\n else:\n raise ValueError(\"invalid value for argument 'boundary' (can be 'open' or 'closed')\")\n\n rtn = Operator()\n rtn.msc = msc_tools.msc_sum(op.get_shifted_msc(i, wrap_idx) for i in range(start, stop))\n\n rtn.string = 'index_sum(' + op.string + ', sites %d - %d' % (start, stop-1)\n if boundary == 'closed':\n rtn.string += ', wrapped)'\n else:\n rtn.string += ')'\n\n # add i to the indices for TeX representation\n # TODO: use different letters if we have sum of sums\n sub_tex = op.with_brackets(which = 'tex')\n sub_tex = sub_tex.replace('{IDX', '{IDXi+').replace('{IDXi+0','{IDXi')\n\n rtn.tex = r'\\sum_{i=%d}^{%d}' % (start, stop-1) + sub_tex\n rtn.brackets = '[]'\n\n return rtn\n\ndef index_product(op, size = None, start = 0):\n \"\"\"\n Duplicate the operator onto adjacent sites in the spin chain, and multiply the\n resulting operators together.\n In most cases, ``op`` should have support on site 0 (and possibly others).\n\n Parameters\n ----------\n op : Operator\n The operator to translate along the spin chain.\n\n size : int, optional\n The size of the support of the resulting operator. 
If not provided, defaults\n to the value of :attr:`Operator.L`.\n\n start : int, optional\n The site for the first operator in the sum.\n \"\"\"\n\n if size is None:\n if op.L is None:\n raise ValueError('Must specify index_sum size with either the \"size\" argument '\n 'or by setting Operator.L (possibly through config.L).')\n else:\n size = op.L\n\n if size == 0:\n return identity()\n\n stop = start + size - op.max_spin_idx\n\n rtn = Operator()\n rtn.msc = msc_tools.msc_product(op.get_shifted_msc(i, wrap_idx = None) for i in range(start, stop))\n\n rtn.string = 'index_product(' + op.string + ', sites %d - %d)' % (start, stop-1)\n\n # add i to the indices for TeX representation\n # TODO: use different letters if we have sum of sums\n sub_tex = op.with_brackets(which = 'tex')\n sub_tex = sub_tex.replace('{IDX', '{IDXi+').replace('{IDXi+0','{IDXi')\n rtn.tex = r'\\prod_{i=%d}^{%d}' % (start, stop-1) + sub_tex\n rtn.brackets = '[]'\n\n return rtn\n\ndef sigmax(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_x` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(1<<i, 0, 1)]\n o.tex = r'\\sigma^x_{IDX'+str(i)+'}'\n o.string = 'σx'+str(i).join('[]')\n return o\n\ndef sigmay(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_y` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(1<<i, 1<<i, 1j)]\n o.tex = r'\\sigma^y_{IDX'+str(i)+'}'\n o.string = 'σy'+str(i).join('[]')\n return o\n\ndef sigmaz(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_z` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(0, 1<<i, 1)]\n o.tex = r'\\sigma^z_{IDX'+str(i)+'}'\n o.string = 'σz'+str(i).join('[]')\n return o\n\ndef sigma_plus(i=0):\n r\"\"\"\n The :math:`\\sigma_+ = \\sigma_x + i \\sigma_y` operator.\n\n .. note::\n\n :math:`\\sigma_+ = \\left( \\begin{array}{cc} 0 & 2 \\\\ 0 & 0 \\\\ \\end{array} \\right)`,\n so :math:`S_+ = \\left( \\begin{array}{cc} 0 & 1 \\\\ 0 & 0 \\\\ \\end{array} \\right) = \\frac{1}{2} \\sigma_+`\n \"\"\"\n o = sigmax(i) + 1j*sigmay(i)\n o.tex = r'\\sigma^+_{IDX'+str(i)+'}'\n o.string = 'σ+'+str(i).join('[]')\n return o\n\ndef sigma_minus(i=0):\n r\"\"\"\n The :math:`\\sigma_- = \\sigma_x - i \\sigma_y` operator.\n\n .. note::\n\n :math:`\\sigma_- = \\left( \\begin{array}{cc} 0 & 0 \\\\ 2 & 0 \\\\ \\end{array} \\right)`,\n so :math:`S_- = \\left( \\begin{array}{cc} 0 & 0 \\\\ 1 & 0 \\\\ \\end{array} \\right) = \\frac{1}{2} \\sigma_-`\n \"\"\"\n o = sigmax(i) - 1j*sigmay(i)\n o.tex = r'\\sigma^-_{IDX'+str(i)+'}'\n o.string = 'σ-'+str(i).join('[]')\n return o\n\ndef identity():\n \"\"\"\n The identity operator.\n \"\"\"\n o = Operator()\n o.msc = [(0, 0, 1)]\n # TODO: do a fancy double-lined 1?\n o.tex = '1'\n o.string = '1'\n return o\n\ndef zero():\n \"\"\"\n The zero operator---equivalent to a matrix of all zeros.\n \"\"\"\n o = Operator()\n o.msc = []\n o.tex = '0'\n o.string = '0'\n return o\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.ndarray", "matplotlib.pyplot.imshow", "numpy.log", "matplotlib.pyplot.show", "numpy.array_equal", "numpy.array", "numpy.ascontiguousarray", "numpy.unique" ] ]
Virodroid/galaxy-cluster
[ "11c0b365ed94e1f141b55e905f93abcbf39b3657" ]
[ "clustering/aggOnFeatures.py" ]
[ "from sklearn.cluster import AgglomerativeClustering\nimport pandas as pd\nimport numpy as np\nfrom zoobot import label_metadata, schemas\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support\nfrom scipy.optimize import linear_sum_assignment as linear_assignment\nimport time\n\ndef findChoice(frac):\n    # One-hot encode each row at its largest vote fraction.\n    choice = np.zeros_like(frac)\n    choice[np.arange(len(frac)), frac.argmax(1)] = 1\n    return choice\n\ndef getQuestionClasses(auto_f, volunteers, question):\n    qcol_name = question.text+'_total-votes'\n    fcol_names = [(cols.text+'_fraction') for cols in question.answers]\n    anscol_names = [cols.text for cols in question.answers]\n    valid_feats = []\n\n    # Keep only galaxies where at least half of the volunteers answered this question.\n    valid_vol = volunteers.query('`{}`/`smooth-or-featured_total-votes` >= 0.5'.format(qcol_name))\n    valid_idx = valid_vol.index.tolist()\n    vol_results = valid_vol[fcol_names].values\n\n    auto_values = auto_f.values\n\n    for i in valid_idx:\n        valid_feats.append(auto_values[i])\n\n    rounded_vol_results = findChoice(np.asarray(vol_results))\n    support = len(rounded_vol_results)\n\n    # Agglomerative clustering is deterministic, so no random seed is needed.\n    pred_results = AgglomerativeClustering(n_clusters=len(fcol_names)).fit_predict(valid_feats)\n\n    vol_classes = np.argmax(rounded_vol_results, axis=1)\n\n    return valid_idx, support, anscol_names, np.array(pred_results), np.array(vol_classes)\n\ndef _make_cost_m(cm):\n    # The Hungarian solver minimizes cost, so turn the confusion matrix into a cost matrix.\n    s = np.max(cm)\n    return (- cm + s)\n\ndef labelMap(vol, pred):\n    # Match cluster labels to volunteer classes by maximizing the confusion-matrix diagonal.\n    cm = confusion_matrix(vol, pred)\n    indexes = np.asarray(linear_assignment(_make_cost_m(cm)))\n    # indexes[1][k] is the cluster column assigned to true class k; invert that\n    # permutation so the map sends a cluster label to its matched class.\n    # (Returning indexes[1] directly would apply the inverse of the intended mapping.)\n    lmap = np.zeros_like(indexes[1])\n    lmap[indexes[1]] = indexes[0]\n    return lmap\n\ndef convertLabels(lmap, pred):\n    conv_preds = []\n    for i in range(len(pred)):\n        conv_preds.append(lmap[pred[i]])\n    return np.array(conv_preds)\n\nauto_features = pd.read_csv(\"/users/ezraf/galaxyDECaLS/autoencoder/extracted_features.csv\")\nauto_features = auto_features.drop('file_loc', axis=1)\ndecals_test = pd.read_csv('/users/ezraf/galaxyDECaLS/Ilifu_data/decals_ilifu_test.csv')\nschema = schemas.Schema(label_metadata.decals_pairs, label_metadata.get_gz2_and_decals_dependencies(label_metadata.decals_pairs))\n\ntotal_report = {}\ntotal_time = {}\nfor question in label_metadata.decals_pairs:\n    total_report[question] = {\n        'precision': 0,\n        'recall': 0,\n        'f1': 0,\n        'support': 0\n    }\nfor question in label_metadata.decals_pairs:\n    total_time[question] = {}\n    print('Starting clustering for', question)\n    start = time.time()\n    idxs, support, anscols, valid_preds, valid_vol = getQuestionClasses(auto_features, decals_test, schema.get_question(question))\n    lmap = labelMap(valid_vol, valid_preds)\n    conv_preds = convertLabels(lmap, valid_preds)\n    question_report = precision_recall_fscore_support(y_pred=conv_preds, y_true=valid_vol, average='weighted')\n    total_report[question]['precision'] = question_report[0]\n    total_report[question]['recall'] = question_report[1]\n    total_report[question]['f1'] = question_report[2]\n    end = time.time()\n    total_report[question]['support'] = support\n    total_time[question]['total'] = end - start\n    print('Question:', question, 'completed')\n    print('--------------------------------------------------------------')\n\nreport_df = pd.DataFrame.from_dict(total_report, orient='index')\ntime_df = pd.DataFrame.from_dict(total_time, orient='index')\n\nreport_df.to_csv(\"/users/ezraf/clusterResults/agg_accuracy.csv\")\ntime_df.to_csv(\"/users/ezraf/clusterResults/agg_time.csv\")" ]
[ [ "numpy.zeros_like", "sklearn.metrics.precision_recall_fscore_support", "pandas.read_csv", "numpy.asarray", "numpy.argmax", "sklearn.metrics.confusion_matrix", "numpy.max", "numpy.array", "pandas.DataFrame.from_dict" ] ]