Schema: repo_name (string, lengths 6–130) · hexsha (sequence) · file_path (sequence) · code (sequence) · apis (sequence) · possible_versions (list)
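The records below follow that schema; hexsha, file_path, code, apis and possible_versions appear to be parallel per-file sequences within each repository row (see the two-file pandas_market_calendars record). As a minimal, hypothetical loader sketch only — the dump's actual on-disk serialization is not shown here, so the JSON-lines assumption and the iter_files name are illustrative:

import json

# Hypothetical sketch: assumes each record is one JSON object per line with
# exactly the fields named in the schema above. The dataset's real on-disk
# format may differ.
def iter_files(path):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # hexsha, file_path, code, apis and possible_versions look like
            # parallel sequences: one entry per captured file in the repo.
            for sha, fp, src, apis, versions in zip(
                row["hexsha"], row["file_path"], row["code"],
                row["apis"], row["possible_versions"],
            ):
                yield {"repo": row["repo_name"], "sha": sha, "path": fp,
                       "source": src, "apis": apis, "versions": versions}

Note that because each code string is the exact file content identified by its hexsha, the archived sources are reproduced verbatim below (including their original typos and obsolete files) rather than cleaned up, so the records remain consistent with their commit hashes.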
cch1999/protein_dynamics
[ "f24031d19f527f196d7c8bb822435ec1ac657da0" ]
[ "obselete/model4.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.nn.functional import normalize\n\nfrom random import shuffle\n\nfrom utils import MLP, read_input_file, _compute_connectivity, rmsd, save_structure\nimport matplotlib.pyplot as plt\nimport os\nfrom pykeops.torch import LazyTensor\nfrom tqdm import tqdm\n\nmodel_dir = os.path.dirname(os.path.realpath(__file__))\ndataset_dir = os.path.join(model_dir, \"datasets\")\ntrain_val_dir = os.path.join(model_dir, \"protein_data\", \"train_val\")\ntrained_model_file = os.path.join(model_dir, \"test_model2.pt\")\n\ntrain_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"train.txt\"))]\nval_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"val.txt\" ))]\n\ndevice = \"cuda:5\"\n\ntorch.set_num_threads(12)\n\n\natoms = [\"N\", \"CA\", \"C\", \"cent\"]\n\n# Last value is the number of atoms in the next residue\nangles = [\n\t(\"N\", \"CA\", \"C\" , 0), (\"CA\", \"C\" , \"N\" , 1), (\"C\", \"N\", \"CA\", 2),\n\t(\"N\", \"CA\", \"cent\", 0), (\"C\" , \"CA\", \"cent\", 0),\n]\n\n# Last value is the number of atoms in the next residue\ndihedrals = [\n\t(\"C\", \"N\", \"CA\", \"C\" , 3), (\"N\" , \"CA\", \"C\", \"N\", 1), (\"CA\", \"C\", \"N\", \"CA\", 2),\n\t(\"C\", \"N\", \"CA\", \"cent\", 3), (\"cent\", \"CA\", \"C\", \"N\", 1),\n]\n\naas = [\n\t\"A\", \"R\", \"N\", \"D\", \"C\", \"E\", \"Q\", \"G\", \"H\", \"I\",\n\t\"L\", \"K\", \"M\", \"F\", \"P\", \"S\", \"T\", \"W\", \"Y\", \"V\",\n]\nn_aas = len(aas)\n\nclass ProteinDataset(Dataset):\n def __init__(self, pdbids, coord_dir, device=\"cpu\"):\n self.pdbids = pdbids\n self.coord_dir = coord_dir\n self.set_size = len(pdbids)\n self.device = device\n\n def __len__(self):\n return self.set_size\n\n def __getitem__(self, index):\n fp = os.path.join(self.coord_dir, self.pdbids[index] + \".txt\")\n return get_features(fp, device=self.device)\n\nclass DistanceForces(nn.Module):\n\t\"\"\"\n\tCalculates forces between two atoms based on their \n\t\t1. atoms types\n\t\t2. Euclidian distance\n\t\t3. Seperation along the sequence\n\n\tInput dim = 50 (24*2 + 2)\n\tOutput dim = 1 (a scalar force)\n\t\"\"\"\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(DistanceForces, self).__init__()\n\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear((2*24)+2, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, output_size))\n\n\tdef forward(self, atom1, atom2, edges):\n\n\t\tmessages = torch.cat([atom1, atom2, edges], dim=1)\n\n\t\treturn self.model(messages)\n\nclass AngleForces(nn.Module):\n\t\"\"\"\n\tCalculates forces between three atoms making an angle on their \n\t\t1. central atom types\n\t\t2. 
angle around the central atom\n\n\tInput dim = 25 (24 + 1)\n\tOutput dim = 1 (a scalar force)\n\t\"\"\"\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(AngleForces, self).__init__()\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear(input_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, hidden_size),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(hidden_size, output_size))\n\n\tdef forward(self, central_atom, angles):\n\n\t\tmessages = torch.cat([central_atom, angles[:,:,None]], dim=2)\n\n\t\treturn self.model(messages)\n\nclass Simulator(nn.Module):\n\tdef __init__(self, input_size, hidden_size, output_size):\n\t\tsuper(Simulator, self).__init__()\n\n\t\tself.distance_forces = DistanceForces(50, 128, 1)\n\t\tself.angle_forces = AngleForces(24+1, 128, 1)\n\n\tdef forward(self, coords, node_f, res_numbers, masses, seq,\n\t\t\t\tradius, n_steps, timestep, temperature, animation, device):\n\n\t\tn_atoms = coords.shape[0]\n\t\tn_res = n_atoms // len(atoms)\n\t\tmodel_n = 0\n\n\t\tvels = torch.randn(coords.shape).to(device) * temperature\n\t\taccs_last = torch.zeros(coords.shape).to(device)\n\t\trandn_coords = coords + vels * timestep * n_steps\n\t\tloss, passed = rmsd(randn_coords, coords)\t\t\n\n\t\tfor i in range(n_steps):\n\n\t\t\tcoords = coords + vels * timestep + 0.5 * accs_last * timestep * timestep\n\n\t\t\tk = 15\n\t\t\tidx = knn(coords, k+1)\n\t\t\tsenders = idx[:,0].repeat_interleave(k)\n\t\t\treceivers = idx[:,1:].reshape(n_atoms*k)\n\n\t\t\t# Calc Euclidian distance\n\t\t\tdiffs = coords[senders] - coords[receivers]\n\t\t\tdists = diffs.norm(dim=1)\n\t\t\tnorm_diffs = diffs / dists.clamp(min=0.01).unsqueeze(1)\n\n\t\t\t# Calc sequence seperation\n\t\t\tseq_sep = abs(res_numbers[senders] - res_numbers[receivers])/5\n\t\t\tmask = seq_sep > 1\n\t\t\tseq_sep[mask] = 1\n\n\t\t\t# Concat edge features\n\t\t\tedges = torch.cat([dists.unsqueeze(1), seq_sep], dim=1)\n\n\t\t\t# Compute forces using MLP\n\t\t\tforces = self.distance_forces(node_f[senders], node_f[receivers], edges)\n\t\t\tforces = forces * norm_diffs\n\t\t\ttotal_forces = forces.view(n_atoms, k, 3).sum(1)/100\n\t\t\t\n\t\t\tbatch_size = 1\n\t\t\tatom_types = node_f.view(batch_size, n_res, len(atoms), 24)\n\t\t\tatom_coords = coords.view(batch_size, n_res, 3 * len(atoms))\n\t\t\tatom_accs = torch.zeros(batch_size, n_res, 3 * len(atoms), device=device)\n\t\t\t# Angle forces\n\t\t\t# across_res is the number of atoms in the next residue, starting from atom_3\n\t\t\tfor ai, (atom_1, atom_2, atom_3, across_res) in enumerate(angles):\n\t\t\t\t# Calc vectors and angle between atoms\n\t\t\t\tai_1, ai_2, ai_3 = atoms.index(atom_1), atoms.index(atom_2), atoms.index(atom_3)\n\t\t\t\tif across_res == 0:\n\t\t\t\t\tba = atom_coords[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\telif across_res == 1:\n\t\t\t\t\tba = atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\telif across_res == 2:\n\t\t\t\t\tba = atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\t\tbc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n\t\t\t\tba_norms = 
ba.norm(dim=2)\n\t\t\t\tbc_norms = bc.norm(dim=2)\n\t\t\t\tangs = torch.acos((ba * bc).sum(dim=2) / (ba_norms * bc_norms))\n\t\t\t\t# Get central atom properties\n\t\t\t\tif ai == 0 or ai == 3 or ai == 4:\n\t\t\t\t\tcentral_atom_types = atom_types[:,:,1,:]\n\t\t\t\telif ai == 1:\n\t\t\t\t\tcentral_atom_types = atom_types[:,:-1,2,:]\n\t\t\t\telif ai == 2:\n\t\t\t\t\tcentral_atom_types = atom_types[:,1:,0,:]\n\n\t\t\t\tangle_forces = self.angle_forces(central_atom_types, angs)\n\n\t\t\t\tcross_ba_bc = torch.cross(ba, bc, dim=2)\n\t\t\t\tfa = angle_forces * normalize(torch.cross( ba, cross_ba_bc, dim=2), dim=2) / ba_norms.unsqueeze(2)\n\t\t\t\tfc = angle_forces * normalize(torch.cross(-bc, cross_ba_bc, dim=2), dim=2) / bc_norms.unsqueeze(2)\n\t\t\t\tfb = -fa -fc\n\t\t\t\tif across_res == 0:\n\t\t\t\t\tatom_accs[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, : , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\t\t\t\telif across_res == 1:\n\t\t\t\t\tatom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\t\t\t\telif across_res == 2:\n\t\t\t\t\tatom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n\t\t\t\t\tatom_accs[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n\t\t\t\t\tatom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n\n\t\t\t# Calc distance accs\n\t\t\taccs = total_forces/masses.unsqueeze(1)\n\t\t\t# Calc angle accs\n\t\t\taccs += atom_accs.view(n_atoms, 3) / (masses.unsqueeze(1)*100)\n\n\n\t\t\tvels = vels + 0.5 * (accs_last + accs) * timestep\n\t\t\taccs_last = accs\n\n\t\t\tif animation:\n\t\t\t\tmodel_n += 1\n\t\t\t\tsave_structure(coords[None,:,:], \"animation.pdb\", seq, model_n)\n\n\t\treturn coords, loss\n\ndef knn(coords, k):\n\t\"\"\"\n\tFinds the k-nearest neibours\n\t\"\"\"\n\tcoords = coords.to(device)\n\n\tN, D = coords.shape\n\txyz_i = LazyTensor(coords[:, None, :])\n\txyz_j = LazyTensor(coords[None, :, :])\n\n\tpairwise_distance_ij = ((xyz_i - xyz_j) ** 2).sum(-1)\n\n\tidx = pairwise_distance_ij.argKmin(K=k, axis=1) # (N, K)\n\n\treturn idx\n\ndef get_features(fp, device):\n\n\n\tnative_coords, inters_ang, inters_dih, masses, seq = read_input_file(fp)\n\n\tone_hot_atoms = torch.tensor([[1,0,0,0],\n\t\t\t\t\t\t\t\t[0,1,0,0],\n\t\t\t\t\t\t\t\t[0,0,1,0],\n\t\t\t\t\t\t\t\t[0,0,0,1]])\n\tone_hot_atoms = one_hot_atoms.repeat(len(seq), 1)\n\n\tone_hot_seq = torch.zeros(len(seq)*4, 20)\n\tfor i, aa in enumerate(seq):\n\t\tindex = aas.index(aa)\n\t\tone_hot_seq[i*4:(i+1)*4, index] = 1\n\n\tres_numbers = torch.cat([torch.ones(4,1)*i for i in range(len(seq))])\n\n\tnode_f = torch.cat([one_hot_atoms, one_hot_seq], dim=1)\n\n\treturn native_coords.to(device), node_f.to(device), res_numbers.to(device), masses.to(device), seq\n\nif __name__ == \"__main__\":\n\n\tdata_dir = \"protein_data/train_val/\"\n\tdata = os.listdir(data_dir)\n\n\tmodel = Simulator(50, 128, 1).to(device)\n\n\toptimizer = torch.optim.Adam(model.parameters(), lr=0.0005)\n\n\n\tlosses = []\n\n\tpytorch_total_params = sum(p.numel() for p in model.parameters())\n\tprint(pytorch_total_params)\n\n\ttrain_set = ProteinDataset(train_proteins, train_val_dir, device=device)\n\tval_set = ProteinDataset(val_proteins , train_val_dir, device=device)\n\n\tfor i in range(20):\n\t\tprint(f\"Starting Epoch {i}:\")\n\n\t\ttrain_inds = list(range(len(train_set)))\n\t\tval_inds = 
list(range(len(val_set)))\n\t\tshuffle(train_inds)\n\t\tshuffle(val_inds)\n\t\tmodel.train()\n\t\toptimizer.zero_grad()\n\t\tfor protein in tqdm(train_inds):\n\n\t\t\tcoords, node_f, res_numbers, masses, seq = train_set[protein]\n\n\t\t\tmodel.train()\n\t\t\tprint('Forward')\n\t\t\tout, basic_loss = model(coords, node_f, res_numbers, masses, seq, 10, \n\t\t\t\t\t\t\tn_steps=800, timestep=0.02, temperature=0.02,\n\t\t\t\t\t\t\tanimation=False, device=device)\n\t\t\tprint('done forward')\n\t\t\tloss, passed = rmsd(out, coords)\n\t\t\tloss_log = torch.log(1.0 + loss)\n\t\t\tloss_log.backward()\n\t\t\tprint('Done backprop')\n\t\t\toptimizer.step()\n\t\t\toptimizer.zero_grad()\n\t\t\tlosses.append(loss - basic_loss)\n\n\t\t\tprint(\"Epoch:\", i)\n\t\t\tprint(\"Basic loss:\", round(basic_loss.item(),3))\n\t\t\tprint(\"----- Loss:\", round(loss.item(),3))\n\t\t\tprint(\"-Loss diff:\", round(loss.item() - basic_loss.item(), 3))\n\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\tcoords, node_f, res_numbers, masses, seq = get_features(\"protein_data/example/1CRN.txt\", device=device)\n\n\t\t\tout, basic_loss = model(coords, node_f, res_numbers, masses, seq, 10, \n\t\t\t\t\t\t\tn_steps=500, timestep=0.02, temperature=0.2,\n\t\t\t\t\t\t\tanimation=False, device=device)\n\t\t\n\n\t\ttorch.save(model.state_dict(), os.path.join(model_dir, f\"models/model_ang{i}.pt\"))\n\n\t\n\t\tplt.plot(losses)\n\t\tplt.xlim(0)\n\t\tplt.ylabel(\"Loss - RMSD (A)\")\n\t\tplt.xlabel(\"Epoch\")\n\t\tplt.title(f'No. epochs = {i+1}')\n\t\tplt.legend()\n\t\tplt.savefig('with_angles.png')\n" ]
[ [ "matplotlib.pyplot.legend", "torch.ones", "matplotlib.pyplot.title", "torch.cat", "torch.zeros", "torch.randn", "matplotlib.pyplot.savefig", "torch.tensor", "matplotlib.pyplot.plot", "torch.nn.Linear", "matplotlib.pyplot.xlim", "torch.set_num_threads", "torch.log", "torch.no_grad", "matplotlib.pyplot.xlabel", "torch.nn.ReLU", "torch.cross", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
contactlp/xgboost
[ "1d0ca49761d6a7dace5aec6af80c4aef7367fc2f" ]
[ "python-package/xgboost/training.py" ]
[ "# coding: utf-8\n# pylint: disable=too-many-locals, too-many-arguments, invalid-name\n# pylint: disable=too-many-branches, too-many-statements\n\"\"\"Training Library containing training routines.\"\"\"\nfrom __future__ import absolute_import\n\nimport warnings\nimport numpy as np\nfrom .core import Booster, STRING_TYPES, XGBoostError, CallbackEnv, EarlyStopException\nfrom .compat import (SKLEARN_INSTALLED, XGBStratifiedKFold)\nfrom . import rabit\nfrom . import callback\n\n\ndef _train_internal(params, dtrain,\n num_boost_round=10, evals=(),\n obj=None, feval=None,\n xgb_model=None, callbacks=None):\n \"\"\"internal training function\"\"\"\n callbacks = [] if callbacks is None else callbacks\n evals = list(evals)\n if isinstance(params, dict) \\\n and 'eval_metric' in params \\\n and isinstance(params['eval_metric'], list):\n params = dict((k, v) for k, v in params.items())\n eval_metrics = params['eval_metric']\n params.pop(\"eval_metric\", None)\n params = list(params.items())\n for eval_metric in eval_metrics:\n params += [('eval_metric', eval_metric)]\n\n bst = Booster(params, [dtrain] + [d[0] for d in evals])\n nboost = 0\n num_parallel_tree = 1\n\n if xgb_model is not None:\n bst = Booster(params, [dtrain] + [d[0] for d in evals],\n model_file=xgb_model)\n nboost = len(bst.get_dump())\n\n _params = dict(params) if isinstance(params, list) else params\n\n if 'num_parallel_tree' in _params:\n num_parallel_tree = _params['num_parallel_tree']\n nboost //= num_parallel_tree\n if 'num_class' in _params:\n nboost //= _params['num_class']\n\n # Distributed code: Load the checkpoint from rabit.\n version = bst.load_rabit_checkpoint()\n assert rabit.get_world_size() != 1 or version == 0\n rank = rabit.get_rank()\n start_iteration = int(version / 2)\n nboost += start_iteration\n\n callbacks_before_iter = [\n cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]\n callbacks_after_iter = [\n cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]\n\n for i in range(start_iteration, num_boost_round):\n for cb in callbacks_before_iter:\n cb(CallbackEnv(model=bst,\n cvfolds=None,\n iteration=i,\n begin_iteration=start_iteration,\n end_iteration=num_boost_round,\n rank=rank,\n evaluation_result_list=None))\n # Distributed code: need to resume to this point.\n # Skip the first update if it is a recovery step.\n if version % 2 == 0:\n bst.update(dtrain, i, obj)\n bst.save_rabit_checkpoint()\n version += 1\n\n assert rabit.get_world_size() == 1 or version == rabit.version_number()\n\n nboost += 1\n evaluation_result_list = []\n # check evaluation result.\n if evals:\n bst_eval_set = bst.eval_set(evals, i, feval)\n if isinstance(bst_eval_set, STRING_TYPES):\n msg = bst_eval_set\n else:\n msg = bst_eval_set.decode()\n res = [x.split(':') for x in msg.split()]\n evaluation_result_list = [(k, float(v)) for k, v in res[1:]]\n try:\n for cb in callbacks_after_iter:\n cb(CallbackEnv(model=bst,\n cvfolds=None,\n iteration=i,\n begin_iteration=start_iteration,\n end_iteration=num_boost_round,\n rank=rank,\n evaluation_result_list=evaluation_result_list))\n except EarlyStopException:\n break\n # do checkpoint after evaluation, in case evaluation also updates booster.\n bst.save_rabit_checkpoint()\n version += 1\n\n if bst.attr('best_score') is not None:\n bst.best_score = float(bst.attr('best_score'))\n bst.best_iteration = int(bst.attr('best_iteration'))\n else:\n bst.best_iteration = nboost - 1\n bst.best_ntree_limit = (bst.best_iteration + 1) * num_parallel_tree\n return 
bst\n\n\ndef train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,\n maximize=False, early_stopping_rounds=None, evals_result=None,\n verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):\n # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init\n \"\"\"Train a booster with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round: int\n Number of boosting iterations.\n evals: list of pairs (DMatrix, string)\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n obj : function\n Customized objective function.\n feval : function\n Customized evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Validation metric needs to improve at least once in\n every **early_stopping_rounds** round(s) to continue training.\n Requires at least one item in **evals**.\n The method returns the model from the last iteration (not the best one).\n If there's more than one item in **evals**, the last entry will be used\n for early stopping.\n If there's more than one metric in the **eval_metric** parameter given in\n **params**, the last metric will be used for early stopping.\n If early stopping occurs, the model will have three additional fields:\n ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.\n (Use ``bst.best_ntree_limit`` to get the correct value if\n ``num_parallel_tree`` and/or ``num_class`` appears in the parameters)\n evals_result: dict\n This dictionary stores the evaluation results of all the items in watchlist.\n\n Example: with a watchlist containing\n ``[(dtest,'eval'), (dtrain,'train')]`` and\n a parameter containing ``('eval_metric': 'logloss')``,\n the **evals_result** returns\n\n .. code-block:: python\n\n {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}\n\n verbose_eval : bool or int\n Requires at least one item in **evals**.\n If **verbose_eval** is True then the evaluation metric on the validation set is\n printed at each boosting stage.\n If **verbose_eval** is an integer then the evaluation metric on the validation set\n is printed at every given **verbose_eval** boosting stage. The last boosting stage\n / the boosting stage found by using **early_stopping_rounds** is also printed.\n Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric\n is printed every 4 boosting stages, instead of every boosting stage.\n learning_rates: list or function (deprecated - use callback API instead)\n List of learning rate for each boosting round\n or a customized function that calculates eta in terms of\n current number of round and the total number of boosting round (e.g. yields\n learning rate decay)\n xgb_model : file name of stored xgb model or 'Booster' instance\n Xgb model to be loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. 
code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n\n Returns\n -------\n Booster : a trained booster model\n \"\"\"\n callbacks = [] if callbacks is None else callbacks\n\n # Most of legacy advanced options becomes callbacks\n if isinstance(verbose_eval, bool) and verbose_eval:\n callbacks.append(callback.print_evaluation())\n else:\n if isinstance(verbose_eval, int):\n callbacks.append(callback.print_evaluation(verbose_eval))\n\n if early_stopping_rounds is not None:\n callbacks.append(callback.early_stop(early_stopping_rounds,\n maximize=maximize,\n verbose=bool(verbose_eval)))\n if evals_result is not None:\n callbacks.append(callback.record_evaluation(evals_result))\n\n if learning_rates is not None:\n warnings.warn(\"learning_rates parameter is deprecated - use callback API instead\",\n DeprecationWarning)\n callbacks.append(callback.reset_learning_rate(learning_rates))\n\n return _train_internal(params, dtrain,\n num_boost_round=num_boost_round,\n evals=evals,\n obj=obj, feval=feval,\n xgb_model=xgb_model, callbacks=callbacks)\n\n\nclass CVPack(object):\n \"\"\"\"Auxiliary datastruct to hold one fold of CV.\"\"\"\n def __init__(self, dtrain, dtest, param):\n \"\"\"\"Initialize the CVPack\"\"\"\n self.dtrain = dtrain\n self.dtest = dtest\n self.watchlist = [(dtrain, 'train'), (dtest, 'test')]\n self.bst = Booster(param, [dtrain, dtest])\n\n def update(self, iteration, fobj):\n \"\"\"\"Update the boosters for one iteration\"\"\"\n self.bst.update(self.dtrain, iteration, fobj)\n\n def eval(self, iteration, feval):\n \"\"\"\"Evaluate the CVPack for one iteration.\"\"\"\n return self.bst.eval_set(self.watchlist, iteration, feval)\n\n\ndef groups_to_rows(groups, boundaries):\n \"\"\"\n Given group row boundaries, convert ground indexes to row indexes\n :param groups: list of groups for testing\n :param boundaries: rows index limits of each group\n :return: row in group\n \"\"\"\n return np.concatenate([np.arange(boundaries[g], boundaries[g+1]) for g in groups])\n\n\ndef mkgroupfold(dall, nfold, param, evals=(), fpreproc=None, shuffle=True):\n \"\"\"\n Make n folds for cross-validation maintaining groups\n :return: cross-validation folds\n \"\"\"\n # we have groups for pairwise ranking... 
get a list of the group indexes\n group_boundaries = dall.get_uint_info('group_ptr')\n group_sizes = np.diff(group_boundaries)\n\n if shuffle is True:\n idx = np.random.permutation(len(group_sizes))\n else:\n idx = np.arange(len(group_sizes))\n # list by fold of test group indexes\n out_group_idset = np.array_split(idx, nfold)\n # list by fold of train group indexes\n in_group_idset = [np.concatenate([out_group_idset[i] for i in range(nfold) if k != i])\n for k in range(nfold)]\n # from the group indexes, convert them to row indexes\n in_idset = [groups_to_rows(in_groups, group_boundaries) for in_groups in in_group_idset]\n out_idset = [groups_to_rows(out_groups, group_boundaries) for out_groups in out_group_idset]\n\n # build the folds by taking the appropriate slices\n ret = []\n for k in range(nfold):\n # perform the slicing using the indexes determined by the above methods\n dtrain = dall.slice(in_idset[k], allow_groups=True)\n dtrain.set_group(group_sizes[in_group_idset[k]])\n dtest = dall.slice(out_idset[k], allow_groups=True)\n dtest.set_group(group_sizes[out_group_idset[k]])\n # run preprocessing on the data set if needed\n if fpreproc is not None:\n dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())\n else:\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret\n\n\ndef mknfold(dall, nfold, param, seed, evals=(), fpreproc=None, stratified=False,\n folds=None, shuffle=True):\n \"\"\"\n Make an n-fold list of CVPack from random indices.\n \"\"\"\n evals = list(evals)\n np.random.seed(seed)\n\n if stratified is False and folds is None:\n # Do standard k-fold cross validation. Automatically determine the folds.\n if len(dall.get_uint_info('group_ptr')) > 1:\n return mkgroupfold(dall, nfold, param, evals=evals, fpreproc=fpreproc, shuffle=shuffle)\n\n if shuffle is True:\n idx = np.random.permutation(dall.num_row())\n else:\n idx = np.arange(dall.num_row())\n out_idset = np.array_split(idx, nfold)\n in_idset = [np.concatenate([out_idset[i] for i in range(nfold) if k != i])\n for k in range(nfold)]\n elif folds is not None:\n # Use user specified custom split using indices\n try:\n in_idset = [x[0] for x in folds]\n out_idset = [x[1] for x in folds]\n except TypeError:\n # Custom stratification using Sklearn KFoldSplit object\n splits = list(folds.split(X=dall.get_label(), y=dall.get_label()))\n in_idset = [x[0] for x in splits]\n out_idset = [x[1] for x in splits]\n nfold = len(out_idset)\n else:\n # Do standard stratefied shuffle k-fold split\n sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)\n splits = list(sfk.split(X=dall.get_label(), y=dall.get_label()))\n in_idset = [x[0] for x in splits]\n out_idset = [x[1] for x in splits]\n nfold = len(out_idset)\n\n ret = []\n for k in range(nfold):\n # perform the slicing using the indexes determined by the above methods\n dtrain = dall.slice(in_idset[k])\n dtest = dall.slice(out_idset[k])\n # run preprocessing on the data set if needed\n if fpreproc is not None:\n dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())\n else:\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret\n\n\ndef aggcv(rlist):\n # pylint: disable=invalid-name\n \"\"\"\n Aggregate cross-validation results.\n\n If verbose_eval is true, progress is displayed in every call. 
If\n verbose_eval is an integer, progress will only be displayed every\n `verbose_eval` trees, tracked via trial.\n \"\"\"\n cvmap = {}\n idx = rlist[0].split()[0]\n for line in rlist:\n arr = line.split()\n assert idx == arr[0]\n for metric_idx, it in enumerate(arr[1:]):\n if not isinstance(it, STRING_TYPES):\n it = it.decode()\n k, v = it.split(':')\n if (metric_idx, k) not in cvmap:\n cvmap[(metric_idx, k)] = []\n cvmap[(metric_idx, k)].append(float(v))\n msg = idx\n results = []\n for (metric_idx, k), v in sorted(cvmap.items(), key=lambda x: x[0][0]):\n v = np.array(v)\n if not isinstance(msg, STRING_TYPES):\n msg = msg.decode()\n mean, std = np.mean(v), np.std(v)\n results.extend([(k, mean, std)])\n return results\n\n\ndef cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,\n metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,\n fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,\n seed=0, callbacks=None, shuffle=True):\n # pylint: disable = invalid-name\n \"\"\"Cross-validation with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round : int\n Number of boosting iterations.\n nfold : int\n Number of folds in CV.\n stratified : bool\n Perform stratified sampling.\n folds : a KFold or StratifiedKFold instance or list of fold indices\n Sklearn KFolds or StratifiedKFolds object.\n Alternatively may explicitly pass sample indices for each fold.\n For ``n`` folds, **folds** should be a length ``n`` list of tuples.\n Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used\n as the training samples for the ``n`` th fold and ``out`` is a list of\n indices to be used as the testing samples for the ``n`` th fold.\n metrics : string or list of strings\n Evaluation metrics to be watched in CV.\n obj : function\n Custom objective function.\n feval : function\n Custom evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Cross-Validation metric (average of validation\n metric computed over CV folds) needs to improve at least once in\n every **early_stopping_rounds** round(s) to continue training.\n The last entry in the evaluation history will represent the best iteration.\n If there's more than one metric in the **eval_metric** parameter given in\n **params**, the last metric will be used for early stopping.\n fpreproc : function\n Preprocessing function that takes (dtrain, dtest, param) and returns\n transformed versions of those.\n as_pandas : bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return np.ndarray\n verbose_eval : bool, int, or None, default None\n Whether to display the progress. If None, progress will be displayed\n when np.ndarray is returned. If True, progress will be displayed at\n boosting stage. If an integer is given, progress will be displayed\n at every given `verbose_eval` boosting stage.\n show_stdv : bool, default True\n Whether to display the standard deviation in progress.\n Results are not affected, and always contains std.\n seed : int\n Seed used to generate the folds (passed to numpy.random.seed).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. 
code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n shuffle : bool\n Shuffle data before creating folds.\n\n Returns\n -------\n evaluation history : list(string)\n \"\"\"\n if stratified is True and not SKLEARN_INSTALLED:\n raise XGBoostError('sklearn needs to be installed in order to use stratified cv')\n\n if isinstance(metrics, str):\n metrics = [metrics]\n\n if isinstance(params, list):\n _metrics = [x[1] for x in params if x[0] == 'eval_metric']\n params = dict(params)\n if 'eval_metric' in params:\n params['eval_metric'] = _metrics\n else:\n params = dict((k, v) for k, v in params.items())\n\n if (not metrics) and 'eval_metric' in params:\n if isinstance(params['eval_metric'], list):\n metrics = params['eval_metric']\n else:\n metrics = [params['eval_metric']]\n\n params.pop(\"eval_metric\", None)\n\n results = {}\n cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc,\n stratified, folds, shuffle)\n\n # setup callbacks\n callbacks = [] if callbacks is None else callbacks\n if early_stopping_rounds is not None:\n callbacks.append(callback.early_stop(early_stopping_rounds,\n maximize=maximize,\n verbose=False))\n\n if isinstance(verbose_eval, bool) and verbose_eval:\n callbacks.append(callback.print_evaluation(show_stdv=show_stdv))\n else:\n if isinstance(verbose_eval, int):\n callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))\n\n callbacks_before_iter = [\n cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]\n callbacks_after_iter = [\n cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]\n\n for i in range(num_boost_round):\n for cb in callbacks_before_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=None))\n for fold in cvfolds:\n fold.update(i, obj)\n res = aggcv([f.eval(i, feval) for f in cvfolds])\n\n for key, mean, std in res:\n if key + '-mean' not in results:\n results[key + '-mean'] = []\n if key + '-std' not in results:\n results[key + '-std'] = []\n results[key + '-mean'].append(mean)\n results[key + '-std'].append(std)\n try:\n for cb in callbacks_after_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=res))\n except EarlyStopException as e:\n for k in results:\n results[k] = results[k][:(e.best_iteration + 1)]\n break\n if as_pandas:\n try:\n import pandas as pd\n results = pd.DataFrame.from_dict(results)\n except ImportError:\n pass\n return results\n" ]
[ [ "numpy.random.seed", "numpy.arange", "numpy.std", "numpy.array_split", "numpy.diff", "numpy.mean", "pandas.DataFrame.from_dict", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Weiqi97/LilyPadz
[ "b374908444b8594e3f3a2ccf4bc39e3e731f31aa" ]
[ "lilypadz/model/clustering.py" ]
[ "import numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom typing import List\nfrom flask import jsonify\nfrom plotly.offline import plot\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom lilypadz.model.data_processor import get_toad_processed_hop\n\n\ndef get_all_clustering_result(n_clusters: int,\n names: List[str],\n variable: List[str]):\n \"\"\"Generate a 3D plot that contains just the dots for K means result.\n\n :return: A plotly object hat has been converted to HTML format string.\n \"\"\"\n # Get the force plate column names.\n fp_variables = list(\n {\"Fore-Aft\", \"Lateral\", \"Normal\"}.intersection(variable)\n )\n\n # Get the kinematic column names.\n kinematic_variables = list(\n {\"Elbow flexion/extension\",\n \"Humeral protraction/retraction\",\n \"Humeral depression/elevation\"}.intersection(variable)\n )\n\n # Get desired toad data.\n toads_hop = [\n get_toad_processed_hop(name=name) for name in names\n ]\n\n # Get all data.\n all_data = [\n [f\"{data_name} {data.sight}\"] +\n list(data.kinematic[kinematic_variables].mean(axis=\"index\")) +\n list(data.force_plate[fp_variables].mean(axis=\"index\"))\n for one_toad_hop in toads_hop\n for data_name, data in one_toad_hop.items()\n ]\n\n data = pd.DataFrame(\n index=[data[0] for data in all_data],\n data=[data[1:] for data in all_data]\n ).dropna(axis=\"index\")\n\n # Get kMeans analyze result and unpack it.\n k_means = KMeans(n_clusters=n_clusters)\n reduced_data = PCA(n_components=3).fit_transform(data)\n k_means_index = k_means.fit_predict(reduced_data)\n\n # Get hop names.\n labels = data.index.values\n\n # Separate x, y, z coordinates from the reduced data set.\n x_value = reduced_data[:, 0]\n y_value = reduced_data[:, 1]\n z_value = reduced_data[:, 2]\n\n # Create plot for each cluster so the color will differ among clusters.\n data = [\n go.Scatter3d(\n x=x_value[np.where(group_number == k_means_index)],\n y=y_value[np.where(group_number == k_means_index)],\n z=z_value[np.where(group_number == k_means_index)],\n text=labels[np.where(group_number == k_means_index)],\n mode=\"markers\",\n name=f\"Cluster {group_number + 1}\",\n hoverinfo=\"text\",\n marker=dict(\n size=12,\n line=dict(width=1)\n )\n )\n for group_number in np.unique(k_means_index)\n ]\n\n # Set the layout of the plot, mainly set the background color to grey.\n layout = go.Layout(\n height=500,\n hovermode=\"closest\",\n title=\"K-Means Two Dimensional Scatter Plot\",\n scene=dict(\n xaxis=dict(\n title=\"PC1\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n yaxis=dict(\n title=\"PC2\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n zaxis=dict(\n title=\"PC3\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n )\n )\n\n table = pd.DataFrame(data={\n \"Cluster #\": [index + 1 for index in k_means_index],\n \"Document\": labels,\n \"X-Coordinate\": reduced_data[:, 0],\n \"Y-Coordinate\": reduced_data[:, 1],\n \"Z-Coordinate\": reduced_data[:, 2]\n }).to_html(\n index=False,\n classes=\"table table-striped table-bordered text-center\"\n )\n\n # Return the plotly figure and table.\n return jsonify(\n table=table,\n plot=plot(\n go.Figure(data=data, layout=layout),\n show_link=False,\n output_type=\"div\",\n include_plotlyjs=False\n )\n )\n\n\ndef get_one_clustering_result(n_clusters: int,\n name: str,\n variable: List[str]):\n \"\"\"Generate a 3D plot that contains just the dots for K means 
result.\n\n :return: A plotly object hat has been converted to HTML format string.\n \"\"\"\n # Get the force plate column names.\n fp_variables = list(\n {\"Fore-Aft\", \"Lateral\", \"Normal\"}.intersection(variable)\n )\n\n # Get the kinematic column names.\n kinematic_variables = list(\n {\"Elbow flexion/extension\",\n \"Humeral protraction/retraction\",\n \"Humeral depression/elevation\"}.intersection(variable)\n )\n\n # Get all data.\n all_data = [\n [f\"{data_name} {data.sight}\"] +\n list(data.kinematic[kinematic_variables].mean(axis=\"index\")) +\n list(data.force_plate[fp_variables].mean(axis=\"index\"))\n for data_name, data in get_toad_processed_hop(name=name).items()\n ]\n\n data = pd.DataFrame(\n index=[data[0] for data in all_data],\n data=[data[1:] for data in all_data]\n ).dropna(axis=\"index\")\n\n # Get kMeans analyze result and unpack it.\n k_means = KMeans(n_clusters=n_clusters)\n reduced_data = PCA(n_components=3).fit_transform(data)\n k_means_index = k_means.fit_predict(reduced_data)\n\n # Get hop names.\n labels = data.index.values\n\n # Separate x, y, z coordinates from the reduced data set.\n x_value = reduced_data[:, 0]\n y_value = reduced_data[:, 1]\n z_value = reduced_data[:, 2]\n\n # Create plot for each cluster so the color will differ among clusters.\n data = [\n go.Scatter3d(\n x=x_value[np.where(group_number == k_means_index)],\n y=y_value[np.where(group_number == k_means_index)],\n z=z_value[np.where(group_number == k_means_index)],\n text=labels[np.where(group_number == k_means_index)],\n mode=\"markers\",\n name=f\"Cluster {group_number + 1}\",\n hoverinfo=\"text\",\n marker=dict(\n size=12,\n line=dict(width=1)\n )\n )\n for group_number in np.unique(k_means_index)\n ]\n\n # Set the layout of the plot, mainly set the background color to grey.\n layout = go.Layout(\n height=500,\n hovermode=\"closest\",\n title=\"K-Means Two Dimensional Scatter Plot\",\n scene=dict(\n xaxis=dict(\n title=\"PC1\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n yaxis=dict(\n title=\"PC2\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n zaxis=dict(\n title=\"PC3\",\n showline=False,\n showbackground=True,\n backgroundcolor=\"rgb(230,230,230)\"),\n )\n )\n\n table = pd.DataFrame(data={\n \"Cluster #\": [index + 1 for index in k_means_index],\n \"Document\": labels,\n \"X-Coordinate\": reduced_data[:, 0],\n \"Y-Coordinate\": reduced_data[:, 1],\n \"Z-Coordinate\": reduced_data[:, 2]\n }).to_html(\n index=False,\n classes=\"table table-striped table-bordered text-center\"\n )\n\n # Return the plotly figure and table.\n return jsonify(\n table=table,\n plot=plot(\n go.Figure(data=data, layout=layout),\n show_link=False,\n output_type=\"div\",\n include_plotlyjs=False\n )\n )\n" ]
[ [ "sklearn.cluster.KMeans", "numpy.unique", "pandas.DataFrame", "numpy.where", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rakesh1988/pandas_market_calendars
[ "327763f6c7692b575b1844c67acf2bdd0835360e", "327763f6c7692b575b1844c67acf2bdd0835360e" ]
[ "tests/test_hkex_calendar.py", "pandas_market_calendars/exchange_calendar_eurex.py" ]
[ "import datetime\n\nimport pandas as pd\nimport pytz\n\nfrom pandas_market_calendars.exchange_calendar_hkex import HKEXExchangeCalendar\n\n\ndef test_time_zone():\n assert HKEXExchangeCalendar().tz == pytz.timezone('Asia/Shanghai')\n assert HKEXExchangeCalendar().name == 'HKEX'\n\n\ndef test_2018_holidays():\n hkex = HKEXExchangeCalendar()\n trading_days = hkex.valid_days('2018-01-01', '2018-12-31')\n holidays = ['2018-01-01', '2018-02-16', '2018-02-17', '2018-02-18',\n '2018-02-19', '2018-03-30', '2018-04-02', '2018-04-05',\n '2018-05-01', '2018-05-22', '2018-06-18', '2018-07-02',\n '2018-09-25', '2018-10-01', '2018-10-17', '2018-12-25',\n '2018-12-26']\n for date in holidays:\n assert pd.Timestamp(date, tz='UTC') not in trading_days\n for date in ['2018-05-02']:\n assert pd.Timestamp(date, tz='UTC') in trading_days\n\n\ndef test_hkex_closes_at_lunch():\n hkex = HKEXExchangeCalendar()\n schedule = hkex.schedule(\n start_date=datetime.datetime(2015, 1, 14, tzinfo=pytz.timezone('Asia/Shanghai')),\n end_date=datetime.datetime(2015, 1, 16, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n\n assert HKEXExchangeCalendar.open_at_time(\n schedule=schedule,\n timestamp=datetime.datetime(2015, 1, 14, 11, 0, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n\n assert not HKEXExchangeCalendar.open_at_time(\n schedule=schedule,\n timestamp=datetime.datetime(2015, 1, 14, 12, 10, tzinfo=pytz.timezone('Asia/Shanghai'))\n )\n", "#\n# kewlfft\n#\n\nfrom datetime import time\n\nfrom pandas.tseries.holiday import AbstractHolidayCalendar, EasterMonday, GoodFriday, Holiday, previous_friday\nfrom pytz import timezone\n\nfrom .market_calendar import (FRIDAY, MONDAY, MarketCalendar, THURSDAY, TUESDAY, WEDNESDAY)\n\n# New Year's Eve\nEUREXNewYearsEve = Holiday(\n \"New Year's Eve\",\n month=12,\n day=31,\n observance=previous_friday,\n)\n# New Year's Day\nEUREXNewYearsDay = Holiday(\n \"New Year's Day\",\n month=1,\n day=1,\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),\n)\n# Early May bank holiday\nMayBank = Holiday(\n \"Early May Bank Holiday\",\n month=5,\n day=1,\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),\n)\n# German National Holiday (Tag der Deutschen Einheit)\nGermanNationalDay = Holiday(\n 'Tag der Deutschen Einheit',\n month=10,\n day=3,\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),\n)\n# Christmas Eve\nChristmasEve = Holiday(\n 'Christmas Eve',\n month=12,\n day=24,\n observance=previous_friday,\n)\n# Christmas\nChristmas = Holiday(\n \"Christmas\",\n month=12,\n day=25,\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),\n)\n# If christmas day is Saturday Monday 27th is a holiday\n# If christmas day is sunday the Tuesday 27th is a holiday\nWeekendChristmas = Holiday(\n \"Weekend Christmas\",\n month=12,\n day=27,\n days_of_week=(MONDAY, TUESDAY),\n)\n# Boxing day\nBoxingDay = Holiday(\n \"Boxing Day\",\n month=12,\n day=26,\n)\n# If boxing day is saturday then Monday 28th is a holiday\n# If boxing day is sunday then Tuesday 28th is a holiday\nWeekendBoxingDay = Holiday(\n \"Weekend Boxing Day\",\n month=12,\n day=28,\n days_of_week=(MONDAY, TUESDAY),\n)\n\n\nclass EUREXExchangeCalendar(MarketCalendar):\n \"\"\"\n Exchange calendar for EUREX\n\n \"\"\"\n aliases = ['EUREX']\n\n @property\n def name(self):\n return \"EUREX\"\n\n @property\n def tz(self):\n return timezone('Europe/Berlin')\n\n @property\n def open_time_default(self):\n return time(9, 0, tzinfo=self.tz)\n\n @property\n def close_time_default(self):\n return time(17, 30, 
tzinfo=self.tz)\n\n @property\n def regular_holidays(self):\n return AbstractHolidayCalendar(rules=[\n EUREXNewYearsDay,\n GoodFriday,\n EasterMonday,\n MayBank,\n GermanNationalDay,\n Christmas,\n WeekendChristmas,\n BoxingDay,\n WeekendBoxingDay\n ])\n\n @property\n def special_closes(self):\n return [(\n time(12, 30),\n AbstractHolidayCalendar(rules=[\n ChristmasEve,\n EUREXNewYearsEve,\n ])\n )]\n" ]
[ [ "pandas.Timestamp" ], [ "pandas.tseries.holiday.AbstractHolidayCalendar", "pandas.tseries.holiday.Holiday" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
JLUNeverMore/FAR-HO
[ "1f381f50f99b83ee5bed9700fcbfa4375096a0ca" ]
[ "far_ho/hyper_gradients.py" ]
[ "from __future__ import absolute_import, print_function, division\n\nimport sys\nfrom collections import defaultdict, deque\n\nimport tensorflow as tf\nfrom tensorflow.python.training import slot_creator\nfrom tensorflow.contrib.opt import ScipyOptimizerInterface\n\nfrom far_ho import utils\nfrom far_ho.optimizer import OptimizerDict\nfrom far_ho.utils import dot, maybe_add, reduce_all_sums\n\nRAISE_ERROR_ON_DETACHED = False\n\n\nclass HyperGradient(object):\n def __init__(self, name):\n self._optimizer_dicts = set()\n self._inner_objectives = None\n self._hypergrad_dictionary = defaultdict(list) # dictionary (hyperparameter, list of hypergradients)\n self._ts = None\n\n self._initialization = None\n self._iteration = None\n self._state = None\n self._name = name\n\n _ERROR_NOT_OPTIMIZER_DICT = \"\"\"\n Looks like {} is not an `OptimizerDict`. Use optimizers in far_ho.optimizers for obtaining an OptimizerDict.\n \"\"\"\n\n _ERROR_HYPER_DETACHED = \"\"\"\n Hyperparameter {} is detached from this optimization dynamics.\n \"\"\"\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n # Doesn't do anything useful here. To be overridden.\n \"\"\"\n Function overridden by specific methods.\n\n :param optimizer_dict: OptimzerDict object resulting from the inner objective optimization.\n :param outer_objective: A loss function for the hyperparameters (scalar tensor)\n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n\n :return: list of hyperparameters involved in the computation\n \"\"\"\n assert isinstance(optimizer_dict, OptimizerDict), HyperGradient._ERROR_NOT_OPTIMIZER_DICT.format(optimizer_dict)\n self._optimizer_dicts.add(optimizer_dict)\n\n if hyper_list is None: # get default hyperparameters\n hyper_list = utils.hyperparameters(tf.get_variable_scope().name)\n return hyper_list\n\n @property\n def initialization(self):\n if self._initialization is None:\n self._initialization = [opt_dict.initialization for opt_dict in sorted(self._optimizer_dicts)]\n return self._initialization\n\n @property\n def iteration(self):\n if self._iteration is None:\n self._iteration = [opt_dict.iteration for opt_dict in sorted(self._optimizer_dicts)]\n return self._iteration\n\n @property\n def state(self):\n for opt_dict in sorted(self._optimizer_dicts):\n for v in opt_dict.state:\n yield v\n\n @property\n def inner_objectives(self):\n if self._inner_objectives is None:\n self._inner_objectives = [opt.objective if hasattr(opt, 'objective') else tf.constant(False)\n for opt in sorted(self._optimizer_dicts)]\n return self._inner_objectives\n\n @property\n def ts(self):\n if self._ts is None:\n self._ts = tf.group(*[opt_dict.ts for opt_dict in sorted(self._optimizer_dicts)])\n return self._ts\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n \"\"\"\n Runs the inner optimization dynamics for T iterations (T_or_generator can be indeed a generator) and computes\n in the meanwhile.\n\n :param T_or_generator: integer or generator that should yield a step. 
Express either a total number of\n iterations of inner objective optimization dynamics, or could implement a stopping\n condition, or variables number of steps.\n :param inner_objective_feed_dicts: Optional feed dictionary for the inner objective\n :param outer_objective_feed_dicts: Optional feed dictionary for the outer objective\n (note that this is not used in ForwardHG since hypergradients are not\n variables)\n :param initializer_feed_dict: Optional feed dictionary for the inner objective\n :param global_step: Optional global step for the\n :param session: Optional session (otherwise will take the default session)\n :param online: Performs the computation of the hypergradient in the online (or \"real time\") mode. Note that\n `ReverseHG` and `ForwardHG` behave differently.\n :param callback: callback funciton for the forward optimization\n\n \"\"\"\n raise NotImplementedError()\n\n def hgrads_hvars(self, hyper_list=None, aggregation_fn=None, process_fn=None):\n \"\"\"\n Method for getting hypergradient and hyperparameters as required by apply_gradient methods from tensorflow \n optimizers.\n \n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n :param aggregation_fn: Optional operation to aggregate multiple hypergradients (for the same hyperparameter),\n by default reduce_mean\n :param process_fn: Optional operation like clipping to be applied.\n :return: \n \"\"\"\n if hyper_list is None:\n hyper_list = utils.hyperparameters(tf.get_variable_scope().name)\n\n assert all([h in self._hypergrad_dictionary for h in hyper_list]), 'FINAL ERROR!'\n\n if aggregation_fn is None:\n aggregation_fn = lambda hgrad_list: tf.reduce_mean(hgrad_list, axis=0)\n\n def _aggregate_process_manage_collection(_hg_lst):\n if len(_hg_lst) == 1: # avoid useless operations...\n aggr = _hg_lst[0]\n else:\n with tf.name_scope(_hg_lst[0].op.name):\n aggr = aggregation_fn(_hg_lst) if len(_hg_lst) > 1 else _hg_lst[0]\n if process_fn is not None:\n with tf.name_scope('process_gradients'):\n aggr = process_fn(aggr)\n tf.add_to_collection(utils.GraphKeys.HYPERGRADIENTS, aggr)\n return aggr\n\n return [(_aggregate_process_manage_collection(self._hypergrad_dictionary[h]),\n h) for h in hyper_list]\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def need_scalar_hyperparameters():\n return False\n\n # noinspection PyMethodMayBeStatic\n def _make_callback(self):\n \"\"\"\n Template for callbacks\n \"\"\"\n values = []\n\n # noinspection PyUnusedLocal\n def _callback(t, feed_dcit, session):\n values.append(0) # these should not depend from any feed dictionary\n\n return values, _callback\n\n def __str__(self):\n return self._name\n\n\nclass ReverseHG(HyperGradient):\n\n def __init__(self, history=None, name='ReverseHG'):\n super(ReverseHG, self).__init__(name)\n self._alpha_iter = tf.no_op()\n self._reverse_initializer = tf.no_op()\n self._history = history if history is not None else []\n\n @staticmethod\n def _truncated(max_items, name='TruncatedReverseHG'):\n \"\"\"\n Utility method to initialize truncated reverse HG (not necessarily online)\n\n :param max_items: Maximum number of iterations that will be stored\n :param name: a name for the operations and variables that will be created\n :return: ReverseHG object\n \"\"\"\n return ReverseHG(deque(maxlen=max_items + 1), name=name)\n\n # noinspection SpellCheckingInspection\n def compute_gradients(self, outer_objective, optimizer_dict, 
hyper_list=None):\n \"\"\"\n Function that adds to the computational graph all the operations needend for computing\n the hypergradients in a \"dynamic\" way, without unrolling the entire optimization graph.\n The resulting computation, while being roughly 2x more expensive then unrolling the\n optimizaiton dynamics, requires much less (GPU) memory and is more flexible, allowing\n to set a termination condition to the parameters optimizaiton routine.\n\n :param optimizer_dict: OptimzerDict object resulting from the inner objective optimization.\n :param outer_objective: A loss function for the hyperparameters (scalar tensor)\n :param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the\n hyperparameter collection in the current scope.\n\n :return: list of hyperparameters involved in the computation\n \"\"\"\n hyper_list = super(ReverseHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n\n # derivative of outer objective w.r.t. state\n with tf.variable_scope(outer_objective.op.name): # for some reason without this there is a cathastrofic\n # failure...\n doo_ds = tf.gradients(outer_objective, list(optimizer_dict.state))\n\n alphas = self._create_lagrangian_multipliers(optimizer_dict, doo_ds)\n\n alpha_vec = utils.vectorize_all(alphas)\n dyn_vec = utils.vectorize_all(list(optimizer_dict.dynamics))\n lag_phi_t = utils.dot(alpha_vec, dyn_vec, name='iter_wise_lagrangian_part1')\n # TODO outer_objective might be a list... handle this case\n\n # iterative computation of hypergradients\n doo_dypers = tf.gradients(outer_objective, hyper_list) # (direct) derivative of outer objective w.r.t. hyp.\n alpha_dot_B = tf.gradients(lag_phi_t, hyper_list)\n # check that optimizer_dict has initial ops (phi_0)\n if optimizer_dict.init_dynamics is not None:\n lag_phi0 = utils.dot(alpha_vec, utils.vectorize_all([d for (s, d) in optimizer_dict.init_dynamics]))\n alpha_dot_B0 = tf.gradients(lag_phi0, hyper_list)\n else:\n alpha_dot_B0 = [None] * len(hyper_list)\n\n # here, if some of this is None it may mean that the hyperparameter compares inside phi_0: check that and\n # if it is not the case raise error...\n hyper_grad_vars, hyper_grad_step = [], tf.no_op()\n for dl_dh, doo_dh, a_d_b0, hyper in zip(alpha_dot_B, doo_dypers, alpha_dot_B0, hyper_list):\n assert dl_dh is not None or a_d_b0 is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)\n hgv = None\n if dl_dh is not None: # \"normal hyperparameter\"\n hgv = self._create_hypergradient(hyper, doo_dh)\n\n hyper_grad_step = tf.group(hyper_grad_step, hgv.assign_add(dl_dh))\n if a_d_b0 is not None:\n hgv = hgv + a_d_b0 if hgv is not None else a_d_b0\n # here hyper_grad_step has nothing to do...\n hyper_grad_vars.append(hgv) # save these...\n\n with tf.control_dependencies([hyper_grad_step]): # first update hypergradinet then alphas.\n _alpha_iter = tf.group(*[alpha.assign(dl_ds) for alpha, dl_ds\n in zip(alphas, tf.gradients(lag_phi_t, list(optimizer_dict.state)))])\n self._alpha_iter = tf.group(self._alpha_iter, _alpha_iter) # put all the backward iterations toghether\n\n [self._hypergrad_dictionary[h].append(hg) for h, hg in zip(hyper_list, hyper_grad_vars)]\n\n self._reverse_initializer = tf.group(self._reverse_initializer,\n tf.variables_initializer(alphas),\n tf.variables_initializer([h for h in hyper_grad_vars\n if hasattr(h, 'initializer')])) # some ->\n # hypergradients (those coming form initial dynamics) might be just tensors and not variables...\n\n return hyper_list\n\n 
@staticmethod\n def _create_lagrangian_multipliers(optimizer_dict, doo_ds):\n lag_mul = [slot_creator.create_slot(v.initialized_value(), utils.val_or_zero(der, v), 'alpha') for v, der\n in zip(optimizer_dict.state, doo_ds)]\n [tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm) for lm in lag_mul]\n utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, *lag_mul)\n # this prevents the 'automatic' initialization with tf.global_variables_initializer.\n return lag_mul\n\n @staticmethod\n def _create_hypergradient(hyper, doo_dhypers):\n \"\"\"\n Creates one hyper-gradient as a variable. doo_dhypers: initialization, that is the derivative of\n the outer objective w.r.t this hyper\n \"\"\"\n hgs = slot_creator.create_slot(hyper, utils.val_or_zero(doo_dhypers, hyper), 'hypergradient')\n utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, hgs)\n return hgs\n\n def _state_feed_dict_generator(self, history, T_or_generator):\n for t, his in zip(utils.solve_int_or_generator(T_or_generator), history):\n yield t, utils.merge_dicts(\n *[od.state_feed_dict(h) for od, h in zip(sorted(self._optimizer_dicts), his)]\n )\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n # callback may be a pair, first for froward pass, second for reverse pass\n callback = utils.as_tuple_or_list(callback)\n # same thing for T\n T_or_generator = utils.as_tuple_or_list(T_or_generator)\n\n ss = session or tf.get_default_session()\n\n self._history.clear()\n if not online:\n _fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))\n self._save_history(ss.run(self.initialization, feed_dict=_fd))\n\n # else: # not totally clear if i should add this\n # self._save_history(ss.run(list(self.state)))\n\n T = 0 # this is useful if T_or_generator is indeed a generator...\n for t in utils.solve_int_or_generator(T_or_generator[0]):\n # nonlocal t # with nonlocal would not be necessary the variable T... not compatible with 2.7\n _fd = utils.maybe_call(inner_objective_feed_dicts, t)\n self._save_history(ss.run(self.iteration, feed_dict=_fd))\n T = t\n\n utils.maybe_call(callback[0], t, _fd, ss) # callback\n\n # initialization of support variables (supports stochastic evaluation of outer objective via global_step ->\n # variable)\n # TODO (maybe tf bug or oddity) for some strange reason, if some variable's initializer depends on\n # a placeholder, then the initializer of alpha SEEMS TO DEPEND ALSO ON THAT placeholder,\n # as if the primary variable should be reinitialized as well, but, I've checked, the primary variable is NOT\n # actually reinitialized. This doesn't make sense since the primary variable is already initialized\n # and Tensorflow seems not to care... should maybe look better into this issue\n reverse_init_fd = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))\n # now adding also the initializer_feed_dict because of tf quirk...\n maybe_init_fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))\n reverse_init_fd = utils.merge_dicts(reverse_init_fd, maybe_init_fd)\n ss.run(self._reverse_initializer, feed_dict=reverse_init_fd)\n\n del self._history[-1] # do not consider last point\n\n for pt, state_feed_dict in self._state_feed_dict_generator(reversed(self._history), T_or_generator[-1]):\n # this should be fine also for truncated reverse... 
but check again the index t\n t = T - pt - 1 # if T is int then len(self.history) is T + 1 and this numerator\n # shall start at T-1\n _fd = utils.merge_dicts(state_feed_dict, utils.maybe_call(inner_objective_feed_dicts, t))\n ss.run(self._alpha_iter, _fd)\n if len(callback) == 2: utils.maybe_call(callback[1], t, _fd, ss)\n\n def _save_history(self, weights):\n self._history.append(weights)\n\n def hypergrad_callback(self, hyperparameter=None, flatten=True):\n \"\"\"callback that records the partial hypergradients on the reverse pass\"\"\"\n values = []\n gs = list(self._hypergrad_dictionary.values()) if hyperparameter is None else \\\n self._hypergrad_dictionary[hyperparameter]\n if flatten: gs = utils.vectorize_all(gs)\n\n # noinspection PyUnusedLocal\n def _callback(_, __, ss):\n values.append(ss.run(gs)) # these should not depend from any feed dictionary\n\n return values, _callback\n\n\nclass ReverseHg(ReverseHG):\n\n def __init__(self, history=None):\n print('WARNING, DEPRECATED: please use the class ReverseHG', file=sys.stderr)\n super(ReverseHg, self).__init__(history)\n\n\nclass ForwardHG(HyperGradient):\n def __init__(self, name='ForwardHG'):\n super(ForwardHG, self).__init__(name)\n self._forward_initializer = tf.no_op()\n self._zs = {} # hyperparameter - zs dictionary\n self._z_iter = tf.no_op()\n self._iteration = None\n self.A_dot_zs = {}\n\n _HYPER_RANK_ERROR_MESSAGE = \"\"\"\n ForwardHG: Only scalar hyperparameters accepted.\\n\n Hyperparameter tensor {} has rank {}.\\n\n Use keyword argument far_ho.get_hyperparameter(..., scalar=True) on hyperparameter creation.\n \"\"\"\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n hyper_list = super(ForwardHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n\n # scalar_hyper_list\n\n with tf.variable_scope(outer_objective.op.name):\n # dynamics_vec = vectorize_all(optimizer_dict.dynamics) # in the new implementation there's no need of\n # vectorizing... 
it might be more efficient since it's better to avoid too many reshaping operations...\n d_oo_d_state = tf.gradients(outer_objective, list(optimizer_dict.state))\n\n with tf.name_scope('DUMMY'): # variables to compute forward propagation\n # TODO avoid this computation if optimizer_dict has already been seen.\n aux_vs = [tf.zeros_like(v) for v in optimizer_dict.state]\n dynamics_dot_aux_v = reduce_all_sums(list(optimizer_dict.dynamics), aux_vs)\n\n der_dynamics_dot_aux_v = tf.gradients(dynamics_dot_aux_v, list(optimizer_dict.state))\n # this is a list of jacobians times aux_vs that have the same dimension of states variables.\n\n init_dynamics_dot_aux_v = None\n if optimizer_dict.init_dynamics:\n # init_dynamics_dot_aux_v = dot(vectorize_all(optimizer_dict.init_dynamics), aux_v_vec) # old impl\n init_dynamics_dot_aux_v = reduce_all_sums(\n optimizer_dict.init_dynamics, aux_vs)\n\n for hyp in hyper_list:\n assert hyp.shape.ndims == 0, ForwardHG._HYPER_RANK_ERROR_MESSAGE.format(hyp, hyp.shape.ndims)\n\n d_init_dyn_d_hyp = None if init_dynamics_dot_aux_v is None else \\\n tf.gradients(init_dynamics_dot_aux_v, hyp)[0]\n d_dyn_d_hyp = tf.gradients(dynamics_dot_aux_v, hyp)[0]\n d_oo_d_hyp = tf.gradients(outer_objective, hyp)[0]\n\n # ------------------------------------------------------------\n # check detached hyperparameters (for which hypergradient would be always null)\n hyper_ok = d_init_dyn_d_hyp is not None or d_dyn_d_hyp is not None or d_oo_d_hyp is not None\n if RAISE_ERROR_ON_DETACHED:\n # try:\n assert hyper_ok, HyperGradient._ERROR_HYPER_DETACHED.format(hyp)\n # ex\n else:\n if not hyper_ok:\n print(HyperGradient._ERROR_HYPER_DETACHED.format(hyp), file=sys.stderr)\n hyper_list.remove(hyp)\n # -------------------------------------------------------------\n\n # UPDATE OF TOTAL DERIVATIVE OF STATE W.R.T. HYPERPARAMETER\n zs = ForwardHG._create_zs(\n optimizer_dict, hyp, None if d_init_dyn_d_hyp is None else tf.gradients(d_init_dyn_d_hyp, aux_vs)\n ) # this is one z for each variable\n self._zs[hyp] = zs # store a reference for the total derivatives for easy access\n Bs = tf.gradients(d_dyn_d_hyp, aux_vs)\n\n A_dot_zs = tf.gradients(reduce_all_sums(der_dynamics_dot_aux_v, zs), aux_vs)\n\n self.A_dot_zs[hyp] = A_dot_zs\n\n _z_iter = tf.group(*[\n z.assign(maybe_add(A_dot_z, B)) for z, A_dot_z, B\n in zip(zs, A_dot_zs, Bs)\n ])\n self._z_iter = tf.group(self._z_iter, _z_iter)\n\n # -- HYPERGRADIENT -----\n d_E_T = [dot(d_oo_d_s, z) for d_oo_d_s, z in zip(d_oo_d_state, zs)\n if d_oo_d_s is not None and z is not None] # list of dot products\n hg = maybe_add(tf.reduce_sum(d_E_T), d_oo_d_hyp) # sum the partial dot products and possibly ->\n # adds the ''direct derivative'' term d(E( . 
, \\lambda))/d \\lambda\n\n self._hypergrad_dictionary[hyp].append(hg)\n self._forward_initializer = tf.group(self._forward_initializer,\n tf.variables_initializer(zs))\n return hyper_list\n\n @staticmethod\n def _create_zs(optimizer_dict, hyper, d_init_dynamics_d_hyper):\n if d_init_dynamics_d_hyper is None: d_init_dynamics_d_hyper = [None] * len(optimizer_dict)\n with tf.variable_scope('Z'):\n z = [slot_creator.create_slot(v, utils.val_or_zero(der, v), hyper.op.name) for v, der\n in zip(optimizer_dict.state, d_init_dynamics_d_hyper)]\n [tf.add_to_collection(utils.GraphKeys.ZS, lm) for lm in z]\n # in this case it is completely fine to keep zs into the global variable...\n return z\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n\n ss = session or tf.get_default_session()\n\n if not online:\n self._run_batch_initialization(ss, utils.maybe_call(\n initializer_feed_dict, utils.maybe_eval(global_step, ss)))\n\n for t in utils.solve_int_or_generator(T_or_generator):\n _fd = utils.maybe_call(inner_objective_feed_dicts, t)\n self._forward_step(ss, _fd)\n utils.maybe_call(callback, t, _fd, ss)\n\n def _forward_step(self, ss, _fd):\n ss.run(self._z_iter, _fd)\n ss.run(self.iteration, _fd)\n\n def _run_batch_initialization(self, ss, fd):\n ss.run(self.initialization, feed_dict=fd)\n ss.run(self._forward_initializer, feed_dict=fd)\n\n @staticmethod\n def need_scalar_hyperparameters():\n return True\n\n @property\n def w_dots(self):\n # if hyper: return self._zs[hyper]\n return [{h: self._zs[h][k] for h in self._zs} for k, _ in enumerate(self.state)]\n\n def z_callback(self, hyperparameter=None, flatten=True):\n zs_values = []\n zs = list(self._zs.values()) if hyperparameter is None else self._zs[hyperparameter]\n if flatten: zs = utils.vectorize_all(zs)\n\n # noinspection PyUnusedLocal\n def _callback(_, __, ss):\n zs_values.append(ss.run(zs)) # these should not depend from any feed dictionary\n\n return zs_values, _callback\n\n\nclass ImplicitHG(HyperGradient):\n \"\"\"\n Implementation follows Pedregosa's algorithm HOAG\n \"\"\"\n\n def __init__(self, linear_system_solver_gen=None, tolerance=None, name='ImplicitHG'):\n super(ImplicitHG, self).__init__(name)\n if linear_system_solver_gen is None:\n linear_system_solver_gen = lambda _obj, var_list, _tolerance: ScipyOptimizerInterface(\n _obj, var_list=var_list, options={'maxiter': 100}, method='cg', tol=_tolerance)\n self.linear_system_solver = linear_system_solver_gen\n\n if tolerance is None:\n tolerance = lambda _k: 0.1 * (0.9 ** _k)\n self.tolerance = tolerance\n\n self._lin_sys = []\n self._qs = []\n\n def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):\n hyper_list = super(ImplicitHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)\n state = list(optimizer_dict.state)\n\n with tf.variable_scope(outer_objective.op.name):\n g1 = utils.vectorize_all(tf.gradients(outer_objective, state))\n grads_inner_obj_vec = utils.vectorize_all(tf.gradients(optimizer_dict.objective, state))\n\n q = self._create_q(g1)\n obj = tf.norm(\n utils.vectorize_all(tf.gradients(utils.dot(grads_inner_obj_vec, q), state)) - g1\n ) # using the norm seems to produce better results then squared norm...\n # (even though is more costly)\n\n self._lin_sys.append(lambda _tolerance: self.linear_system_solver(obj, [q], _tolerance))\n\n g2s = tf.gradients(outer_objective, hyper_list)\n 
cross_ders = tf.gradients(utils.dot(grads_inner_obj_vec, q), hyper_list)\n for g2, cd, hyper in zip(g2s, cross_ders, hyper_list):\n assert g2 is not None or cd is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)\n hg = utils.maybe_add(-cd, g2)\n if hg is None: # this would be strange...\n print('WARNING, outer objective is only directly dependent on hyperparameter {}. ' +\n 'Direct optimization would be better!'.format(hyper))\n hg = g2\n self._hypergrad_dictionary[hyper].append(hg)\n\n return hyper_list\n\n def _create_q(self, d_oo_d_state):\n self._qs.append(slot_creator.create_zeros_slot(d_oo_d_state, 'q'))\n return self._qs[-1]\n\n def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,\n initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):\n ss = session or tf.get_default_session()\n\n inner_objective_feed_dicts = utils.as_tuple_or_list(inner_objective_feed_dicts)\n if not online:\n self._run_batch_initialization(ss, utils.maybe_call(\n initializer_feed_dict, utils.maybe_eval(global_step, ss)))\n\n for t in utils.solve_int_or_generator(T_or_generator):\n _fd = utils.maybe_call(inner_objective_feed_dicts[0], t)\n self._forward_step(ss, _fd)\n utils.maybe_call(callback, t, _fd, ss)\n\n # end of optimization. Solve linear systems.\n tol_val = utils.maybe_call(self.tolerance, utils.maybe_eval(global_step, ss)) # decreasing tolerance (seq.)\n # feed dictionaries (could...in theory, implement stochastic solution of this linear system...)\n _fd = utils.maybe_call(inner_objective_feed_dicts[-1], -1)\n _fd_outer = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))\n _fd = utils.merge_dicts(_fd, _fd_outer)\n\n for lin_sys in self._lin_sys:\n lin_sys(tol_val).minimize(ss, _fd) # implicitly warm restarts with previously found q\n\n def _forward_step(self, ss, _fd):\n ss.run(self.iteration, _fd)\n\n def _run_batch_initialization(self, ss, fd):\n ss.run(self.initialization, feed_dict=fd)\n" ]
[ [ "tensorflow.get_default_session", "tensorflow.python.training.slot_creator.create_zeros_slot", "tensorflow.constant", "tensorflow.contrib.opt.ScipyOptimizerInterface", "tensorflow.control_dependencies", "tensorflow.reduce_mean", "tensorflow.reduce_sum", "tensorflow.gradients", "tensorflow.variables_initializer", "tensorflow.get_variable_scope", "tensorflow.zeros_like", "tensorflow.no_op", "tensorflow.name_scope", "tensorflow.variable_scope", "tensorflow.group", "tensorflow.add_to_collection" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
seono/SKKALBERT
[ "0fe68260558656c9732205391539aa202e70ba67" ]
[ "src/utils/models/word_utils.py" ]
[ "import argparse, os, sys, math\nimport numpy as np\nfrom gensim.models import Word2Vec\nfrom sklearn.decomposition import TruncatedSVD\nfrom soynlp.word import pmi\nfrom soynlp.vectorizer import sent_to_word_contexts_matrix\nfrom collections import defaultdict\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom .preprocess import get_tokenizer\n\n\ndef train_word2vec(corpus_fname, model_fname, max_num_tokens_per_doc):\n make_save_path(model_fname)\n corpus_data = open(corpus_fname, 'r').readlines()\n max_num_tokens = np.max([len(sent.replace('\\n', '').strip().split(\" \")) for sent in corpus_data])\n if max_num_tokens_per_doc is None:\n max_num_tokens_per_doc = max_num_tokens\n else:\n max_num_tokens_per_doc = int(max_num_tokens_per_doc)\n print(\"Maximum number of tokens in corpus: \", max_num_tokens)\n print(\"Maximum token length per document: \", max_num_tokens_per_doc)\n corpus = []\n for sent in corpus_data:\n tokens = sent.replace('\\n', '').strip().split(\" \")\n while len(tokens) > max_num_tokens_per_doc:\n corpus.append(tokens[:max_num_tokens_per_doc])\n tokens = tokens[max_num_tokens_per_doc:]\n if len(tokens) > 0:\n corpus.append(tokens)\n model = Word2Vec(corpus, size=100, workers=4, sg=1)\n model.save(model_fname)\n\n\"\"\"\nLatent Semantic Analysis\nInspired by:\nhttps://lovit.github.io/nlp/2018/04/22/context_vector_for_word_similarity\nhttps://lovit.github.io/nlp/2018/04/22/implementing_pmi_numpy_practice\n\"\"\"\ndef latent_semantic_analysis(corpus_fname, output_fname):\n make_save_path(output_fname)\n corpus = [sent.replace('\\n', '').strip() for sent in open(corpus_fname, 'r').readlines()]\n # construct co-occurrence matrix (=word_context)\n # dynamic weight if True. co-occurrence weight = [1, (w-1)/w, (w-2)/w, ... 
1/w]\n input_matrix, idx2vocab = sent_to_word_contexts_matrix(\n corpus,\n windows=3,\n min_tf=10,\n dynamic_weight=True,\n verbose=True)\n # compute truncated SVD\n cooc_svd = TruncatedSVD(n_components=100)\n cooc_vecs = cooc_svd.fit_transform(input_matrix)\n with open(output_fname + \"-cooc.vecs\", 'w') as f1:\n for word, vec in zip(idx2vocab, cooc_vecs):\n str_vec = [str(el) for el in vec]\n f1.writelines(word + ' ' + ' '.join(str_vec) + \"\\n\")\n # Shift PPMI at k=0, (equal PPMI)\n # pmi(word, contexts)\n # px: Probability of rows(items)\n # py: Probability of columns(features)\n pmi_matrix, _, _ = pmi(input_matrix, min_pmi=math.log(5))\n # compute truncated SVD\n pmi_svd = TruncatedSVD(n_components=100)\n pmi_vecs = pmi_svd.fit_transform(input_matrix)\n with open(output_fname + \"-pmi.vecs\", 'w') as f2:\n for word, vec in zip(idx2vocab, pmi_vecs):\n str_vec = [str(el) for el in vec]\n f2.writelines(word + ' ' + ' '.join(str_vec) + \"\\n\")\n\n\nclass CBoWModel(object):\n\n def __init__(self, train_fname, embedding_fname, model_fname, embedding_corpus_fname,\n embedding_method=\"fasttext\", is_weighted=True, average=False, dim=100, tokenizer_name=\"mecab\"):\n # configurations\n make_save_path(model_fname)\n self.dim = dim\n self.average = average\n if is_weighted:\n model_full_fname = model_fname + \"-weighted\"\n else:\n model_full_fname = model_fname + \"-original\"\n self.tokenizer = get_tokenizer(tokenizer_name)\n if is_weighted:\n # ready for weighted embeddings\n self.embeddings = self.load_or_construct_weighted_embedding(embedding_fname, embedding_method, embedding_corpus_fname)\n print(\"loading weighted embeddings, complete!\")\n else:\n # ready for original embeddings\n words, vectors = self.load_word_embeddings(embedding_fname, embedding_method)\n self.embeddings = defaultdict(list)\n for word, vector in zip(words, vectors):\n self.embeddings[word] = vector\n print(\"loading original embeddings, complete!\")\n if not os.path.exists(model_full_fname):\n print(\"train Continuous Bag of Words model\")\n self.model = self.train_model(train_fname, model_full_fname)\n else:\n print(\"load Continuous Bag of Words model\")\n self.model = self.load_model(model_full_fname)\n\n def evaluate(self, test_data_fname, batch_size=3000, verbose=False):\n print(\"evaluation start!\")\n test_data = self.load_or_tokenize_corpus(test_data_fname)\n data_size = len(test_data)\n num_batches = int((data_size - 1) / batch_size) + 1\n eval_score = 0\n for batch_num in range(num_batches):\n batch_sentences = []\n batch_tokenized_sentences = []\n batch_labels = []\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n features = test_data[start_index:end_index]\n for feature in features:\n sentence, tokens, label = feature\n batch_sentences.append(sentence)\n batch_tokenized_sentences.append(tokens)\n batch_labels.append(label)\n preds, curr_eval_score = self.predict_by_batch(batch_tokenized_sentences, batch_labels)\n eval_score += curr_eval_score\n if verbose:\n for sentence, pred, label in zip(batch_sentences, preds, batch_labels):\n print(sentence, \", pred:\", pred, \", label:\", label)\n print(\"# of correct:\", str(eval_score), \", total:\", str(len(test_data)), \", score:\", str(eval_score / len(test_data)))\n\n def predict(self, sentence):\n tokens = self.tokenizer.morphs(sentence)\n sentence_vector = self.get_sentence_vector(tokens)\n scores = np.dot(self.model[\"vectors\"], sentence_vector)\n pred = self.model[\"labels\"][np.argmax(scores)]\n 
return pred\n\n def predict_by_batch(self, tokenized_sentences, labels):\n sentence_vectors, eval_score = [], 0\n for tokens in tokenized_sentences:\n sentence_vectors.append(self.get_sentence_vector(tokens))\n scores = np.dot(self.model[\"vectors\"], np.array(sentence_vectors).T)\n preds = np.argmax(scores, axis=0)\n for pred, label in zip(preds, labels):\n if self.model[\"labels\"][pred] == label:\n eval_score += 1\n return preds, eval_score\n\n def get_sentence_vector(self, tokens):\n vector = np.zeros(self.dim)\n for token in tokens:\n if token in self.embeddings.keys():\n vector += self.embeddings[token]\n if not self.average:\n vector /= len(tokens)\n vector_norm = np.linalg.norm(vector)\n if vector_norm != 0:\n unit_vector = vector / vector_norm\n else:\n unit_vector = np.zeros(self.dim)\n return unit_vector\n\n def load_or_tokenize_corpus(self, fname):\n data = []\n if os.path.exists(fname + \"-tokenized\"):\n with open(fname + \"-tokenized\", \"r\") as f1:\n for line in f1:\n sentence, tokens, label = line.strip().split(\"\\u241E\")\n data.append([sentence, tokens.split(), label])\n else:\n with open(fname, \"r\") as f2, open(fname + \"-tokenized\", \"w\") as f3:\n for line in f2:\n sentence, label = line.strip().split(\"\\u241E\")\n tokens = self.tokenizer.morphs(sentence)\n data.append([sentence, tokens, label])\n f3.writelines(sentence + \"\\u241E\" + ' '.join(tokens) + \"\\u241E\" + label + \"\\n\")\n return data\n\n def compute_word_frequency(self, embedding_corpus_fname):\n total_count = 0\n words_count = defaultdict(int)\n with open(embedding_corpus_fname, \"r\") as f:\n for line in f:\n tokens = line.strip().split()\n for token in tokens:\n words_count[token] += 1\n total_count += 1\n return words_count, total_count\n\n def load_word_embeddings(self, vecs_fname, method):\n if method == \"word2vec\":\n model = Word2Vec.load(vecs_fname)\n words = model.wv.index2word\n vecs = model.wv.vectors\n else:\n words, vecs = [], []\n with open(vecs_fname, 'r', encoding='utf-8') as f1:\n if \"fasttext\" in method:\n next(f1) # skip head line\n for line in f1:\n if method == \"swivel\":\n splited_line = line.replace(\"\\n\", \"\").strip().split(\"\\t\")\n else:\n splited_line = line.replace(\"\\n\", \"\").strip().split(\" \")\n words.append(splited_line[0])\n vec = [float(el) for el in splited_line[1:]]\n vecs.append(vec)\n return words, vecs\n\n def load_or_construct_weighted_embedding(self, embedding_fname, embedding_method, embedding_corpus_fname, a=0.0001):\n dictionary = {}\n if os.path.exists(embedding_fname + \"-weighted\"):\n # load weighted word embeddings\n with open(embedding_fname + \"-weighted\", \"r\") as f2:\n for line in f2:\n word, weighted_vector = line.strip().split(\"\\u241E\")\n weighted_vector = [float(el) for el in weighted_vector.split()]\n dictionary[word] = weighted_vector\n else:\n # load pretrained word embeddings\n words, vecs = self.load_word_embeddings(embedding_fname, embedding_method)\n # compute word frequency\n words_count, total_word_count = self.compute_word_frequency(embedding_corpus_fname)\n # construct weighted word embeddings\n with open(embedding_fname + \"-weighted\", \"w\") as f3:\n for word, vec in zip(words, vecs):\n if word in words_count.keys():\n word_prob = words_count[word] / total_word_count\n else:\n word_prob = 0.0\n weighted_vector = (a / (word_prob + a)) * np.asarray(vec)\n dictionary[word] = weighted_vector\n f3.writelines(word + \"\\u241E\" + \" \".join([str(el) for el in weighted_vector]) + \"\\n\")\n return dictionary\n\n def 
train_model(self, train_data_fname, model_fname):\n model = {\"vectors\": [], \"labels\": [], \"sentences\": []}\n train_data = self.load_or_tokenize_corpus(train_data_fname)\n with open(model_fname, \"w\") as f:\n for sentence, tokens, label in train_data:\n tokens = self.tokenizer.morphs(sentence)\n sentence_vector = self.get_sentence_vector(tokens)\n model[\"sentences\"].append(sentence)\n model[\"vectors\"].append(sentence_vector)\n model[\"labels\"].append(label)\n str_vector = \" \".join([str(el) for el in sentence_vector])\n f.writelines(sentence + \"\\u241E\" + \" \".join(tokens) + \"\\u241E\" + str_vector + \"\\u241E\" + label + \"\\n\")\n return model\n\n def load_model(self, model_fname):\n model = {\"vectors\": [], \"labels\": [], \"sentences\": []}\n with open(model_fname, \"r\") as f:\n for line in f:\n sentence, _, vector, label = line.strip().split(\"\\u241E\")\n vector = np.array([float(el) for el in vector.split()])\n model[\"sentences\"].append(sentence)\n model[\"vectors\"].append(vector)\n model[\"labels\"].append(label)\n return model\n\n\ndef make_save_path(full_path):\n if full_path[:4] == \"data\":\n full_path = os.path.join(os.path.abspath(\".\"), full_path)\n model_path = '/'.join(full_path.split(\"/\")[:-1])\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--method', type=str, help='method')\n parser.add_argument('--input_path', type=str, help='Location of input files')\n parser.add_argument('--output_path', type=str, help='Location of output files')\n parser.add_argument('--embedding_path', type=str, help='Location of embedding model')\n parser.add_argument('--is_weighted', type=str, help='Use weighted method or not')\n parser.add_argument('--train_corpus_path', type=str, help='Location of train corpus')\n parser.add_argument('--test_corpus_path', type=str, help='Location of test corpus')\n parser.add_argument('--embedding_name', type=str, help='embedding name')\n parser.add_argument('--embedding_corpus_path', type=str, help='embedding corpus path')\n parser.add_argument('--max_num_tokens_per_doc', type=str, help='maximum number of tokens(word2vec)')\n parser.add_argument('--average', type=str, default=\"False\", help='average or not')\n args = parser.parse_args()\n\n def str2bool(str):\n return str.lower() in [\"true\", \"t\"]\n\n if args.method == \"train_word2vec\":\n train_word2vec(args.input_path, args.output_path, args.max_num_tokens_per_doc)\n elif args.method == \"latent_semantic_analysis\":\n latent_semantic_analysis(args.input_path, args.output_path)\n elif args.method == \"cbow\":\n model = CBoWModel(args.train_corpus_path, args.embedding_path,\n args.output_path, args.embedding_corpus_path,\n args.embedding_name, str2bool(args.is_weighted),\n str2bool(args.average))\n model.evaluate(args.test_corpus_path)" ]
[ [ "sklearn.decomposition.TruncatedSVD", "numpy.dot", "numpy.asarray", "numpy.linalg.norm", "numpy.argmax", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ness001/NeteaseMusic-playlist-download-app
[ "a06bc04493cc8b0ae4ad08eeb2db1b05d96bd3d2" ]
[ "batch.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'batch.ui'\n#\n# Created by: PyQt5 UI code generator 5.14.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QFileDialog\nfrom time import sleep\nfrom requests import get\nfrom urllib.request import urlretrieve, install_opener, build_opener, urlopen\nimport eyed3\nfrom os import path, chdir, getcwd, listdir\nfrom configparser import ConfigParser\nfrom pathlib import Path\nimport pandas as pd\n\n\nclass mythread(QThread):\n val = pyqtSignal(int)\n msg = pyqtSignal(str)\n max = pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(mythread, self).__init__(parent)\n self.pid = None\n\n def run(self):\n\n print(getcwd())\n # self.msg.emit(getcwd())\n try:\n pid = int(self.pid) # 就算是数字,从textedit接收到的也是string\n r = get('http://music.163.com/api/playlist/detail?id=' + str(pid))\n if r.json()['code'] != 200:\n self.msg.emit(\"未找到歌单\")\n else:\n meta = r.json()['result']\n print('更新来自【' + meta['creator']['nickname'] + '】的歌单--' + meta['name'])\n self.msg.emit('更新来自【' + meta['creator']['nickname'] + '】的歌单--' + meta['name'])\n # QtWidgets.QApplication.processEvents()\n ids = []\n titles = []\n artists = []\n albums = []\n album_pics = []\n\n for i in range(len(r.json()['result']['tracks'])):\n ids.append(r.json()['result']['tracks'][i]['id'])\n titles.append(r.json()['result']['tracks'][i]['name'])\n artists.append(r.json()['result']['tracks'][i]['artists'][0]['name'])\n albums.append(r.json()['result']['tracks'][i]['album']['name'])\n album_pics.append(r.json()['result']['tracks'][i]['album']['blurPicUrl'])\n\n opener = build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n install_opener(opener)\n Path('./music').mkdir(parents=True, exist_ok=True) # an robust way to add a new folder\n mp3_path = './music/'\n Path('./img').mkdir(parents=True, exist_ok=True)\n img_path = './img/'\n\n d = {'ids': ids, 'titles': titles, 'artists': artists, 'albums': albums, 'album_pics': album_pics}\n df = pd.DataFrame(data=d)\n\n old = [path.splitext(item)[0] for item in listdir(mp3_path)]\n new = list(set(titles) - set(old))\n print('本次更新曲目', *new, sep=',')\n self.msg.emit(\"本次更新曲目...\")\n self.max.emit(len(new) - 1)\n for i in range(0, len(new)):\n self.msg.emit(str(i + 1) + '. 
' + new[i])\n self.val.emit(i)\n # row=df.loc[df.titles == new[i]]\n # reference cell value row.titles.values[0]\n # better way\n row = df.loc[df.titles == new[i]].to_dict(orient='records')[0]\n song_url = 'http://music.163.com/song/media/outer/url?id=' + str(row['ids']) + '.mp3'\n r = urlopen(song_url)\n if r.geturl() == 'https://music.163.com/404':\n print('歌曲【' + row['titles'] + \"】无资源!\")\n self.msg.emit('歌曲【' + row['titles'] + \"】无资源!\")\n else:\n mp3_name = mp3_path + row['titles'].replace('/', '-') + '.mp3'\n urlretrieve(song_url, mp3_name)\n\n img_name = img_path + row['albums'].replace('/', '-') + '.jpg'\n urlretrieve(row['album_pics'], img_name)\n\n audiofile = eyed3.load(mp3_name)\n audiofile.tag.artist = str(row['artists'])\n audiofile.tag.album = str(row['albums'])\n # audiofile.tag.images.set(type_=3,img_data=None,mime_type='image/jpeg',img_url=album_pics[i])\n # simple url reference won't work, you have to download it to your disk\n # plus this method doesn't informed on the documentation, I found it on the stackoverflow\n audiofile.tag.images.set(type_=3, img_data=None, mime_type='image/jpeg',\n img_url=open(img_name, 'rb').read())\n # id3 version is important, encoding is important\n # the former granteened the id3 tag will be recognized by music players like Apple Music\n # the latter made the saving process ending up no error like 'Latin1' error\n audiofile.tag.save(version=eyed3.id3.ID3_V2_3, encoding='utf-8')\n sleep(0.5)\n print(\"更新完成!\")\n self.msg.emit(\"更新完成\")\n except ValueError:\n # assert isinstance(pid, int), \"id格式为纯数字\"\n self.msg.emit('⚠️id格式应为纯数字')\n # print('aaaa')\n\n\nclass Ui_MainWindow(object):\n def __init__(self):\n self.folder = None\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(306, 298)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.verticalLayout.addItem(spacerItem)\n self.pid = QtWidgets.QLineEdit(self.centralwidget)\n self.pid.setObjectName(\"pid\")\n self.verticalLayout.addWidget(self.pid)\n self.pid.setText('5022293116')\n\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.submit = QtWidgets.QPushButton(self.centralwidget)\n self.submit.setObjectName(\"submit\")\n self.horizontalLayout_2.addWidget(self.submit)\n self.folder_btn = QtWidgets.QToolButton(self.centralwidget)\n self.folder_btn.setObjectName(\"toolButton\")\n self.horizontalLayout_2.addWidget(self.folder_btn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.info = QtWidgets.QTextBrowser(self.centralwidget)\n self.info.setObjectName(\"info\")\n self.verticalLayout.addWidget(self.info)\n self.pbar = QtWidgets.QProgressBar(self.centralwidget)\n self.pbar.setProperty(\"value\", 0)\n self.pbar.setObjectName(\"pbar\")\n self.verticalLayout.addWidget(self.pbar)\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n 
MainWindow.setWindowTitle(_translate(\"MainWindow\", \"网易云歌单下载\"))\n self.submit.setText(_translate(\"MainWindow\", \"submit\"))\n self.folder_btn.setText(_translate(\"MainWindow\", \"...\"))\n\n self.submit.clicked.connect(self.start_pbar) #### no mycode()\n self.folder_btn.clicked.connect(self.select_folder)\n # for .app file\n # self.current_path=Path(QtCore.QCoreApplication.applicationDirPath()).parents[1]\n\n # for unix file\n self.current_path = path.expanduser(\"~\")\n # print(self.current_path)\n self.cp = ConfigParser()\n\n self.config_dir = self.current_path + '/.batch'\n # print(self.config_dir)\n self.config_file = self.config_dir + '/settings.ini'\n # print(self.config_file)\n\n if not path.isfile(self.config_file):\n Path('./.batch').mkdir(parents=True, exist_ok=True)\n Path(self.config_file).touch(exist_ok=True)\n self.cp.add_section('Default')\n self.cp.add_section('User')\n self.cp['Default']['folder'] = self.current_path\n self.cp['User']['folder'] = ''\n with open(self.config_file, 'w') as f:\n self.cp.write(f)\n # load ini file\n self.cp.read(self.config_file)\n # if user had set cwd, use that cwd\n if self.cp['User']['folder'] != '':\n self.current_path = self.cp['User']['folder']\n input(\"Press enter to close program\")\n # #####################\n # if getattr(sys,'frozen',False):\n # # self.statusbar.showMessage(sys.executable)\n # # chdir(path.dirname(sys.executable))\n # # self.statusbar.showMessage(sys.argv[0])\n # # self.current_path=sys.executable\n # self.current_path=sys.argv[0]\n # # chdir(path.dirname(sys.argv[0]))\n # else:\n # self.current_path = path.abspath(path.dirname(__file__))\n # #####################\n\n chdir(self.current_path)\n self.statusbar.showMessage('当前目录:' + getcwd())\n\n # self.statusbar.showMessage('当前目录:'+getcwd())\n # os getcwd未必一直正确\n # os path,你需要判断是script还是application bundle\n\n def start_pbar(self):\n self.thread = mythread()\n self.pbar.setValue(0)\n self.thread.pid = self.pid.text()\n # self.thread.folder = self.folder\n self.thread.val.connect(self.set_pbar)\n self.thread.msg.connect(self.set_msg)\n self.thread.max.connect(self.set_pbar_max)\n self.thread.start()\n # msg=QtWidgets.QMessageBox()\n # msg.setText(\"abc\")\n\n def set_pbar(self, val):\n self.pbar.setValue(val)\n\n def set_pbar_max(self, max):\n self.pbar.setMaximum(max)\n\n def set_msg(self, msg):\n self.info.append(msg)\n\n def select_folder(self):\n self.folder = QFileDialog.getExistingDirectory()\n chdir(QFileDialog.getExistingDirectory())\n # ini file is load before, so you can just edit it\n self.cp['User']['folder'] = self.folder\n with open(self.config_file, 'w') as f:\n self.cp.write(f)\n self.statusbar.showMessage('当前目录:' + str(self.folder))\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
itsvetkov/pyqtgraph
[ "aa26d8ac82e00ea9ba992fef365933960e9e8aa2" ]
[ "pyqtgraph/functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nfunctions.py - Miscellaneous functions with no other home\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more infomation.\n\"\"\"\n\nfrom __future__ import division\nfrom .python2_3 import asUnicode\nfrom .Qt import QtGui, QtCore, USE_PYSIDE\nColors = {\n 'b': QtGui.QColor(0,0,255,255),\n 'g': QtGui.QColor(0,255,0,255),\n 'r': QtGui.QColor(255,0,0,255),\n 'c': QtGui.QColor(0,255,255,255),\n 'm': QtGui.QColor(255,0,255,255),\n 'y': QtGui.QColor(255,255,0,255),\n 'k': QtGui.QColor(0,0,0,255),\n 'w': QtGui.QColor(255,255,255,255),\n 'd': QtGui.QColor(150,150,150,255),\n 'l': QtGui.QColor(200,200,200,255),\n 's': QtGui.QColor(100,100,150,255),\n} \n\nSI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')\nSI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'\n\n\n\nfrom .Qt import QtGui, QtCore, USE_PYSIDE\nfrom . import getConfigOption, setConfigOptions\nimport numpy as np\nimport decimal, re\nimport ctypes\nimport sys, struct\n\nfrom . import debug\n\ndef siScale(x, minVal=1e-25, allowUnicode=True):\n \"\"\"\n Return the recommended scale factor and SI prefix string for x.\n \n Example::\n \n siScale(0.0001) # returns (1e6, 'μ')\n # This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits\n \"\"\"\n \n if isinstance(x, decimal.Decimal):\n x = float(x)\n \n try:\n if np.isnan(x) or np.isinf(x):\n return(1, '')\n except:\n print(x, type(x))\n raise\n if abs(x) < minVal:\n m = 0\n x = 0\n else:\n m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))\n \n if m == 0:\n pref = ''\n elif m < -8 or m > 8:\n pref = 'e%d' % (m*3)\n else:\n if allowUnicode:\n pref = SI_PREFIXES[m+8]\n else:\n pref = SI_PREFIXES_ASCII[m+8]\n p = .001**m\n \n return (p, pref) \n\ndef siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):\n \"\"\"\n Return the number x formatted in engineering notation with SI prefix.\n \n Example::\n siFormat(0.0001, suffix='V') # returns \"100 μV\"\n \"\"\"\n \n if space is True:\n space = ' '\n if space is False:\n space = ''\n \n \n (p, pref) = siScale(x, minVal, allowUnicode)\n if not (len(pref) > 0 and pref[0] == 'e'):\n pref = space + pref\n \n if error is None:\n fmt = \"%.\" + str(precision) + \"g%s%s\"\n return fmt % (x*p, pref, suffix)\n else:\n if allowUnicode:\n plusminus = space + asUnicode(\"±\") + space\n else:\n plusminus = \" +/- \"\n fmt = \"%.\" + str(precision) + \"g%s%s%s%s\"\n return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))\n \ndef siEval(s):\n \"\"\"\n Convert a value written in SI notation to its equivalent prefixless value\n \n Example::\n \n siEval(\"100 μV\") # returns 0.0001\n \"\"\"\n \n s = asUnicode(s)\n m = re.match(r'(-?((\\d+(\\.\\d*)?)|(\\.\\d+))([eE]-?\\d+)?)\\s*([u' + SI_PREFIXES + r']?).*$', s)\n if m is None:\n raise Exception(\"Can't convert string '%s' to number.\" % s)\n v = float(m.groups()[0])\n p = m.groups()[6]\n #if p not in SI_PREFIXES:\n #raise Exception(\"Can't convert string '%s' to number--unknown prefix.\" % s)\n if p == '':\n n = 0\n elif p == 'u':\n n = -2\n else:\n n = SI_PREFIXES.index(p) - 8\n return v * 1000**n\n \n\nclass Color(QtGui.QColor):\n def __init__(self, *args):\n QtGui.QColor.__init__(self, mkColor(*args))\n \n def glColor(self):\n \"\"\"Return (r,g,b,a) normalized for use in opengl\"\"\"\n return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)\n \n def __getitem__(self, ind):\n return 
(self.red, self.green, self.blue, self.alpha)[ind]()\n \n \ndef mkColor(*args):\n \"\"\"\n Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:\n \n ================ ================================================\n 'c' one of: r, g, b, c, m, y, k, w \n R, G, B, [A] integers 0-255\n (R, G, B, [A]) tuple of integers 0-255\n float greyscale, 0.0-1.0\n int see :func:`intColor() <pyqtgraph.intColor>`\n (int, hues) see :func:`intColor() <pyqtgraph.intColor>`\n \"RGB\" hexadecimal strings; may begin with '#'\n \"RGBA\" \n \"RRGGBB\" \n \"RRGGBBAA\" \n QColor QColor instance; makes a copy.\n ================ ================================================\n \"\"\"\n err = 'Not sure how to make a color from \"%s\"' % str(args)\n if len(args) == 1:\n if isinstance(args[0], basestring):\n c = args[0]\n if c[0] == '#':\n c = c[1:]\n if len(c) == 1:\n try:\n return Colors[c]\n except KeyError:\n raise Exception('No color named \"%s\"' % c)\n if len(c) == 3:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = 255\n elif len(c) == 4:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = int(c[3]*2, 16)\n elif len(c) == 6:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = int(c[6:8], 16)\n elif isinstance(args[0], QtGui.QColor):\n return QtGui.QColor(args[0])\n elif isinstance(args[0], float):\n r = g = b = int(args[0] * 255)\n a = 255\n elif hasattr(args[0], '__len__'):\n if len(args[0]) == 3:\n (r, g, b) = args[0]\n a = 255\n elif len(args[0]) == 4:\n (r, g, b, a) = args[0]\n elif len(args[0]) == 2:\n return intColor(*args[0])\n else:\n raise Exception(err)\n elif type(args[0]) == int:\n return intColor(args[0])\n else:\n raise Exception(err)\n elif len(args) == 3:\n (r, g, b) = args\n a = 255\n elif len(args) == 4:\n (r, g, b, a) = args\n else:\n raise Exception(err)\n \n args = [r,g,b,a]\n args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]\n args = list(map(int, args))\n return QtGui.QColor(*args)\n\n\ndef mkBrush(*args, **kwds):\n \"\"\"\n | Convenience function for constructing Brush.\n | This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`\n | Calling mkBrush(None) returns an invisible brush.\n \"\"\"\n if 'color' in kwds:\n color = kwds['color']\n elif len(args) == 1:\n arg = args[0]\n if arg is None:\n return QtGui.QBrush(QtCore.Qt.NoBrush)\n elif isinstance(arg, QtGui.QBrush):\n return QtGui.QBrush(arg)\n else:\n color = arg\n elif len(args) > 1:\n color = args\n return QtGui.QBrush(mkColor(color))\n\ndef mkPen(*args, **kargs):\n \"\"\"\n Convenience function for constructing QPen. 
\n \n Examples::\n \n mkPen(color)\n mkPen(color, width=2)\n mkPen(cosmetic=False, width=4.5, color='r')\n mkPen({'color': \"FF0\", width: 2})\n mkPen(None) # (no pen)\n \n In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` \"\"\"\n \n color = kargs.get('color', None)\n width = kargs.get('width', 1)\n style = kargs.get('style', None)\n dash = kargs.get('dash', None)\n cosmetic = kargs.get('cosmetic', True)\n hsv = kargs.get('hsv', None)\n \n if len(args) == 1:\n arg = args[0]\n if isinstance(arg, dict):\n return mkPen(**arg)\n if isinstance(arg, QtGui.QPen):\n return QtGui.QPen(arg) ## return a copy of this pen\n elif arg is None:\n style = QtCore.Qt.NoPen\n else:\n color = arg\n if len(args) > 1:\n color = args\n \n if color is None:\n color = mkColor('l')\n if hsv is not None:\n color = hsvColor(*hsv)\n else:\n color = mkColor(color)\n \n pen = QtGui.QPen(QtGui.QBrush(color), width)\n pen.setCosmetic(cosmetic)\n if style is not None:\n pen.setStyle(style)\n if dash is not None:\n pen.setDashPattern(dash)\n return pen\n\ndef hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):\n \"\"\"Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)\"\"\"\n c = QtGui.QColor()\n c.setHsvF(hue, sat, val, alpha)\n return c\n\n \ndef colorTuple(c):\n \"\"\"Return a tuple (R,G,B,A) from a QColor\"\"\"\n return (c.red(), c.green(), c.blue(), c.alpha())\n\ndef colorStr(c):\n \"\"\"Generate a hex string code from a QColor\"\"\"\n return ('%02x'*4) % colorTuple(c)\n\ndef intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255, **kargs):\n \"\"\"\n Creates a QColor from a single index. Useful for stepping through a predefined list of colors.\n \n The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be\n \n Colors are chosen by cycling across hues while varying the value (brightness). 
\n By default, this selects from a list of 9 hues.\"\"\"\n hues = int(hues)\n values = int(values)\n ind = int(index) % (hues * values)\n indh = ind % hues\n indv = ind / hues\n if values > 1:\n v = minValue + indv * ((maxValue-minValue) / (values-1))\n else:\n v = maxValue\n h = minHue + (indh * (maxHue-minHue)) / hues\n \n c = QtGui.QColor()\n c.setHsv(h, sat, v)\n c.setAlpha(alpha)\n return c\n\ndef glColor(*args, **kargs):\n \"\"\"\n Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0\n Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.\n \"\"\"\n c = mkColor(*args, **kargs)\n return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)\n\n \n\ndef makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):\n \"\"\"\n Construct a path outlining an arrow with the given dimensions.\n The arrow points in the -x direction with tip positioned at 0,0.\n If *tipAngle* is supplied (in degrees), it overrides *headWidth*.\n If *tailLen* is None, no tail will be drawn.\n \"\"\"\n headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)\n path = QtGui.QPainterPath()\n path.moveTo(0,0)\n path.lineTo(headLen, -headWidth)\n if tailLen is None:\n innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)\n path.lineTo(innerY, 0)\n else:\n tailWidth *= 0.5\n innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)\n path.lineTo(innerY, -tailWidth)\n path.lineTo(headLen + tailLen, -tailWidth)\n path.lineTo(headLen + tailLen, tailWidth)\n path.lineTo(innerY, tailWidth)\n path.lineTo(headLen, headWidth)\n path.lineTo(0,0)\n return path\n \n \n \ndef affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):\n \"\"\"\n Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays such as MRI images for viewing as 1D or 2D data.\n \n The slicing axes are aribtrary; they do not need to be orthogonal to the original data or even to each other. It is possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger datasets. The original data is interpolated onto a new array of coordinates using scipy.ndimage.map_coordinates if it is available (see the scipy documentation for more information about this). If scipy is not available, then a slower implementation of map_coordinates is used.\n \n For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`\n \n ============== ====================================================================================================\n **Arguments:**\n *data* (ndarray) the original dataset\n *shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))\n *origin* the location in the original dataset that will become the origin of the sliced data.\n *vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same \n length as *axes*. If the vectors are not unit length, the result will be scaled relative to the \n original data. If the vectors are not orthogonal, the result will be sheared relative to the \n original data.\n *axes* The axes in the original dataset which correspond to the slice *vectors*\n *order* The order of spline interpolation. Default is 1 (linear). 
See scipy.ndimage.map_coordinates\n for more information.\n *returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select\n values from the original dataset.\n *All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*\n --------------------------------------------------------------------------------------------------------------------\n ============== ====================================================================================================\n \n Note the following must be true: \n \n | len(shape) == len(vectors) \n | len(origin) == len(axes) == len(vectors[i])\n \n Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes\n \n * data = array with dims (time, x, y, z) = (100, 40, 40, 40)\n * The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1) \n * The origin of the slice will be at (x,y,z) = (40, 0, 0)\n * We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)\n \n The call for this example would look like::\n \n affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))\n \n \"\"\"\n try:\n import scipy.ndimage\n have_scipy = True\n except ImportError:\n have_scipy = False\n have_scipy = False\n\n # sanity check\n if len(shape) != len(vectors):\n raise Exception(\"shape and vectors must have same length.\")\n if len(origin) != len(axes):\n raise Exception(\"origin and axes must have same length.\")\n for v in vectors:\n if len(v) != len(axes):\n raise Exception(\"each vector must be same length as axes.\")\n \n shape = list(map(np.ceil, shape))\n\n ## transpose data so slice axes come first\n trAx = list(range(data.ndim))\n for x in axes:\n trAx.remove(x)\n tr1 = tuple(axes) + tuple(trAx)\n data = data.transpose(tr1)\n #print \"tr1:\", tr1\n ## dims are now [(slice axes), (other axes)]\n \n ## make sure vectors are arrays\n if not isinstance(vectors, np.ndarray):\n vectors = np.array(vectors)\n if not isinstance(origin, np.ndarray):\n origin = np.array(origin)\n origin.shape = (len(axes),) + (1,)*len(shape)\n \n ## Build array of sample locations. \n grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes\n #print shape, grid.shape\n x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic\n x += origin\n #print \"X values:\"\n #print x\n ## iterate manually over unused axes since map_coordinates won't do it for us\n if have_scipy:\n extraShape = data.shape[len(axes):]\n output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)\n for inds in np.ndindex(*extraShape):\n ind = (Ellipsis,) + inds\n output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)\n else:\n # map_coordinates expects the indexes as the first axis, whereas\n # interpolateArray expects indexes at the last axis. \n tr = tuple(range(1,x.ndim)) + (0,)\n output = interpolateArray(data, x.transpose(tr))\n \n \n tr = list(range(output.ndim))\n trb = []\n for i in range(min(axes)):\n ind = tr1.index(i) + (len(shape)-len(axes))\n tr.remove(ind)\n trb.append(ind)\n tr2 = tuple(trb+tr)\n\n ## Untranspose array before returning\n output = output.transpose(tr2)\n if returnCoords:\n return (output, x)\n else:\n return output\n\ndef interpolateArray(data, x, default=0.0):\n \"\"\"\n N-dimensional interpolation similar scipy.ndimage.map_coordinates.\n \n This function returns linearly-interpolated values sampled from a regular\n grid of data. 
\n \n *data* is an array of any shape containing the values to be interpolated.\n *x* is an array with (shape[-1] <= data.ndim) containing the locations\n within *data* to interpolate. \n \n Returns array of shape (x.shape[:-1] + data.shape)\n \n For example, assume we have the following 2D image data::\n \n >>> data = np.array([[1, 2, 4 ],\n [10, 20, 40 ],\n [100, 200, 400]])\n \n To compute a single interpolated point from this data::\n \n >>> x = np.array([(0.5, 0.5)])\n >>> interpolateArray(data, x)\n array([ 8.25])\n \n To compute a 1D list of interpolated locations:: \n \n >>> x = np.array([(0.5, 0.5),\n (1.0, 1.0),\n (1.0, 2.0),\n (1.5, 0.0)])\n >>> interpolateArray(data, x)\n array([ 8.25, 20. , 40. , 55. ])\n \n To compute a 2D array of interpolated locations::\n \n >>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],\n [(1.0, 1.0), (1.5, 0.0)]])\n >>> interpolateArray(data, x)\n array([[ 8.25, 40. ],\n [ 20. , 55. ]])\n \n ..and so on. The *x* argument may have any shape as long as \n ```x.shape[-1] <= data.ndim```. In the case that \n ```x.shape[-1] < data.ndim```, then the remaining axes are simply \n broadcasted as usual. For example, we can interpolate one location\n from an entire row of the data::\n \n >>> x = np.array([[0.5]])\n >>> interpolateArray(data, x)\n array([[ 5.5, 11. , 22. ]])\n\n This is useful for interpolating from arrays of colors, vertexes, etc.\n \"\"\"\n \n prof = debug.Profiler()\n \n result = np.empty(x.shape[:-1] + data.shape, dtype=data.dtype)\n nd = data.ndim\n md = x.shape[-1]\n\n # First we generate arrays of indexes that are needed to \n # extract the data surrounding each point\n fields = np.mgrid[(slice(0,2),) * md]\n xmin = np.floor(x).astype(int)\n xmax = xmin + 1\n indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])\n fieldInds = []\n totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes\n for ax in range(md):\n mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1) \n # keep track of points that need to be set to default\n totalMask &= mask \n \n # ..and keep track of indexes that are out of bounds \n # (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out\n # of bounds, but the interpolation will work anyway)\n mask &= (xmax[...,ax] < data.shape[ax])\n axisIndex = indexes[...,ax][fields[ax]]\n #axisMask = mask.astype(np.ubyte).reshape((1,)*(fields.ndim-1) + mask.shape)\n axisIndex[axisIndex < 0] = 0\n axisIndex[axisIndex >= data.shape[ax]] = 0\n fieldInds.append(axisIndex)\n prof()\n \n # Get data values surrounding each requested point\n # fieldData[..., i] contains all 2**nd values needed to interpolate x[i]\n fieldData = data[tuple(fieldInds)]\n prof()\n \n ## Interpolate\n s = np.empty((md,) + fieldData.shape, dtype=float)\n dx = x - xmin\n # reshape fields for arithmetic against dx\n for ax in range(md):\n f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))\n sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])\n sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))\n s[ax] = sax\n s = np.product(s, axis=0)\n result = fieldData * s\n for i in range(md):\n result = result.sum(axis=0)\n\n prof()\n totalMask.shape = totalMask.shape + (1,) * (nd - md)\n result[~totalMask] = default\n prof()\n return result\n\n\ndef transformToArray(tr):\n \"\"\"\n Given a QTransform, return a 3x3 numpy array.\n Given a QMatrix4x4, return a 4x4 numpy array.\n \n Example: map an array of x,y coordinates through a transform::\n \n ## coordinates to map are (1,5), (2,6), (3,7), and 
(4,8)\n coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work\n \n ## Make an example transform\n tr = QtGui.QTransform()\n tr.translate(3,4)\n tr.scale(2, 0.1)\n \n ## convert to array\n m = pg.transformToArray()[:2] # ignore the perspective portion of the transformation\n \n ## map coordinates through transform\n mapped = np.dot(m, coords)\n \"\"\"\n #return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])\n ## The order of elements given by the method names m11..m33 is misleading--\n ## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in\n ## a transformation matrix. However, with QTransform these values appear at m31 and m32.\n ## So the correct interpretation is transposed:\n if isinstance(tr, QtGui.QTransform):\n return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])\n elif isinstance(tr, QtGui.QMatrix4x4):\n return np.array(tr.copyDataTo()).reshape(4,4)\n else:\n raise Exception(\"Transform argument must be either QTransform or QMatrix4x4.\")\n\ndef transformCoordinates(tr, coords, transpose=False):\n \"\"\"\n Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.\n The shape of coords must be (2,...) or (3,...)\n The mapping will _ignore_ any perspective transformations.\n \n For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.\n Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To \n allow this, use transpose=True.\n \n \"\"\"\n \n if transpose:\n ## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.\n coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))\n \n nd = coords.shape[0]\n if isinstance(tr, np.ndarray):\n m = tr\n else:\n m = transformToArray(tr)\n m = m[:m.shape[0]-1] # remove perspective\n \n ## If coords are 3D and tr is 2D, assume no change for Z axis\n if m.shape == (2,3) and nd == 3:\n m2 = np.zeros((3,4))\n m2[:2, :2] = m[:2,:2]\n m2[:2, 3] = m[:2,2]\n m2[2,2] = 1\n m = m2\n \n ## if coords are 2D and tr is 3D, ignore Z axis\n if m.shape == (3,4) and nd == 2:\n m2 = np.empty((2,3))\n m2[:,:2] = m[:2,:2]\n m2[:,2] = m[:2,3]\n m = m2\n \n ## reshape tr and coords to prepare for multiplication\n m = m.reshape(m.shape + (1,)*(coords.ndim-1))\n coords = coords[np.newaxis, ...]\n \n # separate scale/rotate and translation \n translate = m[:,-1] \n m = m[:, :-1]\n \n ## map coordinates and return\n mapped = (m*coords).sum(axis=1) ## apply scale/rotate\n mapped += translate\n \n if transpose:\n ## move first axis to end.\n mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))\n return mapped\n \n \n\n \ndef solve3DTransform(points1, points2):\n \"\"\"\n Find a 3D transformation matrix that maps points1 onto points2.\n Points must be specified as either lists of 4 Vectors or \n (4, 3) arrays.\n \"\"\"\n import numpy.linalg\n pts = []\n for inp in (points1, points2):\n if isinstance(inp, np.ndarray):\n A = np.empty((4,4), dtype=float)\n A[:,:3] = inp[:,:3]\n A[:,3] = 1.0\n else:\n A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])\n pts.append(A)\n \n ## solve 3 sets of linear equations to determine transformation matrix elements\n matrix = np.zeros((4,4))\n for i in range(3):\n ## solve Ax = B; x is one row of the desired transformation matrix\n matrix[i] = 
numpy.linalg.solve(pts[0], pts[1][:,i]) \n \n return matrix\n \ndef solveBilinearTransform(points1, points2):\n \"\"\"\n Find a bilinear transformation matrix (2x4) that maps points1 onto points2.\n Points must be specified as a list of 4 Vector, Point, QPointF, etc.\n \n To use this matrix to map a point [x,y]::\n \n mapped = np.dot(matrix, [x*y, x, y, 1])\n \"\"\"\n import numpy.linalg\n ## A is 4 rows (points) x 4 columns (xy, x, y, 1)\n ## B is 4 rows (points) x 2 columns (x, y)\n A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])\n B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])\n \n ## solve 2 sets of linear equations to determine transformation matrix elements\n matrix = np.zeros((2,4))\n for i in range(2):\n matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix\n \n return matrix\n \ndef rescaleData(data, scale, offset, dtype=None):\n \"\"\"Return data rescaled and optionally cast to a new dtype::\n \n data => (data-offset) * scale\n \n Uses scipy.weave (if available) to improve performance.\n \"\"\"\n if dtype is None:\n dtype = data.dtype\n else:\n dtype = np.dtype(dtype)\n \n try:\n if not getConfigOption('useWeave'):\n raise Exception('Weave is disabled; falling back to slower version.')\n try:\n import scipy.weave\n except ImportError:\n raise Exception('scipy.weave is not importable; falling back to slower version.')\n \n ## require native dtype when using weave\n if not data.dtype.isnative:\n data = data.astype(data.dtype.newbyteorder('='))\n if not dtype.isnative:\n weaveDtype = dtype.newbyteorder('=')\n else:\n weaveDtype = dtype\n \n newData = np.empty((data.size,), dtype=weaveDtype)\n flat = np.ascontiguousarray(data).reshape(data.size)\n size = data.size\n \n code = \"\"\"\n double sc = (double)scale;\n double off = (double)offset;\n for( int i=0; i<size; i++ ) {\n newData[i] = ((double)flat[i] - off) * sc;\n }\n \"\"\"\n scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')\n if dtype != weaveDtype:\n newData = newData.astype(dtype)\n data = newData.reshape(data.shape)\n except:\n if getConfigOption('useWeave'):\n if getConfigOption('weaveDebug'):\n debug.printExc(\"Error; disabling weave.\")\n setConfigOptions(useWeave=False)\n \n #p = np.poly1d([scale, -offset*scale])\n #data = p(data).astype(dtype)\n d2 = data-offset\n d2 *= scale\n data = d2.astype(dtype)\n return data\n \ndef applyLookupTable(data, lut):\n \"\"\"\n Uses values in *data* as indexes to select values from *lut*.\n The returned data has shape data.shape + lut.shape[1:]\n \n Note: color gradient lookup tables can be generated using GradientWidget.\n \"\"\"\n if data.dtype.kind not in ('i', 'u'):\n data = data.astype(int)\n \n return np.take(lut, data, axis=0, mode='clip') \n \n\ndef makeRGBA(*args, **kwds):\n \"\"\"Equivalent to makeARGB(..., useRGBA=True)\"\"\"\n kwds['useRGBA'] = True\n return makeARGB(*args, **kwds)\n\ndef makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False): \n \"\"\" \n Convert an array of values into an ARGB array suitable for building QImages, OpenGL textures, etc.\n \n Returns the ARGB array (values 0-255) and a boolean indicating whether there is alpha channel data.\n This is a two stage process:\n \n 1) Rescale the data based on the values in the *levels* argument (min, max).\n 2) Determine the final output by passing the rescaled values through a lookup table.\n \n Both stages are optional.\n \n 
============== ==================================================================================\n **Arguments:**\n data numpy array of int/float types. If \n levels List [min, max]; optionally rescale data before converting through the\n lookup table. The data is rescaled such that min->0 and max->*scale*::\n \n rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))\n \n It is also possible to use a 2D (N,2) array of values for levels. In this case,\n it is assumed that each pair of min,max values in the levels array should be \n applied to a different subset of the input data (for example, the input data may \n already have RGB values and the levels are used to independently scale each \n channel). The use of this feature requires that levels.shape[0] == data.shape[-1].\n scale The maximum value to which data will be rescaled before being passed through the \n lookup table (or returned if there is no lookup table). By default this will\n be set to the length of the lookup table, or 256 is no lookup table is provided.\n For OpenGL color specifications (as in GLColor4f) use scale=1.0\n lut Optional lookup table (array with dtype=ubyte).\n Values in data will be converted to color by indexing directly from lut.\n The output data shape will be input.shape + lut.shape[1:].\n \n Note: the output of makeARGB will have the same dtype as the lookup table, so\n for conversion to QImage, the dtype must be ubyte.\n \n Lookup tables can be built using GradientWidget.\n useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures). \n The default is False, which returns in ARGB order for use with QImage \n (Note that 'ARGB' is a term used by the Qt documentation; the _actual_ order \n is BGRA).\n ============== ==================================================================================\n \"\"\"\n profile = debug.Profiler()\n \n if lut is not None and not isinstance(lut, np.ndarray):\n lut = np.array(lut)\n if levels is not None and not isinstance(levels, np.ndarray):\n levels = np.array(levels)\n \n if levels is not None:\n if levels.ndim == 1:\n if len(levels) != 2:\n raise Exception('levels argument must have length 2')\n elif levels.ndim == 2:\n if lut is not None and lut.ndim > 1:\n raise Exception('Cannot make ARGB data when bot levels and lut have ndim > 2')\n if levels.shape != (data.shape[-1], 2):\n raise Exception('levels must have shape (data.shape[-1], 2)')\n else:\n print(levels)\n raise Exception(\"levels argument must be 1D or 2D.\")\n\n profile()\n\n if scale is None:\n if lut is not None:\n scale = lut.shape[0]\n else:\n scale = 255.\n\n ## Apply levels if given\n if levels is not None:\n \n if isinstance(levels, np.ndarray) and levels.ndim == 2:\n ## we are going to rescale each channel independently\n if levels.shape[0] != data.shape[-1]:\n raise Exception(\"When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])\")\n newData = np.empty(data.shape, dtype=int)\n for i in range(data.shape[-1]):\n minVal, maxVal = levels[i]\n if minVal == maxVal:\n maxVal += 1e-16\n newData[...,i] = rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=int)\n data = newData\n else:\n minVal, maxVal = levels\n if minVal == maxVal:\n maxVal += 1e-16\n if maxVal == minVal:\n data = rescaleData(data, 1, minVal, dtype=int)\n else:\n data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=int)\n\n profile()\n\n ## apply LUT if given\n if lut is not None:\n data = 
applyLookupTable(data, lut)\n else:\n if data.dtype != np.ubyte: ## compare dtypes by value; 'is not' never matches a dtype instance\n data = np.clip(data, 0, 255).astype(np.ubyte)\n\n profile()\n\n ## copy data into ARGB ordered array\n imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)\n\n profile()\n\n if useRGBA:\n order = [0,1,2,3] ## array comes out RGBA\n else:\n order = [2,1,0,3] ## for some reason, the colors line up as BGR in the final image.\n \n if data.ndim == 2:\n # This is tempting:\n # imgData[..., :3] = data[..., np.newaxis]\n # ..but it turns out this is faster:\n for i in range(3):\n imgData[..., i] = data\n elif data.shape[2] == 1:\n for i in range(3):\n imgData[..., i] = data[..., 0]\n else:\n for i in range(0, data.shape[2]):\n imgData[..., i] = data[..., order[i]] \n \n profile()\n \n if data.ndim == 2 or data.shape[2] == 3:\n alpha = False\n imgData[..., 3] = 255\n else:\n alpha = True\n \n profile()\n return imgData, alpha\n\n\ndef makeQImage(imgData, alpha=None, copy=True, transpose=True):\n \"\"\"\n Turn an ARGB array into QImage.\n By default, the data is copied; changes to the array will not\n be reflected in the image. The image will be given a 'data' attribute\n pointing to the array; this keeps a reference alive to prevent Python\n from freeing that memory while the image is in use.\n \n ============== ===================================================================\n **Arguments:**\n imgData Array of data to convert. Must have shape (width, height, 3 or 4) \n and dtype=ubyte. The order of values in the 3rd axis must be \n (b, g, r, a).\n alpha If True, the QImage returned will have format ARGB32. If False,\n the format will be RGB32. By default, _alpha_ is True if\n array.shape[2] == 4.\n copy If True, the data is copied before converting to QImage.\n If False, the new QImage points directly to the data in the array.\n Note that the array must be contiguous for this to work\n (see numpy.ascontiguousarray).\n transpose If True (the default), the array x/y axes are transposed before \n creating the image. 
Note that Qt expects the axes to be in \n (height, width) order whereas pyqtgraph usually prefers the \n opposite.\n ============== =================================================================== \n \"\"\"\n ## create QImage from buffer\n profile = debug.Profiler()\n \n ## If we didn't explicitly specify alpha, check the array shape.\n if alpha is None:\n alpha = (imgData.shape[2] == 4)\n \n copied = False\n if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)\n if copy is True:\n d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)\n d2[:,:,:3] = imgData\n d2[:,:,3] = 255\n imgData = d2\n copied = True\n else:\n raise Exception('Array has only 3 channels; cannot make QImage without copying.')\n \n if alpha:\n imgFormat = QtGui.QImage.Format_ARGB32\n else:\n imgFormat = QtGui.QImage.Format_RGB32\n \n if transpose:\n imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite\n\n profile()\n\n if not imgData.flags['C_CONTIGUOUS']:\n if copy is False:\n extra = ' (try setting transpose=False)' if transpose else ''\n raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)\n imgData = np.ascontiguousarray(imgData)\n copied = True\n \n if copy is True and copied is False:\n imgData = imgData.copy()\n \n if USE_PYSIDE:\n ch = ctypes.c_char.from_buffer(imgData, 0)\n img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)\n else:\n #addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))\n ## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)\n ## So we first attempt the 4.9.6 API, then fall back to 4.9.3\n #addr = ctypes.c_char.from_buffer(imgData, 0)\n #try:\n #img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)\n #except TypeError: \n #addr = ctypes.addressof(addr)\n #img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)\n try:\n img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)\n except:\n if copy:\n # does not leak memory, is not mutable\n img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)\n else:\n # mutable, but leaks memory\n img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)\n \n img.data = imgData\n return img\n #try:\n #buf = imgData.data\n #except AttributeError: ## happens when image data is non-contiguous\n #buf = imgData.data\n \n #profiler()\n #qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)\n #profiler()\n #qimage.data = imgData\n #return qimage\n\ndef imageToArray(img, copy=False, transpose=True):\n \"\"\"\n Convert a QImage into numpy array. 
The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.\n By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if \n the QImage is collected before the array, there may be trouble).\n The array will have shape (width, height, (b,g,r,a)).\n \"\"\"\n fmt = img.format()\n ptr = img.bits()\n if USE_PYSIDE:\n arr = np.frombuffer(ptr, dtype=np.ubyte)\n else:\n ptr.setsize(img.byteCount())\n arr = np.asarray(ptr)\n if img.byteCount() != arr.size * arr.itemsize:\n # Required for Python 2.6, PyQt 4.10\n # If this works on all platforms, then there is no need to use np.asarray..\n arr = np.frombuffer(ptr, np.ubyte, img.byteCount())\n \n if fmt == img.Format_RGB32:\n arr = arr.reshape(img.height(), img.width(), 4) # RGB32 is also stored as 4 bytes per pixel (0xffRRGGBB), so reshaping to 3 channels would fail\n elif fmt == img.Format_ARGB32 or fmt == img.Format_ARGB32_Premultiplied:\n arr = arr.reshape(img.height(), img.width(), 4)\n \n if copy:\n arr = arr.copy()\n \n if transpose:\n return arr.transpose((1,0,2))\n else:\n return arr\n \ndef colorToAlpha(data, color):\n \"\"\"\n Given an RGBA image in *data*, convert *color* to be transparent. \n *data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be \n an array (3) of ubyte values.\n This is particularly useful for images that have a black or white background.\n \n Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c\n Credit:\n /*\n * Color To Alpha plug-in v1.0 by Seth Burgess, [email protected] 1999/05/14\n * with algorithm by clahey\n */\n \n \"\"\"\n data = data.astype(float)\n if data.shape[-1] == 3: ## add alpha channel if needed\n d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)\n d2[...,:3] = data\n d2[...,3] = 255\n data = d2\n \n color = color.astype(float)\n alpha = np.zeros(data.shape[:2]+(3,), dtype=float)\n output = data.copy()\n \n for i in [0,1,2]:\n d = data[...,i]\n c = color[i]\n mask = d > c\n alpha[...,i][mask] = (d[mask] - c) / (255. - c)\n imask = d < c\n alpha[...,i][imask] = (c - d[imask]) / c\n \n output[...,3] = alpha.max(axis=2) * 255.\n \n mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel\n correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha\n for i in [0,1,2]:\n output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]\n output[...,3][mask] *= data[...,3][mask] / 255. 
## combine computed and previous alpha values\n \n return np.clip(output, 0, 255).astype(np.ubyte)\n\ndef gaussianFilter(data, sigma):\n \"\"\"\n Drop-in replacement for scipy.ndimage.gaussian_filter.\n \n (note: results are only approximately equal to the output of\n gaussian_filter)\n \"\"\"\n if np.isscalar(sigma):\n sigma = (sigma,) * data.ndim\n \n baseline = data.mean()\n filtered = data - baseline\n for ax in range(data.ndim):\n s = sigma[ax]\n if s == 0:\n continue\n \n # generate 1D gaussian kernel\n ksize = int(s * 6)\n x = np.arange(-ksize, ksize)\n kernel = np.exp(-x**2 / (2*s**2))\n kshape = [1,] * data.ndim\n kshape[ax] = len(kernel)\n kernel = kernel.reshape(kshape)\n \n # convolve as product of FFTs\n shape = data.shape[ax] + ksize\n scale = 1.0 / (abs(s) * (2*np.pi)**0.5)\n filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * \n np.fft.rfft(kernel, shape, axis=ax), \n axis=ax)\n \n # clip off extra data\n sl = [slice(None)] * data.ndim\n sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)\n filtered = filtered[tuple(sl)] # index with a tuple; indexing with a list is deprecated in newer numpy\n return filtered + baseline\n \n \ndef downsample(data, n, axis=0, xvals='subsample'):\n \"\"\"Downsample by averaging points together across axis.\n If multiple axes are specified, runs once per axis.\n If a metaArray is given, then the axis values can be either subsampled\n or downsampled to match.\n \"\"\"\n ma = None\n if (hasattr(data, 'implements') and data.implements('MetaArray')):\n ma = data\n data = data.view(np.ndarray)\n \n \n if hasattr(axis, '__len__'):\n if not hasattr(n, '__len__'):\n n = [n]*len(axis)\n for i in range(len(axis)):\n data = downsample(data, n[i], axis[i])\n return data\n \n nPts = int(data.shape[axis] / n)\n s = list(data.shape)\n s[axis] = nPts\n s.insert(axis+1, n)\n sl = [slice(None)] * data.ndim\n sl[axis] = slice(0, nPts*n)\n d1 = data[tuple(sl)]\n #print d1.shape, s\n d1.shape = tuple(s)\n d2 = d1.mean(axis+1)\n \n if ma is None:\n return d2\n else:\n info = ma.infoCopy()\n if 'values' in info[axis]:\n if xvals == 'subsample':\n info[axis]['values'] = info[axis]['values'][::n][:nPts]\n elif xvals == 'downsample':\n info[axis]['values'] = downsample(info[axis]['values'], n)\n return MetaArray(d2, info=info)\n\n\ndef arrayToQPath(x, y, connect='all'):\n \"\"\"Convert an array of x,y coordinates to QPainterPath as efficiently as possible.\n The *connect* argument may be 'all', indicating that each point should be\n connected to the next; 'pairs', indicating that each pair of points\n should be connected; 'finite', indicating that points are connected only\n where both coordinates are finite; or an array of int32 values (0 or 1)\n indicating connections.\n \"\"\"\n\n ## Create all vertices in path. The method used below creates a binary format so that all\n ## vertices can be read in at once. 
This binary format may change in future versions of Qt,\n ## so the original (slower) method is left here for emergencies:\n #path.moveTo(x[0], y[0])\n #if connect == 'all':\n #for i in range(1, y.shape[0]):\n #path.lineTo(x[i], y[i])\n #elif connect == 'pairs':\n #for i in range(1, y.shape[0]):\n #if i%2 == 0:\n #path.lineTo(x[i], y[i])\n #else:\n #path.moveTo(x[i], y[i])\n #elif isinstance(connect, np.ndarray):\n #for i in range(1, y.shape[0]):\n #if connect[i] == 1:\n #path.lineTo(x[i], y[i])\n #else:\n #path.moveTo(x[i], y[i])\n #else:\n #raise Exception('connect argument must be \"all\", \"pairs\", or array')\n\n ## Speed this up using >> operator\n ## Format is:\n ## numVerts(i4) 0(i4)\n ## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect\n ## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex\n ## ...\n ## 0(i4)\n ##\n ## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')\n\n path = QtGui.QPainterPath()\n\n #profiler = debug.Profiler()\n n = x.shape[0]\n # create empty array, pad with extra space on either end\n arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])\n # write first two integers\n #profiler('allocate empty')\n byteview = arr.view(dtype=np.ubyte)\n byteview[:12] = 0\n byteview.data[12:20] = struct.pack('>ii', n, 0)\n #profiler('pack header')\n # Fill array with vertex values\n arr[1:-1]['x'] = x\n arr[1:-1]['y'] = y\n\n # decide which points are connected by lines; check the string options first,\n # then fall back to an explicit connection array\n if connect == 'all':\n arr[1:-1]['c'] = 1\n elif connect == 'pairs':\n connect = np.empty((n//2,2), dtype=np.int32) # integer division; n/2 is a float in Python 3\n if connect.size != n:\n raise Exception(\"x,y array lengths must be multiple of 2 to use connect='pairs'\")\n connect[:,0] = 1\n connect[:,1] = 0\n arr[1:-1]['c'] = connect.flatten()\n elif connect == 'finite':\n arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)\n elif isinstance(connect, np.ndarray):\n arr[1:-1]['c'] = connect\n else:\n raise Exception('connect argument must be \"all\", \"pairs\", \"finite\", or array')\n\n #profiler('fill array')\n # write last 0\n lastInd = 20*(n+1)\n byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)\n #profiler('footer')\n # create datastream object and stream into path\n\n ## Avoiding this method because QByteArray(str) leaks memory in PySide\n #buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here\n\n path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away\n try:\n buf = QtCore.QByteArray.fromRawData(path.strn)\n except TypeError:\n buf = QtCore.QByteArray(bytes(path.strn))\n #profiler('create buffer')\n ds = QtCore.QDataStream(buf)\n\n ds >> path\n #profiler('load')\n\n return path\n\n#def isosurface(data, level):\n #\"\"\"\n #Generate isosurface from volumetric data using marching tetrahedra algorithm.\n #See Paul Bourke, \"Polygonising a Scalar Field Using Tetrahedrons\" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)\n \n #*data* 3D numpy array of scalar values\n #*level* The level at which to generate an isosurface\n #\"\"\"\n \n #facets = []\n \n ### mark everything below the isosurface level\n #mask = data < level\n \n #### make eight sub-fields \n #fields = np.empty((2,2,2), dtype=object)\n #slices = [slice(0,-1), slice(1,None)]\n #for i in [0,1]:\n #for j in [0,1]:\n #for k in [0,1]:\n #fields[i,j,k] = mask[slices[i], slices[j], slices[k]]\n \n \n \n ### split each cell into 6 tetrahedra\n ### these all have the same 'orientation'; points 1,2,3 circle \n ### clockwise around point 0\n #tetrahedra = [\n 
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],\n #[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],\n #[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],\n #[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],\n #[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],\n #[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]\n #]\n \n ### each tetrahedron will be assigned an index\n ### which determines how to generate its facets.\n ### this structure is: \n ### facets[index][facet1, facet2, ...]\n ### where each facet is triangular and its points are each \n ### interpolated between two points on the tetrahedron\n ### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]\n ### facet points always circle clockwise if you are looking \n ### at them from below the isosurface.\n #indexFacets = [\n #[], ## all above\n #[[(0,1), (0,2), (0,3)]], # 0 below\n #[[(1,0), (1,3), (1,2)]], # 1 below\n #[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below\n #[[(2,0), (2,1), (2,3)]], # 2 below\n #[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below\n #[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below\n #[[(3,0), (3,1), (3,2)]], # 3 above\n #[[(3,0), (3,2), (3,1)]], # 3 below\n #[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below\n #[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below\n #[[(2,0), (2,3), (2,1)]], # 0,1,3 below\n #[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below\n #[[(1,0), (1,2), (1,3)]], # 0,2,3 below\n #[[(0,1), (0,3), (0,2)]], # 1,2,3 below\n #[] ## all below\n #]\n \n #for tet in tetrahedra:\n \n ### get the 4 fields for this tetrahedron\n #tetFields = [fields[c] for c in tet]\n \n ### generate an index for each grid cell\n #index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8\n \n ### add facets\n #for i in xrange(index.shape[0]): # data x-axis\n #for j in xrange(index.shape[1]): # data y-axis\n #for k in xrange(index.shape[2]): # data z-axis\n #for f in indexFacets[index[i,j,k]]: # faces to generate for this tet\n #pts = []\n #for l in [0,1,2]: # points in this face\n #p1 = tet[f[l][0]] # tet corner 1\n #p2 = tet[f[l][1]] # tet corner 2\n #pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners\n #facets.append(pts)\n\n #return facets\n \n\ndef isocurve(data, level, connected=False, extendToEdge=False, path=False):\n \"\"\"\n Generate isocurve from 2D data using marching squares algorithm.\n \n ============== =========================================================\n **Arguments:**\n data 2D numpy array of scalar values\n level The level at which to generate an isosurface\n connected If False, return a single long list of point pairs\n If True, return multiple long lists of connected point \n locations. (This is slower but better for drawing \n continuous lines)\n extendToEdge If True, extend the curves to reach the exact edges of \n the data. \n path if True, return a QPainterPath rather than a list of \n vertex coordinates. 
This forces connected=True.\n ============== =========================================================\n \n This function is SLOW; plenty of room for optimization here.\n \"\"\" \n \n if path is True:\n connected = True\n \n if extendToEdge:\n d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)\n d2[1:-1, 1:-1] = data\n d2[0, 1:-1] = data[0]\n d2[-1, 1:-1] = data[-1]\n d2[1:-1, 0] = data[:, 0]\n d2[1:-1, -1] = data[:, -1]\n d2[0,0] = d2[0,1]\n d2[0,-1] = d2[1,-1]\n d2[-1,0] = d2[-1,1]\n d2[-1,-1] = d2[-1,-2]\n data = d2\n \n sideTable = [\n [],\n [0,1],\n [1,2],\n [0,2],\n [0,3],\n [1,3],\n [0,1,2,3],\n [2,3],\n [2,3],\n [0,1,2,3],\n [1,3],\n [0,3],\n [0,2],\n [1,2],\n [0,1],\n []\n ]\n \n edgeKey=[\n [(0,1), (0,0)],\n [(0,0), (1,0)],\n [(1,0), (1,1)],\n [(1,1), (0,1)]\n ]\n \n \n lines = []\n \n ## mark everything below the isosurface level\n mask = data < level\n \n ### make four sub-fields and compute indexes for grid cells\n index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)\n fields = np.empty((2,2), dtype=object)\n slices = [slice(0,-1), slice(1,None)]\n for i in [0,1]:\n for j in [0,1]:\n fields[i,j] = mask[slices[i], slices[j]]\n #vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme\n vertIndex = i+2*j\n #print i,j,k,\" : \", fields[i,j,k], 2**vertIndex\n index += fields[i,j] * 2**vertIndex\n #print index\n #print index\n \n ## add lines\n for i in range(index.shape[0]): # data x-axis\n for j in range(index.shape[1]): # data y-axis \n sides = sideTable[index[i,j]]\n for l in range(0, len(sides), 2): ## faces for this grid cell\n edges = sides[l:l+2]\n pts = []\n for m in [0,1]: # points in this face\n p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge\n p2 = edgeKey[edges[m]][1]\n v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2\n v2 = data[i+p2[0], j+p2[1]]\n f = (level-v1) / (v2-v1)\n fi = 1.0 - f\n p = ( ## interpolate between corners\n p1[0]*fi + p2[0]*f + i + 0.5, \n p1[1]*fi + p2[1]*f + j + 0.5\n )\n if extendToEdge:\n ## check bounds\n p = (\n min(data.shape[0]-2, max(0, p[0]-1)),\n min(data.shape[1]-2, max(0, p[1]-1)), \n )\n if connected:\n gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2\n pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)\n else:\n pts.append(p)\n \n lines.append(pts)\n\n if not connected:\n return lines\n \n ## turn disjoint list of segments into continuous lines\n\n #lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]\n #lines = [[(float(a), a), (float(b), b)] for a,b in lines]\n points = {} ## maps each point to its connections\n for a,b in lines:\n if a[1] not in points:\n points[a[1]] = []\n points[a[1]].append([a,b])\n if b[1] not in points:\n points[b[1]] = []\n points[b[1]].append([b,a])\n\n ## rearrange into chains\n for k in list(points.keys()):\n try:\n chains = points[k]\n except KeyError: ## already used this point elsewhere\n continue\n #print \"===========\", k\n for chain in chains:\n #print \" chain:\", chain\n x = None\n while True:\n if x == chain[-1][1]:\n break ## nothing left to do on this chain\n \n x = chain[-1][1]\n if x == k: \n break ## chain has looped; we're done and can ignore the opposite chain\n y = chain[-2][1]\n connects = points[x]\n for conn in connects[:]:\n if conn[1][1] != y:\n #print \" ext:\", conn\n chain.extend(conn[1:])\n #print \" del:\", x\n del points[x]\n if 
chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction\n chains.pop()\n break\n \n\n ## extract point locations \n lines = []\n for chain in points.values():\n if len(chain) == 2:\n chain = chain[1][1:][::-1] + chain[0] # join together ends of chain\n else:\n chain = chain[0]\n lines.append([p[0] for p in chain])\n \n if not path:\n return lines ## a list of pairs of points\n \n path = QtGui.QPainterPath()\n for line in lines:\n path.moveTo(*line[0])\n for p in line[1:]:\n path.lineTo(*p)\n \n return path\n \n \ndef traceImage(image, values, smooth=0.5):\n \"\"\"\n Convert an image to a set of QPainterPath curves.\n One curve will be generated for each item in *values*; each curve outlines the area\n of the image that is closer to its value than to any others.\n \n If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)\n The parameter *smooth* is expressed in pixels.\n \"\"\"\n try:\n import scipy.ndimage as ndi\n except ImportError:\n raise Exception(\"traceImage() requires the package scipy.ndimage, but it is not importable.\")\n \n if values.ndim == 2:\n values = values.T\n values = values[np.newaxis, np.newaxis, ...].astype(float)\n image = image[..., np.newaxis].astype(float)\n diff = np.abs(image-values)\n if values.ndim == 4:\n diff = diff.sum(axis=2)\n \n labels = np.argmin(diff, axis=2)\n \n paths = []\n for i in range(diff.shape[-1]): \n d = (labels==i).astype(float)\n d = gaussianFilter(d, (smooth, smooth))\n lines = isocurve(d, 0.5, connected=True, extendToEdge=True)\n path = QtGui.QPainterPath()\n for line in lines:\n path.moveTo(*line[0])\n for p in line[1:]:\n path.lineTo(*p)\n \n paths.append(path)\n return paths\n \n \n \nIsosurfaceDataCache = None\ndef isosurface(data, level):\n \"\"\"\n Generate isosurface from volumetric data using marching cubes algorithm.\n See Paul Bourke, \"Polygonising a Scalar Field\" \n (http://paulbourke.net/geometry/polygonise/)\n \n *data* 3D numpy array of scalar values\n *level* The level at which to generate an isosurface\n \n Returns an array of vertex coordinates (Nv, 3) and an array of \n per-face vertex indexes (Nf, 3) \n \"\"\"\n ## For improvement, see:\n ## \n ## Efficient implementation of Marching Cubes' cases with topological guarantees.\n ## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.\n ## Journal of Graphics Tools 8(2): pp. 
1-15 (december 2003)\n \n ## Precompute lookup tables on the first run\n global IsosurfaceDataCache\n if IsosurfaceDataCache is None:\n ## map from grid cell index to edge index.\n ## grid cell index tells us which corners are below the isosurface,\n ## edge index tells us which edges are cut by the isosurface.\n ## (Data stolen from Bourk; see above.)\n edgeTable = np.array([\n 0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,\n 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,\n 0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,\n 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,\n 0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,\n 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,\n 0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,\n 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,\n 0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,\n 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,\n 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,\n 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,\n 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,\n 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,\n 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,\n 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,\n 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,\n 0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,\n 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,\n 0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,\n 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,\n 0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,\n 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,\n 0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,\n 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,\n 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,\n 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,\n 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,\n 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,\n 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,\n 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,\n 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 ], dtype=np.uint16)\n \n ## Table of triangles to use for filling each grid cell.\n ## Each set of three integers tells us which three edges to\n ## draw a triangle between.\n ## (Data stolen from Bourk; see above.)\n triTable = [\n [],\n [0, 8, 3],\n [0, 1, 9],\n [1, 8, 3, 9, 8, 1],\n [1, 2, 10],\n [0, 8, 3, 1, 2, 10],\n [9, 2, 10, 0, 2, 9],\n [2, 8, 3, 2, 10, 8, 10, 9, 8],\n [3, 11, 2],\n [0, 11, 2, 8, 11, 0],\n [1, 9, 0, 2, 3, 11],\n [1, 11, 2, 1, 9, 11, 9, 8, 11],\n [3, 10, 1, 11, 10, 3],\n [0, 10, 1, 0, 8, 10, 8, 11, 10],\n [3, 9, 0, 3, 11, 9, 11, 10, 9],\n [9, 8, 10, 10, 8, 11],\n [4, 7, 8],\n [4, 3, 0, 7, 3, 4],\n [0, 1, 9, 8, 4, 7],\n [4, 1, 9, 4, 7, 1, 7, 3, 1],\n [1, 2, 10, 8, 4, 7],\n [3, 4, 7, 3, 0, 4, 1, 2, 10],\n [9, 2, 10, 9, 0, 2, 8, 4, 7],\n [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],\n [8, 4, 7, 3, 11, 2],\n [11, 4, 7, 11, 2, 4, 2, 0, 4],\n [9, 0, 1, 8, 4, 7, 2, 3, 11],\n [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],\n [3, 10, 1, 3, 11, 10, 7, 8, 4],\n [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],\n [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],\n [4, 7, 11, 4, 11, 9, 9, 11, 10],\n [9, 5, 4],\n [9, 5, 4, 0, 8, 3],\n [0, 5, 4, 1, 5, 0],\n [8, 5, 4, 8, 3, 5, 3, 1, 5],\n [1, 2, 10, 9, 5, 4],\n [3, 0, 8, 1, 2, 10, 4, 9, 5],\n [5, 2, 10, 5, 4, 2, 4, 0, 2],\n [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],\n [9, 5, 
4, 2, 3, 11],\n [0, 11, 2, 0, 8, 11, 4, 9, 5],\n [0, 5, 4, 0, 1, 5, 2, 3, 11],\n [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],\n [10, 3, 11, 10, 1, 3, 9, 5, 4],\n [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],\n [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],\n [5, 4, 8, 5, 8, 10, 10, 8, 11],\n [9, 7, 8, 5, 7, 9],\n [9, 3, 0, 9, 5, 3, 5, 7, 3],\n [0, 7, 8, 0, 1, 7, 1, 5, 7],\n [1, 5, 3, 3, 5, 7],\n [9, 7, 8, 9, 5, 7, 10, 1, 2],\n [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],\n [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],\n [2, 10, 5, 2, 5, 3, 3, 5, 7],\n [7, 9, 5, 7, 8, 9, 3, 11, 2],\n [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],\n [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],\n [11, 2, 1, 11, 1, 7, 7, 1, 5],\n [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],\n [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],\n [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],\n [11, 10, 5, 7, 11, 5],\n [10, 6, 5],\n [0, 8, 3, 5, 10, 6],\n [9, 0, 1, 5, 10, 6],\n [1, 8, 3, 1, 9, 8, 5, 10, 6],\n [1, 6, 5, 2, 6, 1],\n [1, 6, 5, 1, 2, 6, 3, 0, 8],\n [9, 6, 5, 9, 0, 6, 0, 2, 6],\n [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],\n [2, 3, 11, 10, 6, 5],\n [11, 0, 8, 11, 2, 0, 10, 6, 5],\n [0, 1, 9, 2, 3, 11, 5, 10, 6],\n [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],\n [6, 3, 11, 6, 5, 3, 5, 1, 3],\n [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],\n [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],\n [6, 5, 9, 6, 9, 11, 11, 9, 8],\n [5, 10, 6, 4, 7, 8],\n [4, 3, 0, 4, 7, 3, 6, 5, 10],\n [1, 9, 0, 5, 10, 6, 8, 4, 7],\n [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],\n [6, 1, 2, 6, 5, 1, 4, 7, 8],\n [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],\n [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],\n [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],\n [3, 11, 2, 7, 8, 4, 10, 6, 5],\n [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],\n [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],\n [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],\n [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],\n [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],\n [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],\n [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],\n [10, 4, 9, 6, 4, 10],\n [4, 10, 6, 4, 9, 10, 0, 8, 3],\n [10, 0, 1, 10, 6, 0, 6, 4, 0],\n [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],\n [1, 4, 9, 1, 2, 4, 2, 6, 4],\n [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],\n [0, 2, 4, 4, 2, 6],\n [8, 3, 2, 8, 2, 4, 4, 2, 6],\n [10, 4, 9, 10, 6, 4, 11, 2, 3],\n [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],\n [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],\n [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],\n [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],\n [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],\n [3, 11, 6, 3, 6, 0, 0, 6, 4],\n [6, 4, 8, 11, 6, 8],\n [7, 10, 6, 7, 8, 10, 8, 9, 10],\n [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],\n [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],\n [10, 6, 7, 10, 7, 1, 1, 7, 3],\n [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],\n [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],\n [7, 8, 0, 7, 0, 6, 6, 0, 2],\n [7, 3, 2, 6, 7, 2],\n [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],\n [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],\n [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],\n [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],\n [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],\n [0, 9, 1, 11, 6, 7],\n [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],\n [7, 11, 6],\n [7, 6, 11],\n [3, 0, 8, 11, 7, 6],\n [0, 1, 9, 11, 7, 6],\n [8, 1, 9, 8, 3, 1, 11, 7, 6],\n [10, 1, 2, 6, 11, 7],\n [1, 2, 10, 3, 0, 8, 6, 11, 7],\n [2, 9, 0, 2, 10, 9, 6, 11, 7],\n [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],\n [7, 2, 3, 6, 2, 7],\n [7, 0, 8, 7, 6, 0, 6, 2, 0],\n [2, 7, 6, 2, 3, 7, 0, 1, 9],\n [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],\n [10, 7, 6, 10, 1, 7, 1, 
3, 7],\n [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],\n [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],\n [7, 6, 10, 7, 10, 8, 8, 10, 9],\n [6, 8, 4, 11, 8, 6],\n [3, 6, 11, 3, 0, 6, 0, 4, 6],\n [8, 6, 11, 8, 4, 6, 9, 0, 1],\n [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],\n [6, 8, 4, 6, 11, 8, 2, 10, 1],\n [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],\n [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],\n [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],\n [8, 2, 3, 8, 4, 2, 4, 6, 2],\n [0, 4, 2, 4, 6, 2],\n [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],\n [1, 9, 4, 1, 4, 2, 2, 4, 6],\n [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],\n [10, 1, 0, 10, 0, 6, 6, 0, 4],\n [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],\n [10, 9, 4, 6, 10, 4],\n [4, 9, 5, 7, 6, 11],\n [0, 8, 3, 4, 9, 5, 11, 7, 6],\n [5, 0, 1, 5, 4, 0, 7, 6, 11],\n [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],\n [9, 5, 4, 10, 1, 2, 7, 6, 11],\n [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],\n [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],\n [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],\n [7, 2, 3, 7, 6, 2, 5, 4, 9],\n [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],\n [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],\n [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],\n [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],\n [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],\n [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],\n [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],\n [6, 9, 5, 6, 11, 9, 11, 8, 9],\n [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],\n [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],\n [6, 11, 3, 6, 3, 5, 5, 3, 1],\n [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],\n [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],\n [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],\n [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],\n [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],\n [9, 5, 6, 9, 6, 0, 0, 6, 2],\n [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],\n [1, 5, 6, 2, 1, 6],\n [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],\n [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],\n [0, 3, 8, 5, 6, 10],\n [10, 5, 6],\n [11, 5, 10, 7, 5, 11],\n [11, 5, 10, 11, 7, 5, 8, 3, 0],\n [5, 11, 7, 5, 10, 11, 1, 9, 0],\n [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],\n [11, 1, 2, 11, 7, 1, 7, 5, 1],\n [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],\n [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],\n [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],\n [2, 5, 10, 2, 3, 5, 3, 7, 5],\n [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],\n [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],\n [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],\n [1, 3, 5, 3, 7, 5],\n [0, 8, 7, 0, 7, 1, 1, 7, 5],\n [9, 0, 3, 9, 3, 5, 5, 3, 7],\n [9, 8, 7, 5, 9, 7],\n [5, 8, 4, 5, 10, 8, 10, 11, 8],\n [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],\n [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],\n [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],\n [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],\n [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],\n [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],\n [9, 4, 5, 2, 11, 3],\n [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],\n [5, 10, 2, 5, 2, 4, 4, 2, 0],\n [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],\n [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],\n [8, 4, 5, 8, 5, 3, 3, 5, 1],\n [0, 4, 5, 1, 0, 5],\n [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],\n [9, 4, 5],\n [4, 11, 7, 4, 9, 11, 9, 10, 11],\n [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],\n [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],\n [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],\n [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],\n [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],\n [11, 7, 4, 11, 4, 2, 2, 4, 0],\n [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],\n [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],\n [9, 10, 7, 9, 7, 4, 10, 
2, 7, 8, 7, 0, 2, 0, 7],\n [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],\n [1, 10, 2, 8, 7, 4],\n [4, 9, 1, 4, 1, 7, 7, 1, 3],\n [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],\n [4, 0, 3, 7, 4, 3],\n [4, 8, 7],\n [9, 10, 8, 10, 11, 8],\n [3, 0, 9, 3, 9, 11, 11, 9, 10],\n [0, 1, 10, 0, 10, 8, 8, 10, 11],\n [3, 1, 10, 11, 3, 10],\n [1, 2, 11, 1, 11, 9, 9, 11, 8],\n [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],\n [0, 2, 11, 8, 0, 11],\n [3, 2, 11],\n [2, 3, 8, 2, 8, 10, 10, 8, 9],\n [9, 10, 2, 0, 9, 2],\n [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],\n [1, 10, 2],\n [1, 3, 8, 9, 1, 8],\n [0, 9, 1],\n [0, 3, 8],\n []\n ] \n edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)\n [0, 0, 0, 0], \n [1, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [1, 0, 1, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 2],\n [1, 0, 0, 2],\n [1, 1, 0, 2],\n [0, 1, 0, 2],\n #[9, 9, 9, 9] ## fake\n ], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.\n nTableFaces = np.array([len(f)/3 for f in triTable], dtype=np.ubyte)\n faceShiftTables = [None]\n for i in range(1,6):\n ## compute lookup table of index: vertexes mapping\n faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)\n faceTableInds = np.argwhere(nTableFaces == i)\n faceTableI[faceTableInds[:,0]] = np.array([triTable[j] for j in faceTableInds])\n faceTableI = faceTableI.reshape((len(triTable), i, 3))\n faceShiftTables.append(edgeShifts[faceTableI])\n \n ## Let's try something different:\n #faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)\n #for i,f in enumerate(triTable):\n #f = np.array(f + [12] * (15-len(f))).reshape(5,3)\n #faceTable[i] = edgeShifts[f]\n \n \n IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)\n else:\n faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache\n\n\n \n ## mark everything below the isosurface level\n mask = data < level\n \n ### make eight sub-fields and compute indexes for grid cells\n index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)\n fields = np.empty((2,2,2), dtype=object)\n slices = [slice(0,-1), slice(1,None)]\n for i in [0,1]:\n for j in [0,1]:\n for k in [0,1]:\n fields[i,j,k] = mask[slices[i], slices[j], slices[k]]\n vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme\n index += fields[i,j,k] * 2**vertIndex\n \n ### Generate table of edges that have been cut\n cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)\n edges = edgeTable[index]\n for i, shift in enumerate(edgeShifts[:12]): \n slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]\n cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i\n \n ## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions\n m = cutEdges > 0\n vertexInds = np.argwhere(m) ## argwhere is slow!\n vertexes = vertexInds[:,:3].astype(np.float32)\n dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])\n \n ## re-use the cutEdges array as a lookup table for vertex IDs\n cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])\n \n for i in [0,1,2]:\n vim = vertexInds[:,3] == i\n vi = vertexInds[vim, :3]\n viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)\n v1 = dataFlat[viFlat]\n v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]\n 
vertexes[vim,i] += (level-v1) / (v2-v1)\n \n ### compute the set of vertex indexes for each face. \n \n ## This works, but runs a bit slower.\n #cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face\n #cellInds = index[cells[:,0], cells[:,1], cells[:,2]]\n #verts = faceTable[cellInds]\n #mask = verts[...,0,0] != 9\n #verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges\n #verts = verts[mask]\n #faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.\n \n \n ## To allow this to be vectorized efficiently, we count the number of faces in each \n ## grid cell and handle each group of cells with the same number together.\n ## determine how many faces to assign to each grid cell\n nFaces = nTableFaces[index]\n totFaces = nFaces.sum()\n faces = np.empty((totFaces, 3), dtype=np.uint32)\n ptr = 0\n #import debug\n #p = debug.Profiler()\n \n ## this helps speed up an indexing operation later on\n cs = np.array(cutEdges.strides)//cutEdges.itemsize\n cutEdges = cutEdges.flatten()\n\n ## this, strangely, does not seem to help.\n #ins = np.array(index.strides)/index.itemsize\n #index = index.flatten()\n\n for i in range(1,6):\n ### expensive:\n #profiler()\n cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)\n #profiler()\n if cells.shape[0] == 0:\n continue\n cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round\n #profiler()\n \n ### expensive:\n verts = faceShiftTables[i][cellInds]\n #profiler()\n verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges\n verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])\n #profiler()\n \n ### expensive:\n verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)\n vertInds = cutEdges[verts]\n #profiler()\n nv = vertInds.shape[0]\n #profiler()\n faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))\n #profiler()\n ptr += nv\n \n return vertexes, faces\n\n\n \ndef invertQTransform(tr):\n \"\"\"Return a QTransform that is the inverse of *tr*.\n Raises an exception if tr is not invertible.\n \n Note that this function is preferred over QTransform.inverted() due to\n bugs in that method. (specifically, Qt has floating-point precision issues\n when determining whether a matrix is invertible)\n \"\"\"\n try:\n import numpy.linalg\n arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])\n inv = numpy.linalg.inv(arr)\n return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1], inv[2,2]) # pass all nine elements; otherwise m33 silently defaults to 1.0\n except ImportError:\n inv = tr.inverted()\n if inv[1] is False:\n raise Exception(\"Transform is not invertible.\")\n return inv[0]\n \n \ndef pseudoScatter(data, spacing=None, shuffle=True, bidir=False):\n \"\"\"\n Used for examining the distribution of values in a set. 
Produces scattering as in beeswarm or column scatter plots.\n \n Given a list of x-values, construct a set of y-values such that an x,y scatter-plot\n will not have overlapping points (it will look similar to a histogram).\n \"\"\"\n inds = np.arange(len(data))\n if shuffle:\n np.random.shuffle(inds)\n \n data = data[inds]\n \n if spacing is None:\n spacing = 2.*np.std(data)/len(data)**0.5\n s2 = spacing**2\n \n yvals = np.empty(len(data))\n if len(data) == 0:\n return yvals\n yvals[0] = 0\n for i in range(1,len(data)):\n x = data[i] # current x value to be placed\n x0 = data[:i] # all x values already placed\n y0 = yvals[:i] # all y values already placed\n y = 0\n \n dx = (x0-x)**2 # x-distance to each previous point\n xmask = dx < s2 # exclude anything too far away\n \n if xmask.sum() > 0:\n if bidir:\n dirs = [-1, 1]\n else:\n dirs = [1]\n yopts = []\n for direction in dirs:\n y = 0\n dx2 = dx[xmask]\n dy = (s2 - dx2)**0.5 \n limits = np.empty((2,len(dy))) # ranges of y-values to exclude\n limits[0] = y0[xmask] - dy\n limits[1] = y0[xmask] + dy \n while True:\n # ignore anything below this y-value\n if direction > 0:\n mask = limits[1] >= y\n else:\n mask = limits[0] <= y\n \n limits2 = limits[:,mask]\n \n # are we inside an excluded region?\n mask = (limits2[0] < y) & (limits2[1] > y)\n if mask.sum() == 0:\n break\n \n if direction > 0:\n y = limits2[:,mask].max()\n else:\n y = limits2[:,mask].min()\n yopts.append(y)\n if bidir:\n y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]\n else:\n y = yopts[0]\n yvals[i] = y\n \n return yvals[np.argsort(inds)] ## un-shuffle values before returning\n" ]
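pseudoScatter's docstring above fully specifies the intended beeswarm-style layout, so a short usage sketch may help; the import path pyqtgraph.functions is an assumption (this row's file_path is not shown in the excerpt, but the helpers match pyqtgraph's functions module):

import numpy as np
import pyqtgraph.functions as fn  # assumed import path for the code above

data = np.random.normal(size=300)
y = fn.pseudoScatter(data, bidir=True)  # jittered y-values with no overlapping points
# Plotting (data, y) as a scatter now gives a beeswarm / column-scatter layout.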
[ [ "numpy.product", "numpy.take", "numpy.asarray", "numpy.dtype", "numpy.concatenate", "numpy.argmin", "numpy.exp", "numpy.clip", "numpy.arange", "numpy.frombuffer", "numpy.std", "numpy.zeros", "numpy.log", "numpy.ascontiguousarray", "numpy.isnan", "numpy.tan", "numpy.floor", "numpy.argsort", "numpy.array", "numpy.abs", "numpy.isfinite", "numpy.fft.rfft", "numpy.random.shuffle", "numpy.ones", "numpy.argwhere", "numpy.isscalar", "numpy.ndindex", "numpy.isinf", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Sam-Armstrong/MNIST-GAN
[ "56143cecf50df5dd331278eaf4e0e387fe59bf5c" ]
[ "run_model.py" ]
[ "\"\"\"\nAuthor: Sam Armstrong\nDate: Autumn 2021\n\nDescription: The code for generating a single sample using the model (saves the image to the local folder)\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom Generator import Generator\nfrom PIL import Image\nfrom matplotlib import cm\nfrom torch.autograd import Variable\nfrom torch import Tensor\n\ndevice = torch.device('cuda')\n\ndef run_model():\n generator = Generator()\n generator.load_state_dict(torch.load('generator-model.pickle'))\n generator.eval()\n \n z = Variable(Tensor(np.random.rand(1, 16)))\n image_array = generator(z).detach().numpy()\n image_array = image_array.reshape(28, 28)\n data = Image.fromarray(image_array)\n data = Image.fromarray(np.uint8(cm.gist_earth(image_array) * 255))\n data.show()\n data.save('GAN-Image.png')\n\nif __name__ == '__main__':\n run_model()\n" ]
[ [ "torch.device", "matplotlib.cm.gist_earth", "numpy.random.rand", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JosephKJ/hat
[ "a6386c8a5435573034f3a55c86438c0a82ee9d8d" ]
[ "src/dataloaders/mixture.py" ]
[ "import os,sys\nimport os.path\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom torchvision import datasets,transforms\nfrom sklearn.utils import shuffle\nimport urllib.request\nfrom PIL import Image\nimport pickle\nimport utils\n\n########################################################################################################################\n\ndef get(seed=0,fixed_order=False,pc_valid=0.15):\n data={}\n taskcla=[]\n size=[3,32,32]\n\n idata=np.arange(8)\n if not fixed_order:\n idata=list(shuffle(idata,random_state=seed))\n print('Task order =',idata)\n\n if not os.path.isdir('../dat/binary_mixture/'):\n os.makedirs('../dat/binary_mixture')\n # Pre-load\n for n,idx in enumerate(idata):\n if idx==0:\n # CIFAR10\n mean=[x/255 for x in [125.3,123.0,113.9]]\n std=[x/255 for x in [63.0,62.1,66.7]]\n dat={}\n dat['train']=datasets.CIFAR10('../dat/',train=True,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.CIFAR10('../dat/',train=False,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='cifar10'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx==1:\n # CIFAR100\n mean=[x/255 for x in [125.3,123.0,113.9]]\n std=[x/255 for x in [63.0,62.1,66.7]]\n dat={}\n dat['train']=datasets.CIFAR100('../dat/',train=True,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.CIFAR100('../dat/',train=False,download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='cifar100'\n data[n]['ncla']=100\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx==2:\n # MNIST\n #mean=(0.1307,) # Mean and std without including the padding\n #std=(0.3081,)\n mean=(0.1,) # Mean and std including the padding\n std=(0.2752,)\n dat={}\n dat['train']=datasets.MNIST('../dat/',train=True,download=True,transform=transforms.Compose([\n transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.MNIST('../dat/',train=False,download=True,transform=transforms.Compose([\n transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n]={}\n data[n]['name']='mnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)\n data[n][s]={'x': [],'y': []}\n for image,target in loader:\n image=image.expand(1,3,image.size(2),image.size(3)) # Create 3 equal channels\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 3:\n # SVHN\n mean=[0.4377,0.4438,0.4728]\n std=[0.198,0.201,0.197]\n dat = {}\n dat['train']=datasets.SVHN('../dat/',split='train',download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=datasets.SVHN('../dat/',split='test',download=True,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n data[n] = {}\n data[n]['name']='svhn'\n 
data[n]['ncla']=10\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 4:\n # FashionMNIST\n mean=(0.2190,) # Mean and std including the padding\n std=(0.3318,)\n dat={}\n dat['train']=FashionMNIST('../dat/fashion_mnist', train=True, download=True, transform=transforms.Compose([\n transforms.Pad(padding=2, fill=0), transforms.ToTensor(),transforms.Normalize(mean, std)]))\n dat['test']=FashionMNIST('../dat/fashion_mnist', train=False, download=True, transform=transforms.Compose([\n transforms.Pad(padding=2, fill=0), transforms.ToTensor(),transforms.Normalize(mean, std)]))\n data[n]={}\n data[n]['name']='fashion-mnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader=torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s]={'x': [], 'y': []}\n for image,target in loader:\n image=image.expand(1, 3, image.size(2), image.size(3)) # Create 3 equal channels\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n\n elif idx == 5:\n # Traffic signs\n mean=[0.3398,0.3117,0.3210]\n std=[0.2755,0.2647,0.2712]\n dat={}\n dat['train']=TrafficSigns('../dat/traffic_signs', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=TrafficSigns('../dat/traffic_signs', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n # mean, var = utils.compute_mean_std_dataset(dat['train'])\n data[n]={}\n data[n]['name']='traffic-signs'\n data[n]['ncla']=43\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n elif idx == 6:\n # Facescrub 100 faces\n mean=[0.5163,0.5569,0.4695]\n std=[0.2307,0.2272,0.2479]\n dat={}\n dat['train']=Facescrub('../dat/facescrub', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=Facescrub('../dat/facescrub', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n #mean, std = utils.compute_mean_std_dataset(dat['train']); print(mean,std); sys.exit()\n data[n]={}\n data[n]['name']='facescrub'\n data[n]['ncla']=100\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n elif idx == 7:\n # notMNIST A-J letters\n mean=(0.4254,)\n std=(0.4501,)\n dat={}\n dat['train']=notMNIST('../dat/notmnist', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n dat['test']=notMNIST('../dat/notmnist', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))\n #mean, std = utils.compute_mean_std_dataset(dat['train']); print(mean,std); sys.exit()\n data[n]={}\n data[n]['name']='notmnist'\n data[n]['ncla']=10\n for s in ['train','test']:\n loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)\n data[n][s] = {'x': [], 'y': []}\n for image, target in loader:\n 
image=image.expand(1,3,image.size(2),image.size(3))\n data[n][s]['x'].append(image)\n data[n][s]['y'].append(target.numpy()[0])\n else:\n print('ERROR: Undefined data set',n)\n sys.exit()\n #print(n,data[n]['name'],data[n]['ncla'],len(data[n]['train']['x']))\n\n # \"Unify\" and save\n for s in ['train','test']:\n data[n][s]['x']=torch.stack(data[n][s]['x']).view(-1,size[0],size[1],size[2])\n data[n][s]['y']=torch.LongTensor(np.array(data[n][s]['y'],dtype=int)).view(-1)\n torch.save(data[n][s]['x'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))\n torch.save(data[n][s]['y'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))\n\n else:\n\n # Load binary files\n for n,idx in enumerate(idata):\n data[n] = dict.fromkeys(['name','ncla','train','test'])\n if idx==0:\n data[n]['name']='cifar10'\n data[n]['ncla']=10\n elif idx==1:\n data[n]['name']='cifar100'\n data[n]['ncla']=100\n elif idx==2:\n data[n]['name']='mnist'\n data[n]['ncla']=10\n elif idx==3:\n data[n]['name']='svhn'\n data[n]['ncla']=10\n elif idx==4:\n data[n]['name']='fashion-mnist'\n data[n]['ncla']=10\n elif idx==5:\n data[n]['name']='traffic-signs'\n data[n]['ncla']=43\n elif idx==6:\n data[n]['name']='facescrub'\n data[n]['ncla']=100\n elif idx==7:\n data[n]['name']='notmnist'\n data[n]['ncla']=10\n else:\n print('ERROR: Undefined data set',n)\n sys.exit()\n\n # Load\n for s in ['train','test']:\n data[n][s]={'x':[],'y':[]}\n data[n][s]['x'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))\n data[n][s]['y'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))\n\n # Validation\n for t in data.keys():\n r=np.arange(data[t]['train']['x'].size(0))\n r=np.array(shuffle(r,random_state=seed),dtype=int)\n nvalid=int(pc_valid*len(r))\n ivalid=torch.LongTensor(r[:nvalid])\n itrain=torch.LongTensor(r[nvalid:])\n data[t]['valid']={}\n data[t]['valid']['x']=data[t]['train']['x'][ivalid].clone()\n data[t]['valid']['y']=data[t]['train']['y'][ivalid].clone()\n data[t]['train']['x']=data[t]['train']['x'][itrain].clone()\n data[t]['train']['y']=data[t]['train']['y'][itrain].clone()\n\n # Others\n n=0\n for t in data.keys():\n taskcla.append((t,data[t]['ncla']))\n n+=data[t]['ncla']\n data['ncla']=n\n\n return data,taskcla,size\n\n########################################################################################################################\n\nclass FashionMNIST(datasets.MNIST):\n \"\"\"`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.\n \"\"\"\n urls = [\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',\n ]\n\n########################################################################################################################\n\nclass TrafficSigns(torch.utils.data.Dataset):\n \"\"\"`German Traffic Signs <http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory ``Traffic signs`` exists.\n split (string): One of {'train', 'test'}.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g., ``transforms.RandomCrop``\n download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True, transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"traffic_signs_dataset.zip\"\n self.url = \"https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580d53ce_traffic-sign-data/traffic-sign-data.zip\"\n # Other options for the same 32x32 pickled dataset\n # url=\"https://d17h27t6h515a5.cloudfront.net/topher/2016/November/581faac4_traffic-signs-data/traffic-signs-data.zip\"\n # url_train=\"https://drive.google.com/open?id=0B5WIzrIVeL0WR1dsTC1FdWEtWFE\"\n # url_test=\"https://drive.google.com/open?id=0B5WIzrIVeL0WLTlPNlR2RG95S3c\"\n\n fpath = os.path.join(root, self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'lab 2 data/train.p'\n testing_file = 'lab 2 data/test.p'\n if train:\n with open(os.path.join(root,training_file), mode='rb') as f:\n train = pickle.load(f)\n self.data = train['features']\n self.labels = train['labels']\n else:\n with open(os.path.join(root,testing_file), mode='rb') as f:\n test = pickle.load(f)\n self.data = test['features']\n self.labels = test['labels']\n\n # convert images from HWC to the CHW layout PyTorch expects\n self.data = np.transpose(self.data, (0, 3, 1, 2))\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n\nclass Facescrub(torch.utils.data.Dataset):\n \"\"\"Subset of the Facescrub dataset cropped from the official Megaface challenge page: http://megaface.cs.washington.edu/participate/challenge.html, resized to 38x38\n\n Args:\n root (string): Root directory of the dataset.\n train (bool, optional): If True, loads the training split, otherwise the test split.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version.
E.g., ``transforms.RandomCrop``\n download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True, transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"facescrub_100.zip\"\n self.url = \"https://github.com/nkundiushuti/facescrub_subset/blob/master/data/facescrub_100.zip?raw=true\"\n\n fpath = os.path.join(root, self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'facescrub_train_100.pkl'\n testing_file = 'facescrub_test_100.pkl'\n if train:\n with open(os.path.join(root,training_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # train = u.load()\n train = pickle.load(f)\n self.data = train['features'].astype(np.uint8)\n self.labels = train['labels'].astype(np.uint8)\n else:\n with open(os.path.join(root,testing_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # test = u.load()\n test = pickle.load(f)\n\n self.data = test['features'].astype(np.uint8)\n self.labels = test['labels'].astype(np.uint8)\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n\nclass notMNIST(torch.utils.data.Dataset):\n \"\"\"The notMNIST dataset is an image-recognition dataset of font glyphs for the letters A through J, useful with simple neural networks. It is quite similar to the classic MNIST dataset of handwritten digits 0 through 9.\n\n Args:\n root (string): Root directory of the dataset.\n train (bool, optional): If True, loads the training split, otherwise the test split.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version.
E.g., ``transforms.RandomCrop``\n download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory.\n If dataset is already downloaded, it is not downloaded again.\n\n \"\"\"\n\n def __init__(self, root, train=True, transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.filename = \"notmnist.zip\"\n self.url = \"https://github.com/nkundiushuti/notmnist_convert/blob/master/notmnist.zip?raw=true\"\n\n fpath = os.path.join(root, self.filename)\n if not os.path.isfile(fpath):\n if not download:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n print('Downloading from '+self.url)\n self.download()\n\n training_file = 'notmnist_train.pkl'\n testing_file = 'notmnist_test.pkl'\n if train:\n with open(os.path.join(root,training_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # train = u.load()\n train = pickle.load(f)\n self.data = train['features'].astype(np.uint8)\n self.labels = train['labels'].astype(np.uint8)\n else:\n with open(os.path.join(root,testing_file),'rb') as f:\n # u = pickle._Unpickler(f)\n # u.encoding = 'latin1'\n # test = u.load()\n test = pickle.load(f)\n\n self.data = test['features'].astype(np.uint8)\n self.labels = test['labels'].astype(np.uint8)\n\n\n def __getitem__(self, index):\n \"\"\"\n Args: index (int): Index\n Returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.labels[index]\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img[0])\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n def download(self):\n import errno\n root = os.path.expanduser(self.root)\n\n fpath = os.path.join(root, self.filename)\n\n try:\n os.makedirs(root)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n urllib.request.urlretrieve(self.url, fpath)\n\n import zipfile\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(root)\n zip_ref.close()\n\n\n########################################################################################################################\n" ]
[ [ "torch.LongTensor", "numpy.arange", "sklearn.utils.shuffle", "torch.utils.data.DataLoader", "numpy.transpose", "torch.stack", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
weijiawu/SyntoReal_STD
[ "4f92809cfa276d0424019bcc1fb77659d20423b8" ]
[ "lib/utils.py" ]
[ "import numpy as np\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport math\nimport os\nimport torch\n\n\ndef get_MSER(image_path,):\n image = cv2.imread(image_path)\n rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n negtive_map = np.zeros(image.shape[:2])\n mser = cv2.MSER_create(_delta=5, _min_area=10, _max_variation=0.8)\n regions, bboxes = mser.detectRegions(rgb_img)\n # 绘制文本区域(不规则轮廓)\n hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\n\n keep = []\n for hull in hulls:\n x, y, w, h = cv2.boundingRect(hull)\n keep.append([x, y, x + w, y + h])\n\n # 使用非极大值抑制获取不重复的矩形框\n pick = non_max_suppression_fast(np.array(keep), overlapThresh=0.4)\n\n # loop over the picked bounding boxes and draw them\n for (startX, startY, endX, endY) in pick:\n cv2.fillPoly(negtive_map, np.array([[[startX, startY], [endX, startY], [endX, endY], [startX, endY]]]), (1))\n negtive_map = Image.fromarray(negtive_map)\n\n return negtive_map\n\n\ndef crop_img_target_source(img, vertices, labels, length,negtive_map):\n '''crop img patches to obtain batch and augment\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n length : length of cropped image region\n Output:\n region : cropped image region\n new_vertices: new vertices in cropped region\n '''\n h, w = img.height, img.width\n # confirm the shortest side of image >= length\n if h >= w and w < length:\n img = img.resize((length, int(h * length / w)), Image.BILINEAR)\n negtive_map = negtive_map.resize((length, int(h * length / w)), Image.BILINEAR)\n elif h < w and h < length:\n img = img.resize((int(w * length / h), length), Image.BILINEAR)\n negtive_map = negtive_map.resize((length, int(h * length / w)), Image.BILINEAR)\n\n ratio_w = img.width / w\n ratio_h = img.height / h\n assert (ratio_w >= 1 and ratio_h >= 1)\n\n new_vertices = np.zeros(vertices.shape)\n if vertices.size > 0:\n new_vertices[:, [0, 2, 4, 6]] = vertices[:, [0, 2, 4, 6]] * ratio_w\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * ratio_h\n\n # find random position\n remain_h = img.height - length\n remain_w = img.width - length\n flag = True\n cnt = 0\n while flag and cnt < 1000:\n cnt += 1\n start_w = int(np.random.rand() * remain_w)\n start_h = int(np.random.rand() * remain_h)\n flag = is_cross_text([start_w, start_h], length, new_vertices[labels == 1, :])\n box = (start_w, start_h, start_w + length, start_h + length)\n region = img.crop(box)\n negtive_map = negtive_map.crop(box)\n\n if new_vertices.size == 0:\n return region, new_vertices,negtive_map\n\n new_vertices[:, [0, 2, 4, 6]] -= start_w\n new_vertices[:, [1, 3, 5, 7]] -= start_h\n return region, new_vertices,negtive_map\n\n\ndef rotate_img_target_source(img, vertices, negtive_map,angle_range=10):\n '''rotate image [-10, 10] degree to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n angle_range : rotate range\n Output:\n img : rotated PIL Image\n new_vertices: rotated vertices\n '''\n center_x = (img.width - 1) / 2\n center_y = (img.height - 1) / 2\n angle = angle_range * (np.random.rand() * 2 - 1)\n img = img.rotate(angle, Image.BILINEAR)\n negtive_map = negtive_map.rotate(angle, Image.BILINEAR)\n\n new_vertices = np.zeros(vertices.shape)\n for i, vertice in enumerate(vertices):\n new_vertices[i, :] = rotate_vertices(vertice, -angle / 180 * math.pi, np.array([[center_x], [center_y]]))\n return img, 
new_vertices,negtive_map\n\ndef adjust_height_target_source(img, vertices, negtive_map, ratio=0.2):\n '''adjust height of image to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n ratio : height changes in [0.8, 1.2]\n Output:\n img : adjusted PIL Image\n new_vertices: adjusted vertices\n '''\n ratio_h = 1 + ratio * (np.random.rand() * 2 - 1) #[0.8, 1.2]\n old_h = img.height\n new_h = int(np.around(old_h * ratio_h))\n img = img.resize((img.width, new_h), Image.BILINEAR)\n negtive_map = negtive_map.resize((img.width, new_h), Image.BILINEAR)\n\n new_vertices = vertices.copy()\n if vertices.size > 0:\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * (new_h / old_h)\n return img, new_vertices,negtive_map\n\ndef rotate_all_pixels(rotate_mat, anchor_x, anchor_y, length):\n '''get rotated locations of all pixels for next stages\n Input:\n rotate_mat: rotatation matrix\n anchor_x : fixed x position\n anchor_y : fixed y position\n length : length of image\n Output:\n rotated_x : rotated x positions <numpy.ndarray, (length,length)>\n rotated_y : rotated y positions <numpy.ndarray, (length,length)>\n '''\n x = np.arange(length)\n y = np.arange(length)\n x, y = np.meshgrid(x, y)\n x_lin = x.reshape((1, x.size))\n y_lin = y.reshape((1, x.size))\n coord_mat = np.concatenate((x_lin, y_lin), 0)\n rotated_coord = np.dot(rotate_mat, coord_mat - np.array([[anchor_x], [anchor_y]])) + \\\n np.array([[anchor_x], [anchor_y]])\n rotated_x = rotated_coord[0, :].reshape(x.shape)\n rotated_y = rotated_coord[1, :].reshape(y.shape)\n return rotated_x, rotated_y\n\n\ndef get_score_geo_target_source(img, vertices, labels, scale, length, negative_map):\n '''generate score gt and geometry gt\n Input:\n img : PIL Image\n vertices: vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n scale : feature map / image 0.25\n length : image length 512\n Output:\n score gt, geo gt, ignored\n '''\n score_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros((int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n\n\n negative_map = np.array(negative_map)\n negative_map = cv2.resize(negative_map,(int(img.height * scale), int(img.width * scale)))\n negative_map = np.expand_dims(negative_map, -1)\n\n index = np.arange(0, length, int(1 / scale)) # [0 4 8 12 .... 
508]\n index_x, index_y = np.meshgrid(index, index) # Return coordinate matrices from coordinate vectors.\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n\n #得到0.3缩放后的score map 将vectice缩小到原来的0.3\n poly = np.around(scale * shrink_poly(vertice).reshape((4, 2))).astype(np.int32) # scaled & shrinked\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n # print(\"rotated_x:\",rotated_x.shape)\n # print(\"rotated_y:\", rotated_y.shape)\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n\n # negative_map: 背景\n # ignored_map:前景\n text_background = negative_map*ignored_map\n negative_map = negative_map*(1-text_background)\n ignored_map = ignored_map*(1-text_background)\n\n ignored_map = 1 - (negative_map + ignored_map)\n cv2.fillPoly(score_map, polys, 1)\n return torch.Tensor(score_map).permute(2, 0, 1), torch.Tensor(geo_map).permute(2, 0, 1), torch.Tensor(\n ignored_map).permute(2, 0, 1)\n\n\ndef non_max_suppression_fast(boxes, overlapThresh):\n \"\"\"\n boxes: boxes为一个m*n的矩阵,m为bbox的个数,n的前4列为每个bbox的坐标,\n 格式为(x1,y1,x2,y2),有时会有第5列,该列为每一类的置信\n overlapThresh: 最大允许重叠率\n \"\"\"\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes are integers, convert them to floats\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of all bounding boxes respectively\n x1 = boxes[:,0] # startX\n y1 = boxes[:,1] # startY\n x2 = boxes[:,2] # endX\n y2 = boxes[:,3] # endY\n # probs = boxes[:,4]\n\n # compute the area of the bounding boxes and sort the bboxes\n # by the bottom y-coordinate of the bboxes by ascending order\n # and grab the indexes of the sorted coordinates of bboxes\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n # if probabilities are provided, sort by them instead\n # idxs = np.argsort(probs)\n\n # keep looping while some indexes still remain in the idxs list\n while len(idxs) > 0:\n # grab the last index in the idxs list (the bottom-right box)\n # and add the index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest coordinates for the start of the bbox\n # and the smallest coordinates for the end of the bbox\n # in the rest of bounding boxes.\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n # the 
ratio of overlap in the bounding box\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that overlap is larger than overlapThresh\n idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick].astype(\"int\")\n\n\n\ndef cal_distance(x1, y1, x2, y2):\n '''calculate the Euclidean distance'''\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef move_points(vertices, index1, index2, r, coef):\n '''move the two points to shrink edge\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n index1 : offset of point1\n index2 : offset of point2\n r : [r1, r2, r3, r4] in paper\n coef : shrink ratio in paper\n Output:\n vertices: vertices where one edge has been shinked\n '''\n index1 = index1 % 4\n index2 = index2 % 4\n x1_index = index1 * 2 + 0\n y1_index = index1 * 2 + 1\n x2_index = index2 * 2 + 0\n y2_index = index2 * 2 + 1\n\n r1 = r[index1]\n r2 = r[index2]\n length_x = vertices[x1_index] - vertices[x2_index]\n length_y = vertices[y1_index] - vertices[y2_index]\n length = cal_distance(vertices[x1_index], vertices[y1_index], vertices[x2_index], vertices[y2_index])\n if length > 1:\n ratio = (r1 * coef) / length\n vertices[x1_index] += ratio * (-length_x)\n vertices[y1_index] += ratio * (-length_y)\n ratio = (r2 * coef) / length\n vertices[x2_index] += ratio * length_x\n vertices[y2_index] += ratio * length_y\n return vertices\n\n\ndef shrink_poly(vertices, coef=0.3):\n '''shrink the text region\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n coef : shrink ratio in paper\n Output:\n v : vertices of shrinked text region <numpy.ndarray, (8,)>\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n r1 = min(cal_distance(x1, y1, x2, y2), cal_distance(x1, y1, x4, y4))\n r2 = min(cal_distance(x2, y2, x1, y1), cal_distance(x2, y2, x3, y3))\n r3 = min(cal_distance(x3, y3, x2, y2), cal_distance(x3, y3, x4, y4))\n r4 = min(cal_distance(x4, y4, x1, y1), cal_distance(x4, y4, x3, y3))\n r = [r1, r2, r3, r4]\n\n # obtain offset to perform move_points() automatically\n if cal_distance(x1, y1, x2, y2) + cal_distance(x3, y3, x4, y4) > \\\n cal_distance(x2, y2, x3, y3) + cal_distance(x1, y1, x4, y4):\n offset = 0 # two longer edges are (x1y1-x2y2) & (x3y3-x4y4)\n else:\n offset = 1 # two longer edges are (x2y2-x3y3) & (x4y4-x1y1)\n\n v = vertices.copy()\n v = move_points(v, 0 + offset, 1 + offset, r, coef)\n v = move_points(v, 2 + offset, 3 + offset, r, coef)\n v = move_points(v, 1 + offset, 2 + offset, r, coef)\n v = move_points(v, 3 + offset, 4 + offset, r, coef)\n return v\n\n\ndef get_rotate_mat(theta):\n '''positive theta value means rotate clockwise'''\n return np.array([[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]])\n\n\ndef rotate_vertices(vertices, theta, anchor=None):\n '''rotate vertices around anchor\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n theta : angle in radian measure\n anchor : fixed position during rotation\n Output:\n rotated vertices <numpy.ndarray, (8,)>\n '''\n v = vertices.reshape((4, 2)).T\n\n # print(vertices)\n # print(\"v:\",v.shape)\n # 取第一个顶点\n if anchor is None:\n anchor = v[:, :1]\n # print(anchor)\n\n rotate_mat = get_rotate_mat(theta)\n res = np.dot(rotate_mat, v - anchor)\n return (res + anchor).T.reshape(-1)\n\n\ndef get_boundary(vertices):\n '''get the tight boundary around given vertices\n Input:\n vertices: vertices of 
text region <numpy.ndarray, (8,)>\n Output:\n the boundary\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n x_min = min(x1, x2, x3, x4)\n x_max = max(x1, x2, x3, x4)\n y_min = min(y1, y2, y3, y4)\n y_max = max(y1, y2, y3, y4)\n return x_min, x_max, y_min, y_max\n\n\ndef cal_error(vertices):\n '''default orientation is x1y1 : left-top, x2y2 : right-top, x3y3 : right-bot, x4y4 : left-bot\n calculate the difference between the vertices orientation and default orientation\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n err : difference measure\n '''\n x_min, x_max, y_min, y_max = get_boundary(vertices)\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n err = cal_distance(x1, y1, x_min, y_min) + cal_distance(x2, y2, x_max, y_min) + \\\n cal_distance(x3, y3, x_max, y_max) + cal_distance(x4, y4, x_min, y_max)\n return err\n\n\ndef find_min_rect_angle(vertices):\n '''find the best angle to rotate poly and obtain min rectangle\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n the best angle <radian measure>\n '''\n angle_interval = 1\n angle_list = list(range(-90, 90, angle_interval))\n area_list = []\n for theta in angle_list:\n rotated = rotate_vertices(vertices, theta / 180 * math.pi)\n x1, y1, x2, y2, x3, y3, x4, y4 = rotated\n temp_area = (max(x1, x2, x3, x4) - min(x1, x2, x3, x4)) * \\\n (max(y1, y2, y3, y4) - min(y1, y2, y3, y4))\n area_list.append(temp_area)\n\n sorted_area_index = sorted(list(range(len(area_list))), key=lambda k: area_list[k])\n min_error = float('inf')\n best_index = -1\n rank_num = 10\n # find the best angle with correct orientation\n for index in sorted_area_index[:rank_num]:\n rotated = rotate_vertices(vertices, angle_list[index] / 180 * math.pi)\n temp_error = cal_error(rotated)\n if temp_error < min_error:\n min_error = temp_error\n best_index = index\n return angle_list[best_index] / 180 * math.pi\n\n\ndef is_cross_text(start_loc, length, vertices):\n '''check if the crop image crosses text regions\n Input:\n start_loc: left-top position\n length : length of crop image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n Output:\n True if crop image crosses text region\n '''\n if vertices.size == 0:\n return False\n start_w, start_h = start_loc\n a = np.array([start_w, start_h, start_w + length, start_h, \\\n start_w + length, start_h + length, start_w, start_h + length]).reshape((4, 2))\n p1 = Polygon(a).convex_hull\n for vertice in vertices:\n p2 = Polygon(vertice.reshape((4, 2))).convex_hull\n inter = p1.intersection(p2).area\n try:\n if 0.01 <= inter / p2.area <= 0.99:\n return True\n except:\n continue\n return False" ]
[ [ "numpy.dot", "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "numpy.meshgrid", "torch.Tensor", "numpy.arange", "numpy.around", "numpy.concatenate", "numpy.random.rand", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snigdhagit/compare-selection
[ "26f41d06405e9a9be9894878bf604c38049a4729" ]
[ "statistics.py" ]
[ "from __future__ import division\n\nimport numpy as np, pandas as pd, time\nfrom utils import BHfilter\n\ndef interval_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n try:\n active, lower, upper, pvalues = M.generate_intervals()\n except AttributeError:\n return M, None \n\n if len(active) > 0:\n naive_lower, naive_upper = M.naive_intervals(active)[1:]\n naive_pvalues = M.naive_pvalues(active)[1]\n else:\n naive_lower, naive_upper, naive_pvalues = None, None, None\n target = M.get_target(active, beta) # for now limited to Gaussian methods\n full_target = M.full_target(active, beta)\n tic = time.time()\n\n if len(active) > 0:\n alpha = 1 - M.confidence\n fdp = (pvalues[full_target == 0] < alpha).sum() / pvalues.shape[0]\n value = pd.DataFrame({'active_variable':active,\n 'lower_confidence':lower,\n 'upper_confidence':upper,\n 'target':target,\n 'full_target':full_target,\n 'fdp':fdp * np.ones_like(pvalues)})\n if naive_lower is not None:\n value['naive_lower_confidence'] = naive_lower\n value['naive_upper_confidence'] = naive_upper\n value['naive_pvalue'] = naive_pvalues\n if np.isfinite(toc):\n value['Time'] = tic-toc\n value['pvalue'] = pvalues\n return M, value\n else:\n return M, None\n\ndef interval_summary(result):\n\n length = result['upper_confidence'] - result['lower_confidence']\n if 'naive_lower_confidence' in result.columns:\n naive_length = result['naive_upper_confidence'] - result['naive_lower_confidence']\n else:\n naive_length = np.ones_like(length) * np.nan\n\n def coverage_(result):\n return np.mean(np.asarray(result['lower_confidence'] <= result['target']) *\n np.asarray(result['upper_confidence'] >= result['target']))\n \n def naive_coverage_(result):\n return np.mean(np.asarray(result['naive_lower_confidence'] <= result['target']) *\n np.asarray(result['naive_upper_confidence'] >= result['target']))\n \n instances = result.groupby('instance_id')\n len_cover = np.array([(len(g.index), coverage_(g)) for _, g in instances])\n\n instances = result.groupby('instance_id')\n naive_cover = np.array([(len(g.index), naive_coverage_(g)) for _, g in instances])\n naive_coverage = np.mean(naive_cover, 0)[1]\n active_vars, mean_coverage = np.mean(len_cover, 0)\n sd_coverage = np.std(len_cover[:,1])\n\n # XXX we should group by instances before averaging and computing SD\n\n value = pd.DataFrame([[len(np.unique(result['instance_id'])),\n mean_coverage,\n sd_coverage,\n np.median(length),\n np.mean(length),\n np.mean(naive_length),\n np.median(naive_length),\n naive_coverage,\n active_vars,\n np.mean(result['Time']),\n result['model_target'].values[0]]],\n columns=['Replicates',\n 'Coverage',\n 'SD(Coverage)',\n 'Median Length',\n 'Mean Length',\n 'Mean Naive Length',\n 'Median Naive Length',\n 'Naive Coverage',\n 'Active',\n 'Time',\n 'Model'])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\ndef estimator_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n \n try:\n active, point_estimate = M.point_estimator()\n except AttributeError:\n return M, None # cannot make point estimator\n\n if 
len(active) > 0:\n naive_estimate = M.naive_estimator(active)[1]\n else:\n naive_estimate = np.zeros_like(point_estimate)\n\n tic = time.time()\n\n S = instance.feature_cov\n\n full_risk = np.sum((beta - point_estimate) * S.dot(beta - point_estimate)) / beta[active].shape\n naive_full_risk = np.sum((beta - naive_estimate) * S.dot(beta - naive_estimate)) / beta[active].shape\n\n # partial risk -- only active coordinates\n\n target = M.get_target(active, beta) # for now limited to Gaussian methods\n\n S_active = S[active][:,active]\n delta = target - point_estimate[active]\n partial_risk = np.sum(delta * S_active.dot(delta)) / delta.shape[0]\n naive_delta = target - naive_estimate[active]\n naive_partial_risk = np.sum(naive_delta * S_active.dot(naive_delta)) / delta.shape[0]\n\n if np.linalg.norm(target) > 0:\n partial_relative_risk = partial_risk / max(np.sum(target * S_active.dot(target)), 1)\n naive_partial_relative_risk = naive_partial_risk / max(np.sum(target * S_active.dot(target)), 1)\n else:\n # the relative risks are undefined when the target is identically zero\n partial_relative_risk = np.nan\n naive_partial_relative_risk = np.nan\n\n # relative risk\n\n relative_risk = full_risk / (np.sum(beta * S.dot(beta)) * beta.shape[0])\n naive_relative_risk = naive_full_risk / np.sum(beta * S.dot(beta))\n\n bias = np.mean(point_estimate - beta)\n naive_bias = np.mean(naive_estimate - beta)\n\n value = pd.DataFrame({'Full Risk':[full_risk],\n 'Naive Full Risk':[naive_full_risk],\n 'Partial Risk':[partial_risk],\n 'Partial Relative Risk':[partial_relative_risk],\n 'Naive Partial Relative Risk':[naive_partial_relative_risk],\n 'Naive Partial Risk':[naive_partial_risk],\n 'Relative Risk':[relative_risk],\n 'Naive Relative Risk':[naive_relative_risk],\n 'Bias':[bias],\n 'Naive Bias':[naive_bias],\n })\n\n if np.isfinite(toc):\n value['Time'] = tic-toc\n value['Active'] = len(active)\n\n return M, value\n\ndef estimator_summary(result):\n\n nresult = result['Full Risk'].shape[0]\n value = pd.DataFrame([[nresult,\n np.median(result['Full Risk']),\n np.std(result['Full Risk']),\n np.median(result['Naive Full Risk']),\n np.std(result['Naive Full Risk']),\n np.median(result['Partial Risk']),\n np.std(result['Partial Risk']),\n np.median(result['Naive Partial Risk']),\n np.std(result['Naive Partial Risk']),\n np.median(result['Relative Risk']),\n np.std(result['Relative Risk']),\n np.median(result['Naive Relative Risk']),\n np.std(result['Naive Relative Risk']),\n np.median(result['Bias']),\n np.std(result['Bias']),\n np.median(result['Naive Bias']),\n np.std(result['Naive Bias']),\n np.mean(result['Time']),\n np.mean(result['Active']),\n result['model_target'].values[0]]],\n columns=['Replicates',\n 'Median(Full Risk)',\n 'SD(Full Risk)',\n 'Median(Naive Full Risk)',\n 'SD(Naive Full Risk)',\n 'Median(Partial Risk)',\n 'SD(Partial Risk)',\n 'Median(Naive Partial Risk)',\n 'SD(Naive Partial Risk)',\n 'Median(Relative Risk)',\n 'SD(Relative Risk)',\n 'Median(Naive Relative Risk)',\n 'SD(Naive Relative Risk)',\n 'Median(Bias)',\n 'SD(Bias)',\n 'Median(Naive Bias)',\n 'SD(Naive Bias)',\n 'Time', \n 'Active',\n 'Model'\n ])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\ndef BH_statistic(method,\n instance,\n X,\n Y,\n beta,\n l_theory,\n l_min,\n l_1se,\n sigma_reid,\n M=None):\n\n if M is None:\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n else:\n toc = np.inf\n \n selected, active = M.select()\n try:\n if len(active) > 0:\n naive_pvalues = M.naive_pvalues(active)[1]\n naive_selected = [active[j] for j in BHfilter(naive_pvalues, q=M.q)]\n else:\n naive_selected = None\n except AttributeError:\n naive_selected = None\n tic = time.time()\n true_active = np.nonzero(beta)[0]\n\n if active is not None:\n selection_quality = instance.discoveries(active, true_active)\n TD = instance.discoveries(selected, true_active)\n FD = len(selected) - TD\n FDP = FD / max(TD + 1. * FD, 1.)\n\n # naive\n if naive_selected is not None:\n nTD = instance.discoveries(naive_selected, true_active)\n nFD = len(naive_selected) - nTD\n nFDP = nFD / max(nTD + 1. * nFD, 1.)\n else:\n nTD, nFDP, nFD = np.nan, np.nan, np.nan\n\n ntrue_active = max(len(true_active), 1) \n value = pd.DataFrame([[TD / ntrue_active, \n FD, \n FDP, \n nTD / ntrue_active, \n nFD,\n nFDP,\n selection_quality / ntrue_active,\n len(active)]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Selection Quality',\n 'Active'])\n else:\n value = pd.DataFrame([[0, 0, 0, 0, 0, 0, tic-toc, 0, 0]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n if np.isfinite(toc):\n value['Time'] = tic-toc\n\n return M, value\n\ndef BH_summary(result):\n\n nresult = result['Full Model Power'].shape[0]\n value = pd.DataFrame([[nresult,\n np.mean(result['Full Model Power']), \n np.std(result['Full Model Power']) / np.sqrt(nresult),\n np.mean(result['False Discoveries']), \n np.mean(result['Full Model FDP']), \n np.std(result['Full Model FDP']) / np.sqrt(nresult),\n np.mean(result['Naive Full Model FDP']), \n np.mean(result['Naive Full Model Power']), \n np.mean(result['Naive False Discoveries']), \n np.mean(result['Time']),\n np.mean(result['Selection Quality']),\n np.mean(result['Active']),\n result['model_target'].values[0]]],\n columns=['Replicates', \n 'Full Model Power', \n 'SD(Full Model Power)', \n 'False Discoveries', \n 'Full Model FDR', \n 'SD(Full Model FDR)', \n 'Naive Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Time', \n 'Selection Quality',\n 'Active',\n 'Model'\n ])\n\n # keep all things constant over groups\n\n for n in result.columns:\n if len(np.unique(result[n])) == 1:\n value[n] = result[n].values[0]\n\n return value\n\n# marginally threshold p-values at 10% by default\n\nmarginal_summary = BH_summary # reporting statistics are the same as with BHfilter\n\ndef marginal_statistic(method, \n instance, \n X, \n Y, \n beta, \n l_theory, \n l_min, \n l_1se, \n sigma_reid):\n\n toc = time.time()\n M = method(X.copy(), Y.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)\n try:\n active, pvalues = M.generate_pvalues()\n selected = pvalues < method.level\n except AttributeError: # some methods do not have pvalues (e.g. knockoffs); for these we will run their select method\n active, selected = M.select()\n\n try:\n if len(active) > 0:\n naive_pvalues = M.naive_pvalues(active)[1]\n naive_selected = naive_pvalues < method.level\n else:\n naive_selected = None\n except AttributeError:\n naive_selected = None\n\n tic = time.time()\n true_active = np.nonzero(beta)[0]\n\n if active is not None:\n selection_quality = instance.discoveries(active, true_active)\n TD = instance.discoveries(selected, true_active)\n FD = len(selected) - TD\n FDP = FD / max(TD + 1. * FD, 1.)\n\n # naive\n if naive_selected is not None:\n nTD = instance.discoveries(naive_selected, true_active)\n nFD = len(naive_selected) - nTD\n nFDP = nFD / max(nTD + 1. * nFD, 1.)\n else:\n nTD, nFDP, nFD = np.nan, np.nan, np.nan\n\n ntrue_active = max(len(true_active), 1) \n return M, pd.DataFrame([[TD / ntrue_active,\n FD, \n FDP, \n nTD / ntrue_active, \n nFD,\n nFDP,\n tic-toc, \n selection_quality / ntrue_active,\n len(active)]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n else:\n return M, pd.DataFrame([[0, 0, 0, 0, 0, 0, tic-toc, 0, 0]],\n columns=['Full Model Power',\n 'False Discoveries',\n 'Full Model FDP',\n 'Naive Full Model Power',\n 'Naive False Discoveries',\n 'Naive Full Model FDP',\n 'Time',\n 'Selection Quality',\n 'Active'])\n\n" ]
[ [ "numpy.ones_like", "numpy.maximum", "numpy.isfinite", "numpy.nonzero", "numpy.asarray", "numpy.unique", "numpy.median", "numpy.sqrt", "numpy.linalg.norm", "pandas.DataFrame", "numpy.std", "numpy.mean", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
fluiddyn/fluidpythran
[ "e34e9886680e6b8e365d24a77fcb66b67e554043", "e34e9886680e6b8e365d24a77fcb66b67e554043" ]
[ "doc/examples/not_implemented/pythran_class_with_calls.py", "data_tests/methods.py" ]
[ "\"\"\"\nWith classes, we have a problem with heritage. Note that for standard functions\n(like sum_arrays), we actually also have the problem with monkey patching.\n\nWe can just say that monkey patching of `sum_arrays` is not supported (so that\n`sum_arrays` can be treated as a Pythran function, and potentially inlined) but\nfor class, we really want to support heritage (like in MyClassChild) so we\nwould need to replace `compute` by a Python method calling Pythran functions\nand Python methods (which themselves call Pythran functions).\n\nThe mechanism needed for `compute` is much more complicated than the simple\ncase in `pythran_class.py` and more complicated than what is needed for\n`compute1` (which is actually similar to [issue\n#7](https://bitbucket.org/fluiddyn/fluidpythran/issues/7/support-kernels-with-function-calls)).\n\n\"\"\"\n\nfrom fluidpythran import Type, NDim, Array, boost\n\nimport numpy as np\n\n\nT = Type(int, np.float64)\nN = NDim(1)\n\nA1 = Array[T, N]\nA2 = Array[float, N + 1]\n\n\ndef sum_arrays(arr0, arr1):\n return arr0 + arr1\n\n\nclass MyClass:\n\n arr0: A1\n arr1: A1\n arr2: A2\n\n def __init__(self, n, dtype=int):\n self.arr0 = np.zeros(n, dtype=dtype)\n self.arr1 = np.zeros(n, dtype=dtype)\n self.arr2 = np.zeros(n)\n\n @boost\n def compute(self, alpha: float):\n tmp = self.sum_arrays().mean()\n return tmp ** alpha * self.arr2\n\n def sum_arrays(self):\n return self.arr0 + self.arr1\n\n @boost\n def compute1(self, alpha: float):\n tmp = sum_arrays(self.arr0, self.arr1).mean()\n return tmp ** alpha * self.arr2\n\n\nclass MyClassChild(MyClass):\n def sum_arrays(self):\n return 2 * self.arr0 + self.arr1\n", "import numpy as np\n\n# pythran import numpy as np\n\nfrom fluidpythran import boost\n\n\n@boost\nclass Transmitter:\n\n freq: float\n\n def __init__(self, freq):\n self.freq = float(freq)\n\n @boost\n def __call__(self, inp: \"float[]\"):\n \"\"\"My docstring\"\"\"\n return inp * np.exp(np.arange(len(inp)) * self.freq * 1j)\n\n\nif __name__ == \"__main__\":\n inp = np.ones(2)\n freq = 1.0\n trans = Transmitter(freq)\n\n def for_check(freq, inp):\n return inp * np.exp(np.arange(len(inp)) * freq * 1j)\n\n assert np.allclose(trans(inp), for_check(freq, inp))\n" ]
[ [ "numpy.zeros" ], [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tangibleai/rasa
[ "d92cda129bbbf4b52151d981535bd02fc7597d6d" ]
[ "tests/core/featurizers/test_single_state_featurizers.py" ]
[ "from typing import Text\nimport numpy as np\nfrom rasa.shared.core.constants import ENTITY_LABEL_SEPARATOR\nimport scipy.sparse\n\nimport pytest\n\nfrom rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer\nfrom rasa.shared.core.domain import Domain\nfrom rasa.shared.nlu.constants import (\n ACTION_TEXT,\n ACTION_NAME,\n ENTITIES,\n TEXT,\n INTENT,\n FEATURE_TYPE_SEQUENCE,\n FEATURE_TYPE_SENTENCE,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_VALUE,\n ENTITY_ATTRIBUTE_START,\n ENTITY_ATTRIBUTE_END,\n ENTITY_TAGS,\n)\nfrom rasa.shared.core.constants import ACTIVE_LOOP, SLOTS\nfrom rasa.shared.nlu.interpreter import RegexInterpreter\nfrom rasa.shared.core.slots import Slot\nfrom rasa.shared.nlu.training_data.features import Features\n\n\ndef test_single_state_featurizer_without_interpreter_state_not_with_action_listen():\n \"\"\"This test are for encoding state without a trained interpreter.\n action_name is not action_listen, so, INTENT, TEXT and ENTITIES should not be\n featurized.\n \"\"\"\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_name\": \"d\", \"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"i\"},\n \"slots\": {\"g\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n # user input is ignored as prev action is not action_listen\n assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])\n ).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0\n\n\ndef test_single_state_featurizer_without_interpreter_state_with_action_listen():\n \"\"\"This test are for encoding state without a trained interpreter.\n action_name is action_listen, so, INTENT and ENTITIES should be featurized\n while text shouldn't because we don't have an interpreter.\n \"\"\"\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_name\": \"action_listen\", \"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n # we featurize all the features except for *_text ones because NLU wasn't trained\n assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])\n ).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n\n\ndef 
test_single_state_featurizer_without_interpreter_state_no_intent_no_action_name():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n # check that no intent / action_name features are added when the interpreter\n # isn't there and\n # intent / action_name not in input\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"blah blah blah\"},\n \"prev_action\": {\"action_text\": \"boom\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=RegexInterpreter(),\n )\n\n assert list(encoded.keys()) == [ACTIVE_LOOP, SLOTS]\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n\n\ndef test_single_state_featurizer_correctly_encodes_non_existing_value():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"c\": 0, \"d\": 1}\n\n encoded = f.encode_state(\n {\"user\": {\"intent\": \"e\"}, \"prev_action\": {\"action_name\": \"action_listen\"}},\n interpreter=RegexInterpreter(),\n )\n\n assert list(encoded.keys()) == [INTENT, ACTION_NAME]\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 0]])).nnz == 0\n\n\ndef test_single_state_featurizer_prepare_for_training():\n domain = Domain(\n intents=[\"greet\"],\n entities=[\"name\"],\n slots=[Slot(\"name\")],\n templates={},\n forms=[],\n action_names=[\"utter_greet\", \"action_check_weather\"],\n )\n\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n\n assert len(f._default_feature_states[INTENT]) > 1\n assert \"greet\" in f._default_feature_states[INTENT]\n assert len(f._default_feature_states[ENTITIES]) == 1\n assert f._default_feature_states[ENTITIES][\"name\"] == 0\n assert len(f._default_feature_states[SLOTS]) == 1\n assert f._default_feature_states[SLOTS][\"name_0\"] == 0\n assert len(f._default_feature_states[ACTION_NAME]) > 2\n assert \"utter_greet\" in f._default_feature_states[ACTION_NAME]\n assert \"action_check_weather\" in f._default_feature_states[ACTION_NAME]\n assert len(f._default_feature_states[ACTIVE_LOOP]) == 0\n\n\ndef test_single_state_featurizer_creates_encoded_all_actions():\n domain = Domain(\n intents=[],\n entities=[],\n slots=[],\n templates={},\n forms={},\n action_names=[\"a\", \"b\", \"c\", \"d\"],\n )\n\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n encoded_actions = f.encode_all_actions(domain, RegexInterpreter())\n\n assert len(encoded_actions) == len(domain.action_names_or_texts)\n assert all(\n [\n ACTION_NAME in encoded_action and ACTION_TEXT not in encoded_action\n for encoded_action in encoded_actions\n ]\n )\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_entity_roles_and_groups(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n # TODO roles and groups are not supported in e2e yet\n domain = Domain(\n intents=[],\n entities=[\"city\", f\"city{ENTITY_LABEL_SEPARATOR}to\"],\n slots=[],\n templates={},\n forms={},\n 
action_names=[],\n )\n f = SingleStateFeaturizer()\n f.prepare_for_training(domain, RegexInterpreter())\n encoded = f.encode_entities(\n {\n TEXT: \"I am flying from London to Paris\",\n ENTITIES: [\n {\n ENTITY_ATTRIBUTE_TYPE: \"city\",\n ENTITY_ATTRIBUTE_VALUE: \"London\",\n ENTITY_ATTRIBUTE_START: 17,\n ENTITY_ATTRIBUTE_END: 23,\n },\n {\n ENTITY_ATTRIBUTE_TYPE: f\"city{ENTITY_LABEL_SEPARATOR}to\",\n ENTITY_ATTRIBUTE_VALUE: \"Paris\",\n ENTITY_ATTRIBUTE_START: 27,\n ENTITY_ATTRIBUTE_END: 32,\n },\n ],\n },\n interpreter=interpreter,\n )\n assert sorted(list(encoded.keys())) == sorted([ENTITY_TAGS])\n assert np.all(\n encoded[ENTITY_TAGS][0].features == [[0], [0], [0], [0], [1], [0], [2]]\n )\n\n\ndef test_single_state_featurizer_uses_dtype_float():\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n\n encoded = f.encode_state(\n {\n \"user\": {\"intent\": \"a\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_name\": \"d\"},\n },\n interpreter=RegexInterpreter(),\n )\n\n assert encoded[ACTION_NAME][0].features.dtype == np.float32\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_with_action_listen(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"greet\": 0, \"inform\": 1}\n f._default_feature_states[ENTITIES] = {\n \"city\": 0,\n \"name\": 1,\n f\"city{ENTITY_LABEL_SEPARATOR}to\": 2,\n f\"city{ENTITY_LABEL_SEPARATOR}from\": 3,\n }\n f._default_feature_states[ACTION_NAME] = {\n \"utter_ask_where_to\": 0,\n \"utter_greet\": 1,\n \"action_listen\": 2,\n }\n # `_0` in slots represent feature dimension\n f._default_feature_states[SLOTS] = {\"slot_1_0\": 0, \"slot_2_0\": 1, \"slot_3_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\n \"active_loop_1\": 0,\n \"active_loop_2\": 1,\n \"active_loop_3\": 2,\n \"active_loop_4\": 3,\n }\n encoded = f.encode_state(\n {\n \"user\": {\n \"text\": \"I am flying from London to Paris\",\n \"intent\": \"inform\",\n \"entities\": [\"city\", f\"city{ENTITY_LABEL_SEPARATOR}to\"],\n },\n \"prev_action\": {\n \"action_name\": \"action_listen\",\n \"action_text\": \"throw a ball\",\n },\n \"active_loop\": {\"name\": \"active_loop_4\"},\n \"slots\": {\"slot_1\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n # check all the features are encoded and *_text features are encoded by a\n # dense featurizer\n assert sorted(list(encoded.keys())) == sorted(\n [TEXT, ENTITIES, ACTION_NAME, SLOTS, ACTIVE_LOOP, INTENT, ACTION_TEXT]\n )\n assert encoded[TEXT][0].features.shape[-1] == 300\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 1]])).nnz == 0\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])\n ).nnz == 0\n assert encoded[ENTITIES][0].features.shape[-1] == 4\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_not_with_action_listen(\n 
unpacked_trained_moodbot_path: Text,\n):\n # check that user features are ignored when action_name is not action_listen\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"a ball\", \"intent\": \"b\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_name\": \"d\", \"action_text\": \"throw a ball\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n # check user input is ignored when action is not action_listen\n assert list(encoded.keys()) == [ACTION_TEXT, ACTION_NAME, ACTIVE_LOOP, SLOTS]\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (\n encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])\n ).nnz == 0\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_with_interpreter_state_with_no_action_name(\n unpacked_trained_moodbot_path: Text,\n):\n # check that action name features are not added by the featurizer when not\n # present in the state and\n # check user input is ignored when action is not action_listen\n # and action_name is features are not added\n from rasa.core.agent import Agent\n\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n\n f = SingleStateFeaturizer()\n f._default_feature_states[INTENT] = {\"a\": 0, \"b\": 1}\n f._default_feature_states[ENTITIES] = {\"c\": 0}\n f._default_feature_states[ACTION_NAME] = {\"e\": 0, \"d\": 1, \"action_listen\": 2}\n f._default_feature_states[SLOTS] = {\"e_0\": 0, \"f_0\": 1, \"g_0\": 2}\n f._default_feature_states[ACTIVE_LOOP] = {\"h\": 0, \"i\": 1, \"j\": 2, \"k\": 3}\n\n encoded = f.encode_state(\n {\n \"user\": {\"text\": \"a ball\", \"intent\": \"b\", \"entities\": [\"c\"]},\n \"prev_action\": {\"action_text\": \"throw a ball\"},\n \"active_loop\": {\"name\": \"k\"},\n \"slots\": {\"e\": (1.0,)},\n },\n interpreter=interpreter,\n )\n\n assert list(encoded.keys()) == [ACTION_TEXT, ACTIVE_LOOP, SLOTS]\n assert encoded[ACTION_TEXT][0].features.shape[-1] == 300\n assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0\n assert (\n encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])\n ).nnz == 0\n\n\ndef test_state_features_for_attribute_raises_on_not_supported_attribute():\n f = SingleStateFeaturizer()\n\n with pytest.raises(ValueError):\n f._state_features_for_attribute({}, \"not-supported-attribute\")\n\n\ndef test_to_sparse_sentence_features():\n features = [\n Features(\n scipy.sparse.csr_matrix(np.random.randint(5, size=(5, 10))),\n FEATURE_TYPE_SEQUENCE,\n TEXT,\n \"some-featurizer\",\n )\n ]\n\n sentence_features = SingleStateFeaturizer._to_sparse_sentence_features(features)\n\n assert len(sentence_features) == 1\n assert FEATURE_TYPE_SENTENCE == sentence_features[0].type\n assert features[0].origin == 
sentence_features[0].origin\n assert features[0].attribute == sentence_features[0].attribute\n assert sentence_features[0].features.shape == (1, 10)\n\n\[email protected](300) # these can take a longer time than the default timeout\ndef test_single_state_featurizer_uses_regex_interpreter(\n unpacked_trained_moodbot_path: Text,\n):\n from rasa.core.agent import Agent\n\n domain = Domain(\n intents=[], entities=[], slots=[], templates={}, forms=[], action_names=[],\n )\n f = SingleStateFeaturizer()\n # simulate that core was trained separately by passing\n # RegexInterpreter to prepare_for_training\n f.prepare_for_training(domain, RegexInterpreter())\n # simulate that nlu and core models were manually combined for prediction\n # by passing trained interpreter to encode_all_actions\n interpreter = Agent.load(unpacked_trained_moodbot_path).interpreter\n features = f._extract_state_features({TEXT: \"some text\"}, interpreter)\n # RegexInterpreter cannot create features for text, therefore since featurizer\n # was trained without nlu, features for text should be empty\n assert not features\n" ]
[ [ "numpy.all", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohammadzainabbas/tensorflow
[ "049dfd5e070cfa84c82eea71c6c746a70cba4a3f", "352142267a1a151b04c6198de83b40b7e979d1d8", "352142267a1a151b04c6198de83b40b7e979d1d8", "8e86dcd1c59bb3f1dc978fcb5398dd3f2f51d9ad" ]
[ "tensorflow/contrib/eager/python/datasets_test.py", "tensorflow/python/kernel_tests/manip_ops_test.py", "tensorflow/python/keras/_impl/keras/engine/training_test.py", "tensorflow/python/grappler/tf_optimizer_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport threading\nimport time\n\nimport numpy as np\n\nfrom tensorflow.contrib import lookup\nfrom tensorflow.contrib.data.python.ops import prefetching_ops\nfrom tensorflow.contrib.data.python.ops import threadpool\nfrom tensorflow.contrib.data.python.ops import unique\nfrom tensorflow.contrib.eager.python import checkpointable_utils\nfrom tensorflow.contrib.eager.python import datasets\nfrom tensorflow.python.data import Dataset\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\n\n\nclass IteratorTest(test.TestCase):\n\n def testBasic(self):\n got = []\n for t in datasets.Iterator(Dataset.range(4)):\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testBasicOneShotIterator(self):\n got = []\n for t in Dataset.range(4).make_one_shot_iterator():\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testBasicImplicitIterator(self):\n got = []\n for t in Dataset.range(4):\n got.append(t.numpy())\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testGetNext(self):\n iterator = datasets.Iterator(Dataset.range(4))\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, iterator.get_next().numpy())\n self.assertEqual(2, iterator.get_next().numpy())\n self.assertEqual(3, iterator.get_next().numpy())\n with self.assertRaises(errors.OutOfRangeError):\n iterator.get_next()\n\n def testGetNextOneShotIterator(self):\n iterator = Dataset.range(4).make_one_shot_iterator()\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, iterator.get_next().numpy())\n self.assertEqual(2, iterator.get_next().numpy())\n self.assertEqual(3, iterator.get_next().numpy())\n with self.assertRaises(errors.OutOfRangeError):\n iterator.get_next()\n\n def testMultipleIteratorsOnTheSameDataset(self):\n ds = Dataset.range(4)\n it1 = datasets.Iterator(ds)\n it2 = datasets.Iterator(ds)\n got = [x.numpy() for x in it1]\n self.assertAllEqual([0, 1, 2, 3], got)\n\n got = [x.numpy() for x in it2]\n self.assertAllEqual([0, 1, 2, 3], got)\n\n def testNestedOutputs(self):\n ds = Dataset.zip((Dataset.range(4), Dataset.zip((Dataset.range(4),\n Dataset.range(4)))))\n total = 0\n # The Iterator will return a nested structure of Tensor objects.\n # Some funkiness to compare against simple integers.\n for (i, x) in enumerate(datasets.Iterator(ds)):\n want = (i, (i, i))\n got = (x[0].numpy(), (x[1][0].numpy(), 
x[1][1].numpy()))\n self.assertEqual(got, want)\n total += 1\n self.assertEqual(4, total)\n\n def testMapAndFilter(self):\n def even(x):\n return math_ops.equal(math_ops.mod(x, 2), 0)\n\n it = datasets.Iterator(Dataset.range(8).map(math_ops.square).filter(even))\n got = [x.numpy() for x in it]\n self.assertAllEqual([0, 4, 16, 36], got)\n\n def testMapCaptureLookupTable(self):\n default_val = -1\n keys = constant_op.constant(['brain', 'salad', 'surgery'])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup.HashTable(\n lookup.KeyValueTensorInitializer(keys, values), default_val)\n dataset = Dataset.from_tensor_slices(['brain', 'salad', 'surgery'])\n dataset = dataset.map(table.lookup)\n it = datasets.Iterator(dataset)\n got = [x.numpy() for x in it]\n self.assertAllEqual([0, 1, 2], got)\n\n def testMultipleIteratorsOnADatasetThatUsesFunctions(self):\n ds = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(math_ops.square)\n\n got1 = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual([1, 4, 9, 16, 25, 36], got1)\n got2 = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual(got1, got2)\n\n def assertSparseValuesEqual(self, a, b):\n self.assertAllEqual(a.indices, b.indices)\n self.assertAllEqual(a.values, b.values)\n self.assertAllEqual(a.dense_shape, b.dense_shape)\n\n def testSparseTensorElements(self):\n components = (sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 0], [2, 0]]),\n values=np.array([0, 0, 0]),\n dense_shape=np.array([3, 1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1], [2, 2]]),\n values=np.array([1, 2, 3]),\n dense_shape=np.array([3, 3])))\n\n expected = [\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([1]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[1]]),\n values=np.array([2]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[2]]),\n values=np.array([3]),\n dense_shape=np.array([3]))),\n ]\n\n for i, result in enumerate(\n datasets.Iterator(Dataset.from_tensor_slices(components))):\n self.assertSparseValuesEqual(expected[i][0], result[0])\n self.assertSparseValuesEqual(expected[i][1], result[1])\n\n def testPyFunc(self):\n\n def my_map(inp):\n return [[x + 1 for x in inp]]\n\n ds = Dataset.range(4).map(\n lambda x: script_ops.py_func(my_map, [[x]], dtypes.int64))\n got = [x.numpy() for x in datasets.Iterator(ds)]\n self.assertAllEqual([[1], [2], [3], [4]], got)\n\n def testTensorsPlacedOnDevice(self):\n ds = Dataset.from_tensors([0., 1.])\n with ops.device(test.gpu_device_name()):\n x = datasets.Iterator(ds).next()\n x = math_ops.add(x, x)\n self.assertAllEqual([0., 2.], x.numpy())\n\n def testTensorsExplicitPrefetchToDevice(self):\n ds = Dataset.from_tensor_slices([0., 1.])\n ds = ds.apply(prefetching_ops.prefetch_to_device(test.gpu_device_name()))\n\n with self.assertRaisesRegexp(TypeError, 'prefetch_to_device'):\n datasets.Iterator(ds)\n\n for i, x in enumerate(ds):\n with ops.device(test.gpu_device_name()):\n x = math_ops.add(x, x)\n self.assertEqual(float(i) + float(i), x.numpy())\n\n def 
testOverrideThreadPool(self):\n\n def get_thread_id(_):\n # Python creates a dummy thread object to represent the current\n # thread when called from an \"alien\" thread (such as a\n # `PrivateThreadPool` thread in this case). It does not include\n # the TensorFlow-given display name, but it has a unique\n # identifier that maps one-to-one with the underlying OS thread.\n return np.array(threading.current_thread().ident).astype(np.int64)\n\n for num_threads in [1, 2, 4, 8, 16]:\n\n dataset = (\n Dataset.range(1000).map(\n lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),\n num_parallel_calls=32).apply(unique.unique()))\n\n dataset = threadpool.override_threadpool(\n dataset,\n threadpool.PrivateThreadPool(\n num_threads, display_name='private_thread_pool_%d' % num_threads))\n\n thread_ids = []\n for next_element in datasets.Iterator(dataset):\n thread_ids.append(next_element)\n self.assertEqual(len(thread_ids), len(set(thread_ids)))\n self.assertGreater(len(thread_ids), 0)\n # NOTE(mrry): We don't control the thread pool scheduling, and\n # so cannot guarantee that all of the threads in the pool will\n # perform work.\n self.assertLessEqual(len(thread_ids), num_threads)\n\n def testSaveRestore(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n dataset = dataset.map(math_ops.square).batch(2)\n iterator = datasets.Iterator(dataset)\n checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)\n self.assertAllEqual([1, 4], iterator.get_next().numpy())\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertAllEqual([9, 16], iterator.get_next().numpy())\n self.assertAllEqual([25, 36], iterator.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertAllEqual([9, 16], iterator.get_next().numpy())\n self.assertAllEqual([25, 36], iterator.get_next().numpy())\n\n def testSaveRestoreMultipleIterator(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n dataset = dataset.map(math_ops.square).batch(2)\n iterator_1 = datasets.Iterator(dataset)\n iterator_2 = datasets.Iterator(dataset)\n dataset_2 = Dataset.range(10)\n iterator_3 = datasets.Iterator(dataset_2)\n\n checkpoint = checkpointable_utils.Checkpoint(\n iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)\n self.assertAllEqual([1, 4], iterator_1.get_next().numpy())\n self.assertEqual(0, iterator_3.get_next().numpy())\n self.assertEqual(1, iterator_3.get_next().numpy())\n self.assertEqual(2, iterator_3.get_next().numpy())\n\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertAllEqual([1, 4], iterator_2.get_next().numpy())\n self.assertAllEqual([9, 16], iterator_2.get_next().numpy())\n self.assertEqual(3, iterator_3.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertAllEqual([9, 16], iterator_1.get_next().numpy())\n self.assertAllEqual([1, 4], iterator_2.get_next().numpy())\n self.assertEqual(3, iterator_3.get_next().numpy())\n\n def testRestoreExhaustedIterator(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n dataset = Dataset.range(3)\n iterator = datasets.Iterator(dataset)\n\n checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)\n self.assertEqual(0, iterator.get_next().numpy())\n self.assertEqual(1, 
iterator.get_next().numpy())\n save_path = checkpoint.save(checkpoint_prefix)\n self.assertEqual(2, iterator.get_next().numpy())\n checkpoint.restore(save_path)\n self.assertEqual(2, iterator.get_next().numpy())\n\n\nclass DatasetConstructorBenchmark(test.Benchmark):\n\n def benchmarkSliceRepeatBatchEager(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n Dataset.from_tensor_slices(input_data).repeat(num_epochs)\n .batch(batch_size))\n iterator = datasets.Iterator(dataset)\n\n ends = [time.time()]\n for _ in iterator:\n ends.append(time.time())\n\n deltas = np.ediff1d(ends)\n median_wall_time = np.median(deltas)\n print(\n 'Slice/repeat/batch eager input size: %d batch size: %d Median wall '\n 'time per element: %f'\n % (input_size, batch_size, median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name='benchmark_slice_repeat_batch_eager_input_%d_batch_%d' %\n (input_size, batch_size))\n\n def benchmarkSliceBatchCacheRepeatCallable(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n Dataset.from_tensor_slices(input_data).batch(batch_size).cache()\n .repeat(num_epochs))\n iterator = datasets.Iterator(dataset)\n\n ends = [time.time()]\n for _ in iterator:\n ends.append(time.time())\n\n deltas = np.ediff1d(ends)\n median_wall_time = np.median(deltas)\n print(\n 'Slice/batch/cache/repeat eager input size: %d batch size: %d Median '\n 'wall time per element: %f'\n % (input_size, batch_size, median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name='benchmark_slice_batch_cache_repeat_eager_input_%d_batch_%d' %\n (input_size, batch_size))\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for manip_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import manip_ops\nfrom tensorflow.python.platform import test as test_lib\n\n# pylint: disable=g-import-not-at-top\ntry:\n  from distutils.version import StrictVersion as Version\n  # numpy.roll for multiple shifts was introduced in numpy version 1.12.0\n  NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version(\"1.12.0\")\nexcept ImportError:\n  NP_ROLL_CAN_MULTISHIFT = False\n# pylint: enable=g-import-not-at-top\n\n\nclass RollTest(test_util.TensorFlowTestCase):\n\n  def _testRoll(self, np_input, shift, axis):\n    expected_roll = np.roll(np_input, shift, axis)\n    with self.test_session():\n      roll = manip_ops.roll(np_input, shift, axis)\n      self.assertAllEqual(roll.eval(), expected_roll)\n\n  def _testGradient(self, np_input, shift, axis):\n    with self.test_session():\n      inx = constant_op.constant(np_input.tolist())\n      xs = list(np_input.shape)\n      y = manip_ops.roll(inx, shift, axis)\n      # Expected y's shape to be the same\n      ys = xs\n      jacob_t, jacob_n = gradient_checker.compute_gradient(\n          inx, xs, y, ys, x_init_value=np_input)\n      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n  def _testAll(self, np_input, shift, axis):\n    self._testRoll(np_input, shift, axis)\n    if np_input.dtype == np.float32:\n      self._testGradient(np_input, shift, axis)\n\n  def testIntTypes(self):\n    for t in [np.int32, np.int64]:\n      self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)\n      if NP_ROLL_CAN_MULTISHIFT:\n        self._testAll(\n            np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],\n            [0, 1, 2])\n        self._testAll(\n            np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],\n            [1, 2, 3])\n\n  def testFloatTypes(self):\n    for t in [np.float32, np.float64]:\n      self._testAll(np.random.rand(5).astype(t), 2, 0)\n      if NP_ROLL_CAN_MULTISHIFT:\n        self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])\n        self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])\n\n  def testComplexTypes(self):\n    for t in [np.complex64, np.complex128]:\n      x = np.random.rand(4, 4).astype(t)\n      self._testAll(x + 1j * x, 2, 0)\n      if NP_ROLL_CAN_MULTISHIFT:\n        x = np.random.rand(2, 5).astype(t)\n        self._testAll(x + 1j * x, [1, 2], [1, 0])\n        x = np.random.rand(3, 2, 1, 1).astype(t)\n        self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])\n\n  def testNegativeAxis(self):\n    self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)\n    self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)\n    # Make sure a negative axis satisfies 0 <= axis + 
dims < dims\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),\n 3, -10).eval()\n\n def testRollInputMustVectorHigherRaises(self):\n tensor = 7\n shift = 1\n axis = 0\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"input must be 1-D or higher\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n def testRollAxisMustBeScalarOrVectorRaises(self):\n tensor = [[1, 2], [3, 4]]\n shift = 1\n axis = [[0, 1]]\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"axis must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n def testRollShiftMustBeScalarOrVectorRaises(self):\n tensor = [[1, 2], [3, 4]]\n shift = [[0, 1]]\n axis = 1\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n def testRollShiftAndAxisMustBeSameSizeRaises(self):\n tensor = [[1, 2], [3, 4]]\n shift = [1]\n axis = [0, 1]\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift and axis must have the same size\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n def testRollAxisOutOfRangeRaises(self):\n tensor = [1, 2]\n shift = 1\n axis = 1\n with self.test_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n\nif __name__ == \"__main__\":\n test_lib.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom tensorflow.python.keras._impl import keras\nfrom tensorflow.python.keras._impl.keras import testing_utils\nfrom tensorflow.python.keras._impl.keras.engine.training_utils import weighted_masked_objective\nfrom tensorflow.python.keras._impl.keras.utils.generic_utils import slice_arrays\nfrom tensorflow.python.platform import test\n\ntry:\n import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top\nexcept ImportError:\n scipy_sparse = None\n\n\nclass TrainingTest(test.TestCase):\n\n def test_fit_on_arrays(self):\n with self.test_session():\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(3,), name='input_b')\n\n dense = keras.layers.Dense(4, name='dense')\n c = dense(a)\n d = dense(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n\n model = keras.models.Model([a, b], [d, e])\n\n optimizer = 'rmsprop'\n loss = 'mse'\n loss_weights = [1., 0.5]\n metrics = ['mae']\n model.compile(optimizer, 
loss, metrics=metrics, loss_weights=loss_weights)\n\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n\n output_d_np = np.random.random((10, 4))\n output_e_np = np.random.random((10, 4))\n\n # Test fit at different verbosity\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=1,\n batch_size=5,\n verbose=0)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=1,\n batch_size=5,\n verbose=1)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=2,\n batch_size=5,\n verbose=2)\n model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])\n\n # Test model with input data as a list of lists\n model.fit(\n [np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],\n [output_d_np, output_e_np],\n epochs=2,\n batch_size=5,\n verbose=2)\n\n # Test with validation data\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n validation_data=([input_a_np, input_b_np], [output_d_np,\n output_e_np]),\n epochs=1,\n batch_size=5,\n verbose=0)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n validation_data=([input_a_np, input_b_np], [output_d_np,\n output_e_np]),\n epochs=2,\n batch_size=5,\n verbose=1)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n validation_data=([input_a_np, input_b_np], [output_d_np,\n output_e_np]),\n epochs=2,\n batch_size=5,\n verbose=2)\n # Test with validation split\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=2,\n batch_size=5,\n verbose=0,\n validation_split=0.2)\n\n # Test with dictionary inputs\n model.fit(\n {\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np},\n epochs=1,\n batch_size=5,\n verbose=0)\n model.fit(\n {\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np},\n epochs=1,\n batch_size=5,\n verbose=1)\n model.fit(\n {\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np},\n validation_data=({\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {\n 'dense': output_d_np,\n 'dropout': output_e_np\n }),\n epochs=1,\n batch_size=5,\n verbose=0)\n model.train_on_batch({\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np})\n\n # Test with lists for loss, metrics\n loss = ['mae', 'mse']\n metrics = ['acc', 'mae']\n model.compile(optimizer, loss, metrics=metrics)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=1,\n batch_size=5,\n verbose=0)\n\n # Test with dictionaries for loss, metrics, loss weights\n loss = {'dense': 'mse', 'dropout': 'mae'}\n loss_weights = {'dense': 1., 'dropout': 0.5}\n metrics = {'dense': 'mse', 'dropout': 'mae'}\n model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=1,\n batch_size=5,\n verbose=0)\n\n # Invalid use cases\n with self.assertRaises(ValueError):\n model.train_on_batch({'input_a': input_a_np},\n [output_d_np, output_e_np])\n with self.assertRaises(AttributeError):\n model.fit(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n epochs=1,\n validation_data=([input_a_np, input_b_np], 0, 0),\n verbose=0)\n with self.assertRaises(ValueError):\n model.train_on_batch([input_a_np], [output_d_np, output_e_np])\n with self.assertRaises(AttributeError):\n model.train_on_batch(1, [output_d_np, 
output_e_np])\n with self.assertRaises(ValueError):\n model.train_on_batch(input_a_np, [output_d_np, output_e_np])\n with self.assertRaises(ValueError):\n bad_input = np.random.random((11, 3))\n model.train_on_batch([bad_input, input_b_np],\n [output_d_np, output_e_np])\n with self.assertRaises(ValueError):\n bad_target = np.random.random((11, 4))\n model.train_on_batch([input_a_np, input_b_np],\n [bad_target, output_e_np])\n\n # Build single-input model\n x = keras.layers.Input(shape=(3,), name='input_a')\n y = keras.layers.Dense(4)(x)\n model = keras.models.Model(x, y)\n model.compile(optimizer='rmsprop', loss='mse')\n # This will work\n model.fit([input_a_np], output_d_np, epochs=1)\n with self.assertRaises(ValueError):\n model.fit([input_a_np, input_a_np], output_d_np, epochs=1)\n\n # Test model on a list of floats\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 4))\n\n model.fit([np.ndarray.tolist(input_a_np)],\n [np.ndarray.tolist(input_b_np)],\n epochs=2,\n batch_size=5,\n verbose=2)\n\n def test_evaluate_predict_on_arrays(self):\n with self.test_session():\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(3,), name='input_b')\n\n dense = keras.layers.Dense(4, name='dense')\n c = dense(a)\n d = dense(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n\n model = keras.models.Model([a, b], [d, e])\n\n optimizer = 'rmsprop'\n loss = 'mse'\n loss_weights = [1., 0.5]\n metrics = ['mae']\n model.compile(\n optimizer,\n loss,\n metrics=metrics,\n loss_weights=loss_weights,\n sample_weight_mode=None)\n\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n\n output_d_np = np.random.random((10, 4))\n output_e_np = np.random.random((10, 4))\n\n # Test evaluate at different verbosity\n out = model.evaluate(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n batch_size=5,\n verbose=0)\n self.assertEqual(len(out), 5)\n out = model.evaluate(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n batch_size=5,\n verbose=1)\n self.assertEqual(len(out), 5)\n out = model.evaluate(\n [input_a_np, input_b_np], [output_d_np, output_e_np],\n batch_size=5,\n verbose=2)\n self.assertEqual(len(out), 5)\n out = model.test_on_batch([input_a_np, input_b_np],\n [output_d_np, output_e_np])\n self.assertEqual(len(out), 5)\n\n # Test evaluate with dictionary inputs\n model.evaluate(\n {\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np},\n batch_size=5,\n verbose=0)\n model.evaluate(\n {\n 'input_a': input_a_np,\n 'input_b': input_b_np\n }, {'dense': output_d_np,\n 'dropout': output_e_np},\n batch_size=5,\n verbose=1)\n\n # Test predict\n out = model.predict([input_a_np, input_b_np], batch_size=5)\n self.assertEqual(len(out), 2)\n out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})\n self.assertEqual(len(out), 2)\n out = model.predict_on_batch({\n 'input_a': input_a_np,\n 'input_b': input_b_np\n })\n self.assertEqual(len(out), 2)\n\n def test_invalid_loss_or_metrics(self):\n num_classes = 5\n train_samples = 1000\n test_samples = 1000\n input_dim = 5\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(10, input_shape=(input_dim,)))\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Dense(num_classes))\n model.add(keras.layers.Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n np.random.seed(1337)\n (x_train, y_train), (_, _) = 
testing_utils.get_test_data(\n train_samples=train_samples,\n test_samples=test_samples,\n input_shape=(input_dim,),\n num_classes=num_classes)\n with self.assertRaises(ValueError):\n model.fit(x_train, y_train)\n\n with self.assertRaises(ValueError):\n model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))\n\n with self.assertRaises(TypeError):\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=set(0))\n\n with self.assertRaises(ValueError):\n model.compile(loss=None,\n optimizer='rmsprop')\n\n def test_training_on_sparse_data_with_dense_placeholders(self):\n if scipy_sparse is None:\n return\n\n with self.test_session():\n test_inputs = [\n scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]\n test_outputs = [\n scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]\n in1 = keras.layers.Input(shape=(3,))\n in2 = keras.layers.Input(shape=(3,))\n out1 = keras.layers.Dropout(0.5, name='dropout')(in1)\n out2 = keras.layers.Dense(4, name='dense_1')(in2)\n model = keras.Model([in1, in2], [out1, out2])\n model.predict(test_inputs, batch_size=2)\n model.compile('rmsprop', 'mse')\n model.fit(test_inputs, test_outputs,\n epochs=1, batch_size=2, validation_split=0.5)\n model.evaluate(test_inputs, test_outputs, batch_size=2)\n\n def test_that_trainable_disables_updates(self):\n val_a = np.random.random((10, 4))\n val_out = np.random.random((10, 4))\n\n with self.test_session():\n a = keras.layers.Input(shape=(4,))\n layer = keras.layers.BatchNormalization(input_shape=(4,))\n b = layer(a)\n model = keras.Model(a, b)\n\n model.trainable = False\n assert not model.updates\n\n model.compile('sgd', 'mse')\n assert not model.updates\n\n x1 = model.predict(val_a)\n model.train_on_batch(val_a, val_out)\n x2 = model.predict(val_a)\n self.assertAllClose(x1, x2, atol=1e-7)\n\n model.trainable = True\n model.compile('sgd', 'mse')\n assert model.updates\n\n model.train_on_batch(val_a, val_out)\n x2 = model.predict(val_a)\n assert np.abs(np.sum(x1 - x2)) > 1e-5\n\n layer.trainable = False\n model.compile('sgd', 'mse')\n assert not model.updates\n\n x1 = model.predict(val_a)\n model.train_on_batch(val_a, val_out)\n x2 = model.predict(val_a)\n self.assertAllClose(x1, x2, atol=1e-7)\n\n\nclass LossWeightingTest(test.TestCase):\n\n def test_class_weights(self):\n num_classes = 5\n batch_size = 5\n epochs = 5\n weighted_class = 3\n train_samples = 1000\n test_samples = 1000\n input_dim = 5\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(10, input_shape=(input_dim,)))\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Dense(num_classes))\n model.add(keras.layers.Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=train_samples,\n test_samples=test_samples,\n input_shape=(input_dim,),\n num_classes=num_classes)\n int_y_test = y_test.copy()\n int_y_train = y_train.copy()\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n test_ids = np.where(int_y_test == np.array(weighted_class))[0]\n\n class_weight = dict([(i, 1.) 
for i in range(num_classes)])\n class_weight[weighted_class] = 2.\n\n sample_weight = np.ones((y_train.shape[0]))\n sample_weight[int_y_train == weighted_class] = 2.\n\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs // 3,\n verbose=0,\n class_weight=class_weight,\n validation_data=(x_train, y_train, sample_weight))\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs // 2,\n verbose=0,\n class_weight=class_weight)\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs // 2,\n verbose=0,\n class_weight=class_weight,\n validation_split=0.1)\n\n model.train_on_batch(\n x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)\n ref_score = model.evaluate(x_test, y_test, verbose=0)\n score = model.evaluate(\n x_test[test_ids, :], y_test[test_ids, :], verbose=0)\n self.assertLess(score, ref_score)\n\n def test_sample_weights(self):\n num_classes = 5\n batch_size = 5\n epochs = 5\n weighted_class = 3\n train_samples = 1000\n test_samples = 1000\n input_dim = 5\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(10, input_shape=(input_dim,)))\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Dense(num_classes))\n model.add(keras.layers.Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n np.random.seed(43)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=train_samples,\n test_samples=test_samples,\n input_shape=(input_dim,),\n num_classes=num_classes)\n int_y_test = y_test.copy()\n int_y_train = y_train.copy()\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n test_ids = np.where(int_y_test == np.array(weighted_class))[0]\n\n class_weight = dict([(i, 1.) 
for i in range(num_classes)])\n class_weight[weighted_class] = 2.\n\n sample_weight = np.ones((y_train.shape[0]))\n sample_weight[int_y_train == weighted_class] = 2.\n\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs // 3,\n verbose=0,\n sample_weight=sample_weight)\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs // 3,\n verbose=0,\n sample_weight=sample_weight,\n validation_split=0.1)\n\n model.train_on_batch(\n x_train[:batch_size],\n y_train[:batch_size],\n sample_weight=sample_weight[:batch_size])\n model.test_on_batch(\n x_train[:batch_size],\n y_train[:batch_size],\n sample_weight=sample_weight[:batch_size])\n ref_score = model.evaluate(x_test, y_test, verbose=0)\n score = model.evaluate(\n x_test[test_ids, :], y_test[test_ids, :], verbose=0)\n self.assertLess(score, ref_score)\n\n def test_temporal_sample_weights(self):\n num_classes = 5\n batch_size = 5\n epochs = 5\n weighted_class = 3\n train_samples = 1000\n test_samples = 1000\n input_dim = 5\n timesteps = 3\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(\n keras.layers.TimeDistributed(\n keras.layers.Dense(num_classes),\n input_shape=(timesteps, input_dim)))\n model.add(keras.layers.Activation('softmax'))\n\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=train_samples,\n test_samples=test_samples,\n input_shape=(input_dim,),\n num_classes=num_classes)\n int_y_test = y_test.copy()\n int_y_train = y_train.copy()\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n test_ids = np.where(int_y_test == np.array(weighted_class))[0]\n\n class_weight = dict([(i, 1.) 
for i in range(num_classes)])\n class_weight[weighted_class] = 2.\n\n sample_weight = np.ones((y_train.shape[0]))\n sample_weight[int_y_train == weighted_class] = 2.\n\n temporal_x_train = np.reshape(x_train, (len(x_train), 1,\n x_train.shape[1]))\n temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)\n temporal_x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1]))\n temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)\n\n temporal_y_train = np.reshape(y_train, (len(y_train), 1,\n y_train.shape[1]))\n temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)\n temporal_y_test = np.reshape(y_test, (len(y_test), 1, y_test.shape[1]))\n temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)\n\n temporal_sample_weight = np.reshape(sample_weight, (len(sample_weight),\n 1))\n temporal_sample_weight = np.repeat(\n temporal_sample_weight, timesteps, axis=1)\n\n model.compile(\n loss='binary_crossentropy',\n optimizer='rmsprop',\n sample_weight_mode='temporal')\n\n model.fit(\n temporal_x_train,\n temporal_y_train,\n batch_size=batch_size,\n epochs=epochs // 3,\n verbose=0,\n sample_weight=temporal_sample_weight)\n model.fit(\n temporal_x_train,\n temporal_y_train,\n batch_size=batch_size,\n epochs=epochs // 3,\n verbose=0,\n sample_weight=temporal_sample_weight,\n validation_split=0.1)\n\n model.train_on_batch(\n temporal_x_train[:batch_size],\n temporal_y_train[:batch_size],\n sample_weight=temporal_sample_weight[:batch_size])\n model.test_on_batch(\n temporal_x_train[:batch_size],\n temporal_y_train[:batch_size],\n sample_weight=temporal_sample_weight[:batch_size])\n ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0)\n score = model.evaluate(\n temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0)\n self.assertLess(score, ref_score)\n\n def test_class_weight_invalid_use_case(self):\n num_classes = 5\n train_samples = 1000\n test_samples = 1000\n input_dim = 5\n timesteps = 3\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(\n keras.layers.TimeDistributed(\n keras.layers.Dense(num_classes),\n input_shape=(timesteps, input_dim)))\n model.add(keras.layers.Activation('softmax'))\n model.compile(\n loss='binary_crossentropy',\n optimizer='rmsprop')\n\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=train_samples,\n test_samples=test_samples,\n input_shape=(input_dim,),\n num_classes=num_classes)\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n class_weight = dict([(i, 1.) 
for i in range(num_classes)])\n\n del class_weight[1]\n with self.assertRaises(ValueError):\n model.fit(x_train, y_train,\n epochs=0, verbose=0, class_weight=class_weight)\n\n with self.assertRaises(ValueError):\n model.compile(\n loss='binary_crossentropy',\n optimizer='rmsprop',\n sample_weight_mode=[])\n\n # Build multi-output model\n x = keras.Input((3,))\n y1 = keras.layers.Dense(4, name='1')(x)\n y2 = keras.layers.Dense(4, name='2')(x)\n model = keras.models.Model(x, [y1, y2])\n model.compile(optimizer='rmsprop', loss='mse')\n x_np = np.random.random((10, 3))\n y_np = np.random.random((10, 4))\n w_np = np.random.random((10,))\n # This will work\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight={'1': w_np})\n # These will not\n with self.assertRaises(ValueError):\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight=[w_np])\n with self.assertRaises(TypeError):\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight=w_np)\n with self.assertRaises(ValueError):\n bad_w_np = np.random.random((11,))\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight={'1': bad_w_np})\n with self.assertRaises(ValueError):\n bad_w_np = np.random.random((10, 2))\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight={'1': bad_w_np})\n with self.assertRaises(ValueError):\n bad_w_np = np.random.random((10, 2, 2))\n model.fit(x_np, [y_np, y_np], epochs=1,\n sample_weight={'1': bad_w_np})\n\n\nclass LossMaskingTest(test.TestCase):\n\n def test_masking(self):\n with self.test_session():\n np.random.seed(1337)\n x = np.array([[[1], [1]], [[0], [0]]])\n model = keras.models.Sequential()\n model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))\n model.add(\n keras.layers.TimeDistributed(\n keras.layers.Dense(1, kernel_initializer='one')))\n model.compile(loss='mse', optimizer='sgd')\n y = np.array([[[1], [1]], [[1], [1]]])\n loss = model.train_on_batch(x, y)\n self.assertEqual(loss, 0)\n\n def test_loss_masking(self):\n with self.test_session():\n weighted_loss = weighted_masked_objective(keras.losses.get('mae'))\n shape = (3, 4, 2)\n x = np.arange(24).reshape(shape)\n y = 2 * x\n\n # Normally the trailing 1 is added by standardize_weights\n weights = np.ones((3,))\n mask = np.ones((3, 4))\n mask[1, 0] = 0\n\n keras.backend.eval(\n weighted_loss(\n keras.backend.variable(x),\n keras.backend.variable(y),\n keras.backend.variable(weights), keras.backend.variable(mask)))\n\n\nclass TestDynamicTrainability(test.TestCase):\n\n def test_trainable_warning(self):\n with self.test_session():\n x = np.random.random((5, 3))\n y = np.random.random((5, 2))\n\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_dim=3))\n model.trainable = False\n model.compile('rmsprop', 'mse')\n model.trainable = True\n model.train_on_batch(x, y)\n self.assertRaises(Warning)\n\n def test_trainable_argument(self):\n with self.test_session():\n x = np.random.random((5, 3))\n y = np.random.random((5, 2))\n\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_dim=3, trainable=False))\n model.compile('rmsprop', 'mse')\n out = model.predict(x)\n model.train_on_batch(x, y)\n out_2 = model.predict(x)\n self.assertAllClose(out, out_2)\n\n # test with nesting\n inputs = keras.layers.Input(shape=(3,))\n output = model(inputs)\n model = keras.models.Model(inputs, output)\n model.compile('rmsprop', 'mse')\n out = model.predict(x)\n model.train_on_batch(x, y)\n out_2 = model.predict(x)\n self.assertAllClose(out, out_2)\n\n def test_layer_trainability_switch(self):\n with 
self.test_session():\n # with constructor argument, in Sequential\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, trainable=False, input_dim=1))\n self.assertListEqual(model.trainable_weights, [])\n\n # by setting the `trainable` argument, in Sequential\n model = keras.models.Sequential()\n layer = keras.layers.Dense(2, input_dim=1)\n model.add(layer)\n self.assertListEqual(model.trainable_weights, layer.trainable_weights)\n layer.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n\n # with constructor argument, in Model\n x = keras.layers.Input(shape=(1,))\n y = keras.layers.Dense(2, trainable=False)(x)\n model = keras.models.Model(x, y)\n self.assertListEqual(model.trainable_weights, [])\n\n # by setting the `trainable` argument, in Model\n x = keras.layers.Input(shape=(1,))\n layer = keras.layers.Dense(2)\n y = layer(x)\n model = keras.models.Model(x, y)\n self.assertListEqual(model.trainable_weights, layer.trainable_weights)\n layer.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n\n def test_model_trainability_switch(self):\n with self.test_session():\n # a non-trainable model has no trainable weights\n x = keras.layers.Input(shape=(1,))\n y = keras.layers.Dense(2)(x)\n model = keras.models.Model(x, y)\n model.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n\n # same for Sequential\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_dim=1))\n model.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n\n def test_nested_model_trainability(self):\n with self.test_session():\n # a Sequential inside a Model\n inner_model = keras.models.Sequential()\n inner_model.add(keras.layers.Dense(2, input_dim=1))\n\n x = keras.layers.Input(shape=(1,))\n y = inner_model(x)\n outer_model = keras.models.Model(x, y)\n self.assertListEqual(outer_model.trainable_weights,\n inner_model.trainable_weights)\n inner_model.trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n inner_model.trainable = True\n inner_model.layers[-1].trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n\n # a Sequential inside a Sequential\n inner_model = keras.models.Sequential()\n inner_model.add(keras.layers.Dense(2, input_dim=1))\n outer_model = keras.models.Sequential()\n outer_model.add(inner_model)\n self.assertListEqual(outer_model.trainable_weights,\n inner_model.trainable_weights)\n inner_model.trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n inner_model.trainable = True\n inner_model.layers[-1].trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n\n # a Model inside a Model\n x = keras.layers.Input(shape=(1,))\n y = keras.layers.Dense(2)(x)\n inner_model = keras.models.Model(x, y)\n x = keras.layers.Input(shape=(1,))\n y = inner_model(x)\n outer_model = keras.models.Model(x, y)\n self.assertListEqual(outer_model.trainable_weights,\n inner_model.trainable_weights)\n inner_model.trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n inner_model.trainable = True\n inner_model.layers[-1].trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n\n # a Model inside a Sequential\n x = keras.layers.Input(shape=(1,))\n y = keras.layers.Dense(2)(x)\n inner_model = keras.models.Model(x, y)\n outer_model = keras.models.Sequential()\n outer_model.add(inner_model)\n self.assertListEqual(outer_model.trainable_weights,\n inner_model.trainable_weights)\n 
inner_model.trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n inner_model.trainable = True\n inner_model.layers[-1].trainable = False\n self.assertListEqual(outer_model.trainable_weights, [])\n\n\nclass TestGeneratorMethods(test.TestCase):\n\n @unittest.skipIf(\n os.name == 'nt',\n 'use_multiprocessing=True does not work on windows properly.')\n def test_generator_methods(self):\n arr_data = np.random.random((50, 2))\n arr_labels = np.random.random((50,))\n\n def custom_generator():\n batch_size = 10\n num_samples = 50\n while True:\n batch_index = np.random.randint(0, num_samples - batch_size)\n start = batch_index\n end = start + batch_size\n x = arr_data[start: end]\n y = arr_labels[start: end]\n yield x, y\n\n with self.test_session():\n x = keras.Input((2,))\n y = keras.layers.Dense(1)(x)\n fn_model = keras.models.Model(x, y)\n fn_model.compile(loss='mse', optimizer='sgd')\n\n seq_model = keras.models.Sequential()\n seq_model.add(keras.layers.Dense(1, input_shape=(2,)))\n seq_model.compile(loss='mse', optimizer='sgd')\n\n for model in [fn_model, seq_model]:\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n workers=4,\n use_multiprocessing=True)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator(),\n validation_steps=10)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n validation_data=custom_generator(),\n validation_steps=1,\n workers=0)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n workers=2,\n use_multiprocessing=True)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n workers=0)\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n workers=2,\n use_multiprocessing=True)\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False,\n workers=0)\n\n def test_generator_methods_with_sample_weights(self):\n arr_data = np.random.random((50, 2))\n arr_labels = np.random.random((50,))\n arr_sample_weights = np.random.random((50,))\n\n def custom_generator():\n batch_size = 10\n num_samples = 50\n while True:\n batch_index = np.random.randint(0, num_samples - batch_size)\n start = batch_index\n end = start + batch_size\n x = arr_data[start: end]\n y = arr_labels[start: end]\n w = arr_sample_weights[start: end]\n yield x, y, w\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1, input_shape=(2,)))\n model.compile(loss='mse', optimizer='sgd')\n\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator(),\n validation_steps=10)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.evaluate_generator(custom_generator(),\n steps=5,\n 
max_queue_size=10,\n use_multiprocessing=False)\n\n def test_generator_methods_invalid_use_case(self):\n\n def custom_generator():\n while 1:\n yield 0\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1, input_shape=(2,)))\n model.compile(loss='mse', optimizer='sgd')\n\n with self.assertRaises(ValueError):\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n with self.assertRaises(ValueError):\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator(),\n validation_steps=10)\n with self.assertRaises(AttributeError):\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n with self.assertRaises(ValueError):\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n\n def test_training_with_sequences(self):\n\n class DummySequence(keras.utils.Sequence):\n\n def __getitem__(self, idx):\n return np.zeros([10, 2]), np.ones([10])\n\n def __len__(self):\n return 10\n\n arr_data = np.random.random((50, 2))\n arr_labels = np.random.random((50,))\n arr_sample_weights = np.random.random((50,))\n\n def custom_generator():\n batch_size = 10\n num_samples = 50\n while True:\n batch_index = np.random.randint(0, num_samples - batch_size)\n start = batch_index\n end = start + batch_size\n x = arr_data[start: end]\n y = arr_labels[start: end]\n w = arr_sample_weights[start: end]\n yield x, y, w\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1, input_shape=(2,)))\n model.compile(loss='mse', optimizer='sgd')\n\n model.fit_generator(DummySequence(),\n steps_per_epoch=10,\n validation_data=custom_generator(),\n validation_steps=1,\n max_queue_size=10,\n workers=0,\n use_multiprocessing=True)\n model.fit_generator(DummySequence(),\n steps_per_epoch=10,\n validation_data=custom_generator(),\n validation_steps=1,\n max_queue_size=10,\n workers=0,\n use_multiprocessing=False)\n\n\nclass TestTrainingUtils(test.TestCase):\n\n def test_check_array_lengths(self):\n keras.engine.training_utils.check_array_lengths(None, None, None)\n a_np = np.random.random((4, 3, 3))\n keras.engine.training_utils.check_array_lengths(a_np, a_np, a_np)\n keras.engine.training_utils.check_array_lengths(\n [a_np, a_np], [a_np, a_np], [a_np, a_np])\n keras.engine.training_utils.check_array_lengths([None], [None], [None])\n\n b_np = np.random.random((3, 4))\n with self.assertRaises(ValueError):\n keras.engine.training_utils.check_array_lengths([a_np], [b_np], None)\n\n def test_slice_arrays(self):\n input_a = np.random.random((10, 3))\n slice_arrays(input_a, 0)\n slice_arrays(None)\n slice_arrays(input_a, 0, 1)\n slice_arrays(input_a, stop=2)\n input_a = [None, [1, 1], None, [1, 1]]\n slice_arrays(input_a, 0)\n slice_arrays(input_a, 0, 1)\n slice_arrays(input_a, stop=2)\n input_a = [None]\n slice_arrays(input_a, 0)\n slice_arrays(input_a, 0, 1)\n slice_arrays(input_a, stop=2)\n input_a = None\n slice_arrays(input_a, 0)\n slice_arrays(input_a, 0, 1)\n slice_arrays(input_a, stop=2)\n\n\nclass TestTrainingWithDataTensors(test.TestCase):\n\n def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):\n with self.test_session():\n x = keras.layers.Input(shape=(3,), name='input')\n y = keras.layers.Dense(4, name='dense')(x)\n model = 
keras.Model(x, y)\n\n optimizer = 'rmsprop'\n loss = 'mse'\n metrics = ['mae']\n model.compile(optimizer, loss, metrics=metrics)\n\n inputs = keras.backend.zeros(shape=(10, 3))\n targets = keras.backend.zeros(shape=(10, 4))\n\n model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)\n model.evaluate(inputs, targets, steps=2, verbose=0)\n model.predict(inputs, steps=2)\n model.train_on_batch(inputs, targets)\n model.test_on_batch(inputs, targets)\n model.fit(inputs, targets,\n epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=(inputs, targets), validation_steps=2)\n\n def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):\n with self.test_session():\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(3,), name='input_b')\n\n dense = keras.layers.Dense(4, name='dense')\n c = dense(a)\n d = dense(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n\n model = keras.models.Model([a, b], [d, e])\n\n optimizer = 'rmsprop'\n loss = 'mse'\n loss_weights = [1., 0.5]\n metrics = ['mae']\n model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)\n\n input_a_tf = keras.backend.zeros(shape=(10, 3))\n input_b_tf = keras.backend.zeros(shape=(10, 3))\n\n output_d_tf = keras.backend.zeros(shape=(10, 4))\n output_e_tf = keras.backend.zeros(shape=(10, 4))\n\n model.fit(\n [input_a_tf, input_b_tf], [output_d_tf, output_e_tf],\n epochs=1,\n steps_per_epoch=2,\n verbose=0)\n with self.assertRaisesRegexp(ValueError,\n 'should specify the `steps_per_epoch`'):\n model.fit(\n [input_a_tf, input_b_tf], [output_d_tf, output_e_tf],\n epochs=1,\n batch_size=5,\n verbose=0)\n model.train_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])\n\n # Test with dictionary inputs\n model.fit(\n {'input_a': input_a_tf,\n 'input_b': input_b_tf},\n {'dense': output_d_tf,\n 'dropout': output_e_tf},\n epochs=1,\n steps_per_epoch=2,\n verbose=0)\n model.fit(\n {'input_a': input_a_tf,\n 'input_b': input_b_tf},\n {'dense': output_d_tf,\n 'dropout': output_e_tf},\n validation_data=({'input_a': input_a_tf,\n 'input_b': input_b_tf},\n {'dense': output_d_tf,\n 'dropout': output_e_tf}),\n epochs=1,\n steps_per_epoch=2,\n validation_steps=2,\n verbose=0)\n model.train_on_batch(\n {'input_a': input_a_tf,\n 'input_b': input_b_tf},\n {'dense': output_d_tf,\n 'dropout': output_e_tf})\n\n # Test with validation data\n model.fit(\n [input_a_tf, input_b_tf], [output_d_tf, output_e_tf],\n validation_data=([input_a_tf, input_b_tf],\n [output_d_tf, output_e_tf]),\n epochs=1,\n steps_per_epoch=2,\n validation_steps=2,\n verbose=0)\n # Test with validation split\n with self.assertRaisesRegexp(ValueError,\n 'you cannot use `validation_split`'):\n model.fit(\n [input_a_tf, input_b_tf], [output_d_tf, output_e_tf],\n epochs=2,\n steps_per_epoch=2,\n verbose=0,\n validation_split=0.2,\n validation_steps=2)\n\n # Test evaluation / prediction methods\n model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],\n steps=2, verbose=0)\n model.predict([input_a_tf, input_b_tf], steps=2)\n model.test_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])\n\n def test_model_with_input_feed_tensor(self):\n \"\"\"We test building a model with a TF variable as input.\n\n We should be able to call fit, evaluate, predict,\n by only passing them data for the placeholder inputs\n in the model.\n \"\"\"\n with self.test_session():\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n\n output_a_np = np.random.random((10, 4))\n 
output_b_np = np.random.random((10, 3))\n\n a = keras.Input(\n tensor=keras.backend.variables_module.Variable(input_a_np,\n dtype='float32'))\n b = keras.Input(shape=(3,), name='input_b')\n\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n dp = keras.layers.Dropout(0.5, name='dropout')\n b_2 = dp(b)\n\n model = keras.models.Model([a, b], [a_2, b_2])\n model.summary()\n\n optimizer = 'rmsprop'\n loss = 'mse'\n loss_weights = [1., 0.5]\n model.compile(optimizer, loss, metrics=['mean_squared_error'],\n loss_weights=loss_weights,\n sample_weight_mode=None)\n\n # test train_on_batch\n out = model.train_on_batch(input_b_np,\n [output_a_np, output_b_np])\n out = model.train_on_batch({'input_b': input_b_np},\n [output_a_np, output_b_np])\n out = model.test_on_batch({'input_b': input_b_np},\n [output_a_np, output_b_np])\n out = model.predict_on_batch({'input_b': input_b_np})\n\n # test fit\n out = model.fit({'input_b': input_b_np},\n [output_a_np, output_b_np], epochs=1, batch_size=10)\n out = model.fit(input_b_np,\n [output_a_np, output_b_np], epochs=1, batch_size=10)\n\n # test evaluate\n out = model.evaluate({'input_b': input_b_np},\n [output_a_np, output_b_np], batch_size=10)\n out = model.evaluate(input_b_np,\n [output_a_np, output_b_np], batch_size=10)\n\n # test predict\n out = model.predict({'input_b': input_b_np}, batch_size=10)\n out = model.predict(input_b_np, batch_size=10)\n self.assertEqual(len(out), 2)\n\n # Now test a model with a single input\n # i.e. we don't pass any data to fit the model.\n a = keras.Input(\n tensor=keras.backend.variables_module.Variable(input_a_np,\n dtype='float32'))\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)\n model = keras.models.Model(a, a_2)\n model.summary()\n\n optimizer = 'rmsprop'\n loss = 'mse'\n model.compile(optimizer, loss, metrics=['mean_squared_error'])\n\n # test train_on_batch\n out = model.train_on_batch(None,\n output_a_np)\n out = model.train_on_batch(None,\n output_a_np)\n out = model.test_on_batch(None,\n output_a_np)\n out = model.predict_on_batch(None)\n out = model.train_on_batch([],\n output_a_np)\n out = model.train_on_batch({},\n output_a_np)\n\n # test fit\n out = model.fit(None,\n output_a_np, epochs=1, batch_size=10)\n out = model.fit(None,\n output_a_np, epochs=1, batch_size=10)\n\n # test evaluate\n out = model.evaluate(None,\n output_a_np, batch_size=10)\n out = model.evaluate(None,\n output_a_np, batch_size=10)\n\n # test predict\n out = model.predict(None, steps=3)\n out = model.predict(None, steps=3)\n self.assertEqual(out.shape, (10 * 3, 4))\n\n # Same, without learning phase\n # i.e. 
we don't pass any data to fit the model.\n a = keras.Input(\n tensor=keras.backend.variables_module.Variable(input_a_np,\n dtype='float32'))\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n model = keras.models.Model(a, a_2)\n model.summary()\n\n optimizer = 'rmsprop'\n loss = 'mse'\n model.compile(optimizer, loss, metrics=['mean_squared_error'])\n\n # test train_on_batch\n out = model.train_on_batch(None,\n output_a_np)\n out = model.train_on_batch(None,\n output_a_np)\n out = model.test_on_batch(None,\n output_a_np)\n out = model.predict_on_batch(None)\n out = model.train_on_batch([],\n output_a_np)\n out = model.train_on_batch({},\n output_a_np)\n\n # test fit\n out = model.fit(None,\n output_a_np, epochs=1, batch_size=10)\n out = model.fit(None,\n output_a_np, epochs=1, batch_size=10)\n\n # test evaluate\n out = model.evaluate(None,\n output_a_np, batch_size=10)\n out = model.evaluate(None,\n output_a_np, batch_size=10)\n\n # test predict\n out = model.predict(None, steps=3)\n out = model.predict(None, steps=3)\n self.assertEqual(out.shape, (10 * 3, 4))\n\n def test_model_with_partial_loss(self):\n with self.test_session():\n a = keras.Input(shape=(3,), name='input_a')\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n dp = keras.layers.Dropout(0.5, name='dropout')\n a_3 = dp(a_2)\n model = keras.models.Model(a, [a_2, a_3])\n\n optimizer = 'rmsprop'\n loss = {'dropout': 'mse'}\n model.compile(optimizer, loss, metrics=['mae'])\n\n input_a_np = np.random.random((10, 3))\n output_a_np = np.random.random((10, 4))\n\n # test train_on_batch\n _ = model.train_on_batch(input_a_np, output_a_np)\n _ = model.test_on_batch(input_a_np, output_a_np)\n # fit\n _ = model.fit(input_a_np, [output_a_np])\n # evaluate\n _ = model.evaluate(input_a_np, [output_a_np])\n\n # Same without dropout.\n a = keras.Input(shape=(3,), name='input_a')\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n a_3 = keras.layers.Dense(4, name='dense_2')(a_2)\n model = keras.models.Model(a, [a_2, a_3])\n\n optimizer = 'rmsprop'\n loss = {'dense_2': 'mse'}\n model.compile(optimizer, loss, metrics={'dense_1': 'mae'})\n\n # test train_on_batch\n _ = model.train_on_batch(input_a_np, output_a_np)\n _ = model.test_on_batch(input_a_np, output_a_np)\n # fit\n _ = model.fit(input_a_np, [output_a_np])\n # evaluate\n _ = model.evaluate(input_a_np, [output_a_np])\n\n def test_model_with_external_loss(self):\n with self.test_session():\n # None loss, only regularization loss.\n a = keras.Input(shape=(3,), name='input_a')\n a_2 = keras.layers.Dense(4, name='dense_1',\n kernel_regularizer='l1',\n bias_regularizer='l2')(a)\n dp = keras.layers.Dropout(0.5, name='dropout')\n a_3 = dp(a_2)\n\n model = keras.models.Model(a, [a_2, a_3])\n\n optimizer = 'rmsprop'\n loss = None\n model.compile(optimizer, loss, metrics=['mae'])\n\n input_a_np = np.random.random((10, 3))\n\n # test train_on_batch\n out = model.train_on_batch(input_a_np, None)\n out = model.test_on_batch(input_a_np, None)\n # fit\n out = model.fit(input_a_np, None)\n # evaluate\n out = model.evaluate(input_a_np, None)\n\n # No dropout, external loss.\n a = keras.Input(shape=(3,), name='input_a')\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n a_3 = keras.layers.Dense(4, name='dense_2')(a)\n\n model = keras.models.Model(a, [a_2, a_3])\n model.add_loss(keras.backend.mean(a_3 + a_2))\n\n optimizer = 'rmsprop'\n loss = None\n model.compile(optimizer, loss, metrics=['mae'])\n\n # test train_on_batch\n out = model.train_on_batch(input_a_np, None)\n out = model.test_on_batch(input_a_np, 
None)\n # fit\n out = model.fit(input_a_np, None)\n # evaluate\n out = model.evaluate(input_a_np, None)\n\n # Test model with no external data at all.\n a = keras.Input(\n tensor=keras.backend.variables_module.Variable(input_a_np,\n dtype='float32'))\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)\n model = keras.models.Model(a, a_2)\n model.add_loss(keras.backend.mean(a_2))\n\n model.compile(optimizer='rmsprop',\n loss=None,\n metrics=['mean_squared_error'])\n\n # test train_on_batch\n out = model.train_on_batch(None, None)\n out = model.test_on_batch(None, None)\n out = model.predict_on_batch(None)\n\n # test fit\n with self.assertRaises(ValueError):\n out = model.fit(None, None, epochs=1, batch_size=10)\n out = model.fit(None, None, epochs=1, steps_per_epoch=1)\n\n # test fit with validation data\n with self.assertRaises(ValueError):\n out = model.fit(None, None, epochs=1,\n steps_per_epoch=None,\n validation_steps=2)\n out = model.fit(None, None, epochs=1,\n steps_per_epoch=2,\n validation_steps=2)\n\n # test evaluate\n with self.assertRaises(ValueError):\n out = model.evaluate(None, None, batch_size=10)\n out = model.evaluate(None, None, steps=3)\n\n # test predict\n with self.assertRaises(ValueError):\n out = model.predict(None, batch_size=10)\n out = model.predict(None, steps=3)\n self.assertEqual(out.shape, (10 * 3, 4))\n\n # Test multi-output model with no external data at all.\n a = keras.Input(\n tensor=keras.backend.variables_module.Variable(input_a_np,\n dtype='float32'))\n a_1 = keras.layers.Dense(4, name='dense_1')(a)\n a_2 = keras.layers.Dropout(0.5, name='dropout')(a_1)\n model = keras.models.Model(a, [a_1, a_2])\n model.add_loss(keras.backend.mean(a_2))\n\n model.compile(optimizer='rmsprop',\n loss=None,\n metrics=['mean_squared_error'])\n\n # test train_on_batch\n out = model.train_on_batch(None, None)\n out = model.test_on_batch(None, None)\n out = model.predict_on_batch(None)\n\n # test fit\n with self.assertRaises(ValueError):\n out = model.fit(None, None, epochs=1, batch_size=10)\n out = model.fit(None, None, epochs=1, steps_per_epoch=1)\n\n # test fit with validation data\n out = model.fit(None, None, epochs=1,\n steps_per_epoch=2,\n validation_steps=2)\n\n # test evaluate\n with self.assertRaises(ValueError):\n out = model.evaluate(None, None, batch_size=10)\n out = model.evaluate(None, None, steps=3)\n\n # test predict\n with self.assertRaises(ValueError):\n out = model.predict(None, batch_size=10, verbose=1)\n out = model.predict(None, steps=3)\n self.assertEqual(len(out), 2)\n self.assertEqual(out[0].shape, (10 * 3, 4))\n self.assertEqual(out[1].shape, (10 * 3, 4))\n\n def test_target_tensors(self):\n with self.test_session():\n # single-output, as list\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))\n input_val = np.random.random((10, 4))\n target_val = np.random.random((10, 4))\n target = keras.backend.variable(target_val)\n model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])\n model.train_on_batch(input_val, None)\n\n # single-output, as dict\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors={'dense': target})\n model.train_on_batch(input_val, None)\n\n # test invalid arguments\n with self.assertRaises(TypeError):\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors=set())\n with self.assertRaises(ValueError):\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors=[target, target])\n with 
self.assertRaises(ValueError):\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors={'dense2': None})\n with self.assertRaises(ValueError):\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors=[target])\n model.train_on_batch(input_val, target_val)\n\n # multi-output, as list\n input_val = np.random.random((10, 4))\n target_val_a = np.random.random((10, 4))\n target_val_b = np.random.random((10, 4))\n target_a = keras.backend.variable(target_val_a)\n target_b = keras.backend.variable(target_val_b)\n\n inputs = keras.layers.Input(shape=(4,))\n output_a = keras.layers.Dense(4, name='dense_a')(inputs)\n output_b = keras.layers.Dense(4, name='dense_b')(inputs)\n model = keras.models.Model(inputs, [output_a, output_b])\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors=[target_a, target_b])\n model.train_on_batch(input_val, None)\n\n # multi-output, as dict\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors={'dense_a': target_a,\n 'dense_b': target_b})\n model.train_on_batch(input_val, None)\n\n # test with sample weights\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors=[target_a, target_b])\n model.train_on_batch(input_val, None,\n sample_weight={'dense_a': np.random.random((10,))})\n\n def test_model_custom_target_tensors(self):\n with self.test_session():\n a = keras.Input(shape=(3,), name='input_a')\n b = keras.Input(shape=(3,), name='input_b')\n\n a_2 = keras.layers.Dense(4, name='dense_1')(a)\n dp = keras.layers.Dropout(0.5, name='dropout')\n b_2 = dp(b)\n\n y = keras.backend.placeholder([10, 4], name='y')\n y1 = keras.backend.placeholder([10, 3], name='y1')\n y2 = keras.backend.placeholder([7, 5], name='y2')\n model = keras.models.Model([a, b], [a_2, b_2])\n\n optimizer = 'rmsprop'\n loss = 'mse'\n loss_weights = [1., 0.5]\n\n # test list of target tensors\n with self.assertRaises(ValueError):\n model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,\n sample_weight_mode=None, target_tensors=[y, y1, y2])\n model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,\n sample_weight_mode=None, target_tensors=[y, y1])\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n\n output_a_np = np.random.random((10, 4))\n output_b_np = np.random.random((10, 3))\n\n _ = model.train_on_batch([input_a_np, input_b_np],\n [output_a_np, output_b_np],\n {y: np.random.random((10, 4)),\n y1: np.random.random((10, 3))})\n # test dictionary of target_tensors\n with self.assertRaises(ValueError):\n model.compile(optimizer, loss,\n metrics=[],\n loss_weights=loss_weights,\n sample_weight_mode=None,\n target_tensors={'does_not_exist': y2})\n # test dictionary of target_tensors\n model.compile(optimizer, loss,\n metrics=[],\n loss_weights=loss_weights,\n sample_weight_mode=None,\n target_tensors={'dense_1': y, 'dropout': y1})\n _ = model.train_on_batch([input_a_np, input_b_np],\n [output_a_np, output_b_np],\n {y: np.random.random((10, 4)),\n y1: np.random.random((10, 3))})\n\n # test with custom TF placeholder as target\n pl_target_a = keras.backend.array_ops.placeholder('float32',\n shape=(None, 4))\n model.compile(optimizer='rmsprop', loss='mse',\n target_tensors={'dense_1': pl_target_a})\n model.train_on_batch([input_a_np, input_b_np],\n [output_a_np, output_b_np])\n\n\nif __name__ == '__main__':\n # Bazel sets these environment variables to very long paths.\n # Tempfile uses them to create long paths, and in turn multiprocessing\n # library tries to create sockets named after paths. 
Delete whatever bazel\n # writes to these to avoid tests failing due to socket addresses being too\n # long.\n for var in ('TMPDIR', 'TMP', 'TEMP'):\n if var in os.environ:\n del os.environ[var]\n\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the swig wrapper tf_optimizer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.grappler import item as gitem\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass PyWrapOptimizeGraphTest(test.TestCase):\n\n def testBasic(self):\n \"\"\"Make sure arguments can be passed correctly.\"\"\"\n a = constant_op.constant(10, name='a')\n b = constant_op.constant(20, name='b')\n c = math_ops.add_n([a, b], name='c')\n d = math_ops.add_n([b, c], name='d')\n train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n # Being a train_op will make 'd' to be added as a fetch node.\n train_op.append(d)\n mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())\n\n rewriter_config = rewriter_config_pb2.RewriterConfig()\n rewriter_config.optimizers.append('constfold')\n\n graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)\n\n self.assertEqual(len(graph.node), 1)\n self.assertItemsEqual([node.name for node in graph.node], ['d'])\n\n def testKeepNodes(self):\n g = ops.Graph()\n with g.as_default():\n a1 = variables.Variable(\n 1.0) # Must be preserved since it's in the collection 'variables'.\n a2 = constant_op.constant(0, shape=[50, 50], name='keep')\n ops.add_to_collection('a2', a2) # Explicitly add to collection.\n b = constant_op.constant(1, shape=[100, 10])\n c = constant_op.constant(0, shape=[10, 30])\n d = math_ops.matmul(b, c)\n ops.add_to_collection('train_op', d) # d is the fetch node.\n\n # Optimize the graph.\n mg = meta_graph.create_meta_graph_def(graph=g)\n rewriter_config = rewriter_config_pb2.RewriterConfig()\n optimized_graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)\n\n # Check that the nodes referenced in various collections have been preserved\n self.assertEqual(len(optimized_graph.node), 5)\n self.assertEqual(d.op.name, optimized_graph.node[0].name)\n self.assertEqual(a1.op.name, optimized_graph.node[1].name)\n self.assertEqual('Variable/initial_value', optimized_graph.node[2].name)\n self.assertEqual(a2.op.name, 
optimized_graph.node[3].name)\n self.assertEqual('Variable/Assign', optimized_graph.node[4].name)\n\n def testLoops(self):\n g = ops.Graph()\n with g.as_default():\n\n def _Cond(_, counter):\n return counter < end\n\n def _Body(buf, counter):\n buf = array_ops.concat([buf, [counter]], 0)\n counter += 1\n return [buf, counter]\n\n start = array_ops.placeholder(shape=[], dtype=dtypes.int32)\n end = array_ops.placeholder(shape=[], dtype=dtypes.int32)\n init_buf = array_ops.zeros(shape=[0], dtype=dtypes.int32)\n loop_vars = [init_buf, start]\n shape_inv = [\n tensor_shape.TensorShape([None]),\n tensor_shape.TensorShape([])\n ]\n buf, _ = control_flow_ops.while_loop(_Cond, _Body, loop_vars, shape_inv)\n\n f = -array_ops.ones_like(buf, optimize=False)\n buf_shape = array_ops.shape(buf)\n f_shape = array_ops.shape(f)\n ops.add_to_collection('train_op', buf_shape)\n ops.add_to_collection('train_op', f_shape)\n\n # Optimize the graph.\n mg = meta_graph.create_meta_graph_def(graph=g)\n rewriter_config = rewriter_config_pb2.RewriterConfig()\n optimized_graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)\n mg.graph_def.CopyFrom(optimized_graph)\n\n # Check that the nodes referenced in various collections have been preserved\n item = gitem.Item(mg)\n props = item.GetOpProperties()\n buf_prop = props[buf.op.name]\n f_prop = props[f.op.name]\n self.assertEqual(buf_prop, f_prop)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.contrib.data.python.ops.threadpool.PrivateThreadPool", "tensorflow.python.data.Dataset.from_tensors", "tensorflow.contrib.eager.python.checkpointable_utils.Checkpoint", "tensorflow.contrib.lookup.KeyValueTensorInitializer", "tensorflow.python.eager.test.main", "numpy.ediff1d", "tensorflow.contrib.eager.python.datasets.Iterator", "numpy.median", "tensorflow.python.data.Dataset.from_tensor_slices", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.data.Dataset.range", "tensorflow.python.ops.script_ops.py_func", "tensorflow.python.eager.test.gpu_device_name", "tensorflow.python.ops.math_ops.add", "numpy.random.randn", "tensorflow.contrib.data.python.ops.unique.unique", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.gradient_checker.compute_gradient", "tensorflow.python.ops.manip_ops.roll", "tensorflow.python.platform.test.main", "numpy.random.rand", "numpy.roll", "numpy.random.randint" ], [ "tensorflow.python.keras._impl.keras.layers.BatchNormalization", "tensorflow.python.keras._impl.keras.engine.training_utils.check_array_lengths", "numpy.concatenate", "scipy.sparse.random", "tensorflow.python.keras._impl.keras.backend.array_ops.placeholder", "tensorflow.python.keras._impl.keras.layers.Dropout", "numpy.ndarray.tolist", "tensorflow.python.keras._impl.keras.utils.generic_utils.slice_arrays", "tensorflow.python.keras._impl.keras.backend.mean", "tensorflow.python.keras._impl.keras.backend.variables_module.Variable", "numpy.random.randint", "tensorflow.python.keras._impl.keras.layers.Masking", "tensorflow.python.keras._impl.keras.layers.Input", "numpy.arange", "tensorflow.python.keras._impl.keras.backend.variable", "tensorflow.python.keras._impl.keras.layers.Dense", "tensorflow.python.keras._impl.keras.testing_utils.get_test_data", "tensorflow.python.platform.test.main", "tensorflow.python.keras._impl.keras.utils.to_categorical", "numpy.repeat", "tensorflow.python.keras._impl.keras.models.Model", "numpy.zeros", "tensorflow.python.keras._impl.keras.backend.zeros", "tensorflow.python.keras._impl.keras.losses.get", "numpy.array", "tensorflow.python.keras._impl.keras.backend.placeholder", "numpy.sum", "tensorflow.python.keras._impl.keras.Model", "numpy.random.random", "numpy.random.seed", "numpy.ones", "tensorflow.python.keras._impl.keras.layers.Activation", "tensorflow.python.keras._impl.keras.models.Sequential", "tensorflow.python.keras._impl.keras.Input" ], [ "tensorflow.python.grappler.tf_optimizer.OptimizeGraph", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.get_collection_ref", "tensorflow.python.framework.meta_graph.create_meta_graph_def", "tensorflow.python.grappler.item.Item", "tensorflow.python.platform.test.main", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.add_n", "tensorflow.python.framework.ops.Graph", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
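The grappler test file above is self-contained enough to distill; a minimal sketch of the same constant-folding flow, assuming a TF 1.x tree where tensorflow.python.grappler is importable (it is an internal, non-public module):

# Minimal constant-folding pass via TF1's internal grappler wrapper;
# every call below is taken from the test file above.
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import constant_op, meta_graph, ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops

a = constant_op.constant(10, name='a')
b = constant_op.constant(20, name='b')
c = math_ops.add_n([a, b], name='c')
ops.add_to_collection('train_op', c)  # mark 'c' as a fetch node

mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
config = rewriter_config_pb2.RewriterConfig()
config.optimizers.append('constfold')

optimized = tf_optimizer.OptimizeGraph(config, mg)
print([node.name for node in optimized.node])  # folded down to the fetch node

As testBasic above shows, constant folding collapses the whole add chain into the single fetched node.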
minuJeong/moderngl-window
[ "6386478f1e6b07cefda8f4d9324d972ab88b34ec" ]
[ "examples/advanced/boids.py" ]
[ "from pathlib import Path\r\nimport random\r\nimport numpy\r\nfrom pyrr import matrix44\r\n\r\nimport moderngl\r\nimport moderngl_window\r\nfrom moderngl_window.opengl.vao import VAO\r\n\r\n\r\nclass Boids(moderngl_window.WindowConfig):\r\n \"\"\"\r\n An attempt to make something boid-like with GL3.3.\r\n Not currently working as intended, but still creates\r\n an interesting result.\r\n\r\n For this to properly work we need to split the calculations\r\n into several passes.\r\n\r\n We are doing this the O(n^2) way with the gpu using transform feedback.\r\n To make the data available to the vertex shader (looping through it)\r\n we copy the vertex buffer every frame to a texture.\r\n\r\n A better way in the future is to use compute shader.\r\n \"\"\"\r\n title = \"Boids\"\r\n resource_dir = (Path(__file__) / '../../resources').absolute()\r\n aspect_ratio = 3440 / 1440\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n MAX_TEX_WIDTH = 8192\r\n N = MAX_TEX_WIDTH * 1\r\n\r\n def gen_initial_data(n, x_area=2.0, y_area=2.0):\r\n for n in range(n):\r\n # position\r\n yield (random.random() - 0.5) * x_area\r\n yield (random.random() - 0.5) * y_area\r\n # Velocity\r\n yield (random.random() - 0.5)\r\n yield (random.random() - 0.5)\r\n\r\n # Create geometry data\r\n gen = gen_initial_data(N, x_area=self.aspect_ratio * 2 * 0.9, y_area=2.0 * 0.95)\r\n data = numpy.fromiter(gen, count=N * 4, dtype='f4')\r\n self.boids_buffer_1 = self.ctx.buffer(data.tobytes())\r\n self.boids_buffer_2 = self.ctx.buffer(data=self.boids_buffer_1.read())\r\n\r\n self.boids_vao_1 = VAO(name='boids_1', mode=moderngl.POINTS)\r\n self.boids_vao_1.buffer(self.boids_buffer_1, '2f 2f', ['in_position', 'in_velocity'])\r\n\r\n self.boids_vao_2 = VAO(name='boids_2', mode=moderngl.POINTS)\r\n self.boids_vao_2.buffer(self.boids_buffer_2, '2f 2f', ['in_position', 'in_velocity'])\r\n\r\n self.boids_texture = self.ctx.texture((MAX_TEX_WIDTH, N * 2 // MAX_TEX_WIDTH), components=2, dtype='f4')\r\n\r\n # Programs\r\n self.boids_render_program = self.load_program('programs/boids/boids_render.glsl')\r\n self.boids_transform_program = self.load_program('programs/boids/boids_transform.glsl')\r\n\r\n # Prepare for rendering\r\n self.m_proj = matrix44.create_orthogonal_projection(\r\n -self.aspect_ratio, self.aspect_ratio,\r\n -1.0, 1.0,\r\n -1.0, 1.0,\r\n dtype='f4',\r\n )\r\n self.boids_render_program['m_proj'].write(self.m_proj.tobytes())\r\n self.boids_transform_program['data'].value = 0\r\n self.boids_transform_program['num_boids'].value = N\r\n self.boids_transform_program['tex_width'].value = MAX_TEX_WIDTH\r\n\r\n def render(self, time, frame_time):\r\n\r\n self.boids_texture.use(location=0)\r\n self.boids_transform_program['timedelta'].value = frame_time # max(frame_time, 1.0 / 60.0)\r\n self.boids_vao_1.transform(self.boids_transform_program, self.boids_buffer_2)\r\n self.boids_vao_2.render(self.boids_render_program)\r\n\r\n # Swap around ..\r\n self.boids_vao_1, self.boids_vao_2 = self.boids_vao_2, self.boids_vao_1\r\n self.boids_buffer_1, self.boids_buffer_2 = self.boids_buffer_2, self.boids_buffer_1\r\n\r\n # Write vertex data into texture so we can iterate it in shader\r\n self.boids_texture.write(self.boids_buffer_1.read())\r\n\r\n\r\nif __name__ == '__main__':\r\n moderngl_window.run_window_config(Boids)\r\n" ]
[ [ "numpy.fromiter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
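The Boids docstring above points at compute shaders as the better approach; a rough sketch of that alternative with moderngl, where 'boids.comp', N, and the workgroup size of 64 are assumptions rather than files or values from this repo:

# Hypothetical compute-shader variant of the boids update; replaces the
# transform-feedback + texture round-trip with two storage buffers.
import moderngl

N = 8192  # assumed boid count, a multiple of the workgroup size
ctx = moderngl.create_standalone_context(require=430)  # compute needs GL 4.3+
compute = ctx.compute_shader(open('boids.comp').read())  # hypothetical shader

buf_in = ctx.buffer(reserve=N * 4 * 4)   # vec2 position + vec2 velocity per boid
buf_out = ctx.buffer(reserve=N * 4 * 4)

buf_in.bind_to_storage_buffer(0)   # must match binding=0 in the shader
buf_out.bind_to_storage_buffer(1)
compute.run(group_x=N // 64)       # assumes local_size_x = 64

# same ping-pong idea as the VAO/buffer swap in render() above
buf_in, buf_out = buf_out, buf_in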
linearlabstech/blox
[ "6a5c8a28fcfcb17731be89939284e7ac13a047d7", "6a5c8a28fcfcb17731be89939284e7ac13a047d7" ]
[ "BLOX/Modules/EfficientNetBody.py", "BLOX/DataSet/DataSet.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2019, Linear Labs Technologies\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom efficientnet_pytorch import EfficientNet\nimport torch\nfrom torch import nn\n\nclass EfficientNetBody(nn.Module):\n def __init__(self,mtype='efficientnet-b0'): \n super(EfficientNetBody,self).__init__()\n self.model = EfficientNet.from_pretrained(mtype)\n if torch.cuda.is_available():self.model.cuda()\n else:self.model.cpu()\n\n def forward(self,x):\n x = self.model.extract_features(x)\n return x", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2019, Linear Labs Technologies\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport torch\nfrom random import shuffle\nfrom BLOX.DataSet.TestSet import TestSet\nclass DataSet:\n \"\"\"\n load a saved pt state of shape:\n\n [\n [input,output],\n ...,\n ]\n \n TODO: add support for testing split as well\n\n \"\"\"\n size = 0\n n_classes = -1\n data = None\n _train = None\n _eval = None\n training = True\n is_categorical = False\n def __init__(self,data,eval_split=.15,dtype='float'):\n data = data if isinstance(data,dict) else torch.load(data)\n size = len(data['inputs'])\n self.type = dtype\n eval_size = int(float(size)*eval_split)\n self.tsize = size-eval_size\n self.dsize = eval_size\n self.tidxs = list(range(self.tsize))\n self.didxs = list(range(self.dsize))\n self._train = TestSet({\n 'inputs':data['inputs'][eval_size:],\n 'targets':data['targets'][eval_size:]\n })\n self._eval = TestSet({\n 'inputs':data['inputs'][:eval_size],\n 'targets':data['targets'][:eval_size]\n })\n self.total_size = size\n self._size = self.tsize\n self.idx = 0\n self.bsz = 1\n # if torch.cuda.is_available():self.cuda()\n\n def __len__(self):return self.n\n \n @property\n def n(self):\n return self.tsize if self.training else self.dsize\n\n def shuffle(self):\n shuffle(self.tidxs)\n shuffle(self.didxs)\n return self\n\n # def __iter__ (self):\n # return self#iter( ((x,y) for (x,y) in self) )\n\n # def __next__ (self):\n # try:\n # (x,y) = self._train.__next__() if self.training else self._eval.__next__()\n # except IndexError:\n # print('error')\n # self.idx += 1\n # return x,y\n \n def batchify(self,bsz=64):\n self.bsz = bsz\n self.tsize = self.tsize // bsz\n self.dsize = self.dsize // bsz\n self._size = self.tsize if self.training else self.dsize\n self._train.batchify(bsz)\n self._eval.batchify(bsz)\n self.tidxs = list(range( self.tsize ))\n self.didxs = list(range( self.dsize ))\n return self\n\n 
@property\n def x(self):\n return self._train.x if self.training else self._eval.x\n @property\n def y(self):\n return self._train.y if self.training else self._eval.y\n\n def cuda(self):\n \"\"\"\n Move data to GPU\n \"\"\"\n self._train.cuda()\n self._eval.cuda()\n\n def size(self):\n return self._train.size() if self.training else self._eval.size()\n\n def cpu(self):\n \"\"\"\n Move data to CPU\n \"\"\"\n self._train.cpu()\n self._eval.cpu()\n\n def eval(self):\n \"\"\"\n switch to the evaluation data\n \"\"\"\n self.training = False\n self._size = self.dsize\n return self\n\n def pad(self):\n self._train.pad()\n self._eval.pad()\n return self\n\n def to(self, dtype):\n dtype = dtype.lower()\n assert dtype in ['cpu','gpu'], 'device type not supported'\n self.cpu() if dtype == 'cpu' else self.cuda()\n return self\n\n def categorical(self):\n if not self.is_categorical:\n self._train.categorical()\n self._eval.categorical()\n self.is_categorical = True\n return self\n\n # def float(self):\n # self._data['inputs'].float()\n # self._data['targets'].float()\n # return self\n\n # def long(self):\n # self._data['inputs'].long()\n # self._data['targets'].long()\n # return self\n\n def train(self):\n \"\"\"\n switch to the training data\n \"\"\"\n self.training = True\n self._size = self.tsize\n return self\n\n def save(self,fname=None):\n import time\n self.cpu()\n if not fname:\n fname = 'data.{}.ds'.format(int(time.time()))\n torch.save({\n 'inputs': self._train['inputs']+self._eval['inputs'],\n 'targets': self._train['targets']+self._eval['targets']\n },fname)\n\n def __getitem__(self,idx):\n \"\"\"\n By default only return the training set, but this can be toggled by calling either the '.eval()' or '.train()' method \n \"\"\"\n assert idx <= self.n\n if self.training: return self._train[ self.tidxs[idx] ]\n else: return self._eval[ self.didxs[idx] ]\n\n\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.load", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
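A short usage sketch for the DataSet class above; 'data.pt' is a hypothetical torch.save'd {'inputs': ..., 'targets': ...} dict, and the (x, y) unpacking assumes TestSet indexing returns an input/target pair:

# Hypothetical walkthrough of the BLOX DataSet API defined above.
from BLOX.DataSet.DataSet import DataSet

ds = DataSet('data.pt', eval_split=.15).shuffle().batchify(64)

ds.train()                 # iterate the training split
for i in range(len(ds)):
    x, y = ds[i]

ds.eval()                  # switch to the held-out split
x, y = ds[0]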
motokimura/M3D-RPN
[ "7a9be66cb257c349e51a3eac7e67bdea3a6ddd72" ]
[ "scripts/test_rpn_3d.py" ]
[ "# -----------------------------------------\n# python modules\n# -----------------------------------------\nfrom importlib import import_module\nfrom easydict import EasyDict as edict\nimport torch.backends.cudnn as cudnn\nimport sys\nimport numpy as np\nimport os\n\n# stop python from writing so much bytecode\nsys.dont_write_bytecode = True\nsys.path.append(os.getcwd())\nnp.set_printoptions(suppress=True)\n\n# -----------------------------------------\n# custom modules\n# -----------------------------------------\nfrom lib.imdb_util import *\n\n\ndef parse_args(argv):\n from getopt import getopt\n opts, args = getopt(argv, '', ['config=', 'weight=', 'outdir='])\n # defaults (trainval split #1)\n conf_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1_config.pkl'\n weights_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1'\n outdir = None\n # read opts\n for opt, arg in opts:\n if opt == '--config':\n conf_path = arg\n if opt == '--weight':\n weights_path = arg\n if opt == '--outdir':\n outdir = arg\n\n if outdir is None:\n # if --outdir is not given, use the weight file name as the output directory\n outdir = os.path.basename(weights_path)\n\n return conf_path, weights_path, outdir\n\n\nconf_path, weights_path, outdir = parse_args(sys.argv[1:])\nprint()\nprint('CONFIG: {}'.format(conf_path))\nprint('WEIGHT: {}'.format(weights_path))\nprint('OUTDIR: {}'.format(outdir))\nprint()\n\n# load config\nconf = edict(pickle_read(conf_path))\nconf.pretrained = None\n\ndata_path = os.path.join(os.getcwd(), 'data')\nresults_path = os.path.join('output', outdir, 'data')\n\n# make directory\nmkdir_if_missing(results_path, delete_if_exist=True)\n\n# -----------------------------------------\n# torch defaults\n# -----------------------------------------\n\n# defaults\ninit_torch(conf.rng_seed, conf.cuda_seed)\n\n# -----------------------------------------\n# setup network\n# -----------------------------------------\n\n# net\nnet = import_module('models.' + conf.model).build(conf)\n\n# load weights\nload_weights(net, weights_path, remove_module=True)\n\n# switch modes for evaluation\nnet.eval()\n\nprint(pretty_print('conf', conf))\n\n# -----------------------------------------\n# test kitti\n# -----------------------------------------\n\ntest_kitti_3d(conf.dataset_test, net, conf, results_path, data_path, use_log=False)" ]
[ [ "numpy.set_printoptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
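Since parse_args above falls back to the weight file name when --outdir is omitted, its behaviour is easy to check in isolation (the weight path here is hypothetical):

# Hypothetical direct call to the parse_args helper defined above.
conf_path, weights_path, outdir = parse_args(
    ['--config', 'weights/my_run_config.pkl', '--weight', 'weights/my_run'])
assert outdir == 'my_run'  # basename of the --weight argument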
MuAuan/llightning-pytorch
[ "38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6", "38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6", "38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6" ]
[ "Coloring/simple_YCC_resnet.py", "LitCIFAR10.py", "Coloring/mayuyu.py" ]
[ "import os\nimport time\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport torchvision\nfrom torchvision.datasets import CIFAR10 #MNIST\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\nimport cv2\n\n#from net_encoder_decoder_vgg16 import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet2 import Encoder, Decoder\nfrom net_colarization_resnet import ColorizationNet\n\ndef imshow(img,file='', text_=''):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.text(x = 3, y = 2, s = text_, c = \"red\")\n plt.pause(3)\n if file != '':\n plt.savefig(file+'.png')\n plt.close()\n\nfrom pytorch_lightning.callbacks import Callback \nclass MyPrintingCallback(Callback):\n def on_epoch_end(self, trainer, pl_module):\n print('')\n\nclass rgb2YCrCb(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n CC = np.array(CC).reshape(2,32*8,32*8) #(2,32*2,32*2)\n #print(CC.shape)\n return np.array(CC)\n \n def __repr__(self):\n return self.__class__.__name__\n \nclass rgb2YCrCb_(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n #tensor = self.ts3(self.ts2(self.ts(tensor))) / 4 + 0.5 # unnormalize \n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n Y = np.array(Y).reshape(1,32*8,32*8) #(1,32*2,32*2)\n #print(Y.shape)\n return Y\n\nclass ImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, data_num,train_=True, transform1 = None, transform2 = None,train = True):\n \n self.transform1 = transform1\n self.transform2 = transform2\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Compose([\n transforms.ToTensor(),\n #transforms.Resize((64,64)),\n transforms.Normalize(mean, std),\n ])\n self.train = train_\n \n self.data_dir = './'\n self.data_num = data_num\n self.data = []\n self.label = []\n\n # download\n CIFAR10(self.data_dir, train=True, download=True)\n self.data =CIFAR10(self.data_dir, train=self.train, transform=self.ts3)\n\n def __len__(self):\n return self.data_num\n\n def __getitem__(self, idx):\n out_data = self.data[idx][0]\n out_label_ = self.data[idx][1]\n out_label = torch.from_numpy(np.array(out_label_)).long()\n \n if self.transform1:\n out_data1 = self.transform1(out_data)\n if self.transform2:\n out_data2 = self.transform2(out_data)\n \n return out_data, out_data1, out_data2, out_label\n \nclass LitAutoEncoder(pl.LightningModule):\n\n def __init__(self, data_dir='./'):\n super().__init__()\n self.ts2 = transforms.ToTensor()\n self.ts = transforms.ToPILImage()\n self.data_dir = data_dir\n self.data_num =50000 
#50000\n # Hardcode some dataset specific attributes\n self.num_classes = 10\n self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n self.dims = (32*8, 32*8)\n \n self.encoder_decoder = ColorizationNet()\n #self.encoder = Encoder()\n #self.decoder = Decoder()\n\n def forward(self, x):\n # in lightning, forward defines the prediction/inference actions\n \n x = self.encoder_decoder(x)\n return x\n\n def training_step(self, batch, batch_idx):\n # training_step defined the train loop. It is independent of forward\n _,x,x_ , y = batch\n x_hat = self.encoder_decoder(x) ##resnet\n loss = F.mse_loss(x_hat, x_)\n self.log('train_loss', loss, prog_bar = True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n _,x, x_, y = batch\n x_hat = self.encoder_decoder(x)\n loss = F.mse_loss(x_hat, x_)\n self.log('test_loss', loss, prog_bar = True)\n return loss\n \n def test_step(self, batch, batch_idx):\n # Here we just reuse the validation_step for testing\n return self.validation_step(batch, batch_idx)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) \n return optimizer\n \ndef main():\n ts = transforms.ToPILImage()\n ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n ts3 = transforms.Normalize(mean, std)\n ts4 = transforms.Resize((256,256))\n meang, stdg =[0.5], [0.25]\n ts5 = transforms.Normalize(meang, stdg)\n trans2 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb(), #CrCb\n ])\n trans1 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb_(), #Y\n ])\n dim1 =(256,256)\n dim2 = (1,256,256)\n dim3 = (256,256,2)\n data_num = 50000\n cifar10_full =ImageDataset(data_num, train=True, transform1=trans1, transform2=trans2)\n n_train = int(len(cifar10_full)*0.95)\n n_val = int(len(cifar10_full)*0.04)\n n_test = len(cifar10_full)-n_train -n_val\n cifar10_train, cifar10_val, cifar10_test = torch.utils.data.random_split(cifar10_full, [n_train, n_val, n_test])\n \n trainloader = DataLoader(cifar10_train, shuffle=True, drop_last = True, batch_size=32, num_workers=0)\n valloader = DataLoader(cifar10_val, shuffle=False, batch_size=32, num_workers=0)\n testloader = DataLoader(cifar10_test, shuffle=False, batch_size=32, num_workers=0)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") #for gpu\n # Assuming that we are on a CUDA machine, this should print a CUDA device:\n print(device)\n pl.seed_everything(0)\n\n # model\n autoencoder = LitAutoEncoder()\n #path_ = './simple_coloring/'\n #PATH = path_+'example_cifar4Ln100_9.ckpt'\n #autoencoder = autoencoder.load_from_checkpoint(PATH)\n \n #autoencoder = LitAutoEncoder()\n autoencoder = autoencoder.to(device) #for gpu\n print(autoencoder)\n summary(autoencoder,dim2)\n \n trainer = pl.Trainer(max_epochs=1, gpus=1, callbacks=[MyPrintingCallback()]) ####epoch\n sk = 0\n for i in range(0,10,1):\n trainer.fit(autoencoder, trainloader, valloader) \n print('training_finished')\n \n results = trainer.test(autoencoder, testloader)\n print(results)\n if sk%1==0:\n dataiter = iter(trainloader)\n _,images, images_, labels = dataiter.next()\n print(images.shape, images_.shape)\n\n images0 = []\n for i in range(32):\n print(i, images[i].shape, images_[i].shape)\n YCC_ = cv2.merge((np.array(images[i]).reshape(dim1),np.array(images_[i]).reshape(dim3)))\n images0_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n 
images0.append(ts2(images0_/255.))\n # show images \n imshow(torchvision.utils.make_grid(images0), 'cifar10_results',text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) #3\n # print labels\n print(' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n\n path_ = './simple_coloring/'\n PATH = path_+'example_cifar4Ln100_{}.ckpt'.format(sk)\n trainer.save_checkpoint(PATH)\n\n pretrained_model = autoencoder.load_from_checkpoint(PATH)\n pretrained_model.freeze()\n pretrained_model.eval()\n\n latent_dim,ver = \"Gray2Clolor_resnet\", \"1_{}\".format(sk) #####save condition\n dataiter = iter(testloader)\n images0,images, images1, labels = dataiter.next() #original, Y, CrCb, label\n # show images\n imshow(torchvision.utils.make_grid(images.reshape(32,1,32*8,32*8)/255.),path_+'1_Y_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(images0.reshape(32,3,32,32)),path_+'2_original_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(ts4(images0).reshape(32,3,32*8,32*8)),path_+'3_original_normx2_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n # show images1\n #imshow(torchvision.utils.make_grid(images1.reshape(32,3,32*2,32*2)),'normalized_images1_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n\n decode_img = pretrained_model.encoder_decoder(images[0:32].to('cpu').reshape(32,1,32*8,32*8)) #3\n #decode_img = pretrained_model.decoder(encode_img)\n decode_img_cpu = decode_img.cpu()\n images2 = []\n for i in range(32):\n print(i, images[i].shape, decode_img_cpu[i].shape)\n YCC_ = cv2.merge((np.array(images[i].reshape(dim1)),np.array(decode_img_cpu[i].reshape(dim3))))\n images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n images2.append(ts3(ts2(images2_/255.)))\n #images2.append(ts2(images2_/255.))\n imshow(torchvision.utils.make_grid(images2), path_+'4_preds_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n sk += 1\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n print('elapsed time: {:.3f} [sec]'.format(time.time() - start_time)) \n", "import os\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision.datasets import MNIST,CIFAR10\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics.functional import accuracy\nfrom torchsummary import summary\n\n\nclass LitCIFAR10(pl.LightningModule):\n \n def __init__(self, data_dir='./', hidden_size=64, learning_rate=2e-4):\n\n super().__init__()\n\n # Set our init args as class attributes\n self.data_dir = data_dir\n self.hidden_size = hidden_size\n self.learning_rate = learning_rate\n\n # Hardcode some dataset specific attributes\n self.num_classes = 10\n self.dims = (3, 32, 32)\n channels, width, height = self.dims\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n # Define PyTorch model\n self.model = nn.Sequential(\n nn.Conv2d(3, 256, 5),\n nn.MaxPool2d(2, 2),\n nn.BatchNorm2d(256),\n nn.Conv2d(256, 512, 5),\n nn.BatchNorm2d(512),\n nn.Conv2d(512, 1924, 2),\n nn.BatchNorm2d(1924),\n 
nn.Linear(1924 * 2 * 2, 160),\n nn.Linear(160, 10)\n )\n self.train_acc = pl.metrics.Accuracy()\n self.val_acc = pl.metrics.Accuracy()\n self.test_acc = pl.metrics.Accuracy()\n \n\n def forward(self, x):\n x = self.model(x)\n #x = x.view(x.size(0), -1)\n #x = self.classifier(x)\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n loss = F.nll_loss(logits, y)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n loss = F.nll_loss(logits, y)\n preds = torch.argmax(logits, dim=1)\n acc = accuracy(preds, y)\n\n # Calling self.log will surface up scalars for you in TensorBoard\n self.log('val_loss', loss, prog_bar=True)\n self.log('val_acc', acc, prog_bar=True)\n return loss\n\n def test_step(self, batch, batch_idx):\n # Here we just reuse the validation_step for testing\n return self.validation_step(batch, batch_idx)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n return optimizer\n\n ####################\n # DATA RELATED HOOKS\n ####################\n\n def prepare_data(self):\n # download\n CIFAR10(self.data_dir, train=True, download=True)\n CIFAR10(self.data_dir, train=False, download=True)\n\n def setup(self, stage=None):\n\n # Assign train/val datasets for use in dataloaders\n if stage == 'fit' or stage is None:\n cifar_full =CIFAR10(self.data_dir, train=True, transform=self.transform)\n n_train = int(len(cifar_full)*0.8)\n n_val = len(cifar_full)-n_train\n self.cifar_train, self.cifar_val = torch.utils.data.random_split(cifar_full, [n_train, n_val])\n\n # Assign test dataset for use in dataloader(s)\n if stage == 'test' or stage is None:\n self.cifar_test = CIFAR10(self.data_dir, train=False, transform=self.transform)\n\n def train_dataloader(self):\n return DataLoader(self.cifar_train, batch_size=32)\n\n def val_dataloader(self):\n return DataLoader(self.cifar_val, batch_size=32)\n\n def test_dataloader(self):\n return DataLoader(self.cifar_test, batch_size=32)\n \nmodel = LitCIFAR10()\n#summary(model,(3,32,32))\ntrainer = pl.Trainer(gpus=1, max_epochs=3, progress_bar_refresh_rate=20)\ntrainer.fit(model)\n\ntrainer.test()", "import numpy as np\n\nimport torch\nimport torchvision\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nimport cv2\nimport matplotlib.pyplot as plt\nimport glob\nimport os\nfrom PIL import Image\n\n\nts = transforms.ToPILImage()\nts2 = transforms.ToTensor()\nts3 = transforms.Grayscale()\nmean, std = [0.5,0.5,0.5],[0.25,0.25,0.25]\nts4 = transforms.Normalize(mean, std)\n\n#image0 = cv2.imread('YCC.jpg')\n#image0 = cv2.imread('Lenna_(test_image).png')\n#image0 = cv2.imread('mayuyu.jpg')\n#autoencode_preds_cifar10_Gray2ClolarizationNormalizeResize3LYCC_100.png\n#image0 = cv2.imread('autoencode_preds_cifar10_Gray2ClolarizationNormalizeResize3LYCC_100.png')\n#Lenna_(test_image).png\n\"\"\"\nimage1=cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)\nnpimg =ts(ts4(ts2(image1/255.)))\nimage_ = np.transpose(npimg, (0,1, 2))\nplt.title('normalize')\nplt.imshow(image_)\nplt.pause(3)\nplt.savefig('./YCC/normalize.png')\nplt.clf()\n\nimage_=ts(image0).convert('L')\nplt.title('gray')\nplt.imshow(image_)\nplt.pause(3)\nplt.savefig('./YCC/image_gray.png')\nplt.clf()\n\nplt.title('gray_gray')\nplt.imshow(image_, 
cmap='gray')\nplt.pause(3)\nplt.savefig('./YCC/image_gray_gray.png')\nplt.clf()\n\nimage_g=ts3(ts(image0))\nplt.title('gray_ts')\nplt.imshow(image_g)\nplt.pause(3)\nplt.savefig('./YCC/image_g_gray.png')\nplt.clf()\n\nplt.title('gray_ts')\nplt.imshow(image_g, cmap = 'gray')\nplt.pause(3)\nplt.savefig('./YCC/image_g_gray_gray.png')\nplt.clf()\n\n\nimage1=cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)\nplt.title('image1')\nplt.imshow(image1)\nplt.pause(3)\nplt.savefig('./YCC/original.png')\nplt.clf()\n\norgYCrCb = cv2.cvtColor(image1, cv2.COLOR_BGR2YCR_CB)\nplt.title('orgYCrCb')\nplt.imshow(orgYCrCb)\nplt.savefig('./YCC/orgYCrCb.png')\nplt.pause(1)\nplt.clf()\n\norgYCrCb_ = cv2.cvtColor(orgYCrCb, cv2.COLOR_YCR_CB2BGR)\nplt.title('orgYCrCb_')\nplt.imshow(orgYCrCb_)\nplt.savefig('./YCC/orgYCrCb_.png')\nplt.pause(3)\nplt.clf()\n\nY, Cr,Cb = cv2.split(orgYCrCb)\nplt.title('Y')\nplt.imshow(Y) #, cmap = 'gray')\nplt.savefig('./YCC/Y.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('Y_gray')\nplt.imshow(Y, cmap = 'gray')\nplt.savefig('./YCC/Y_gray.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('Cr')\nplt.imshow(Cr) #, cmap = 'gray')\nplt.savefig('./YCC/Cr.png') # _gray.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('Cr_gray')\nplt.imshow(Cr, cmap = 'gray')\nplt.savefig('./YCC/Cr_gray.png') # _gray.png')\nplt.pause(1)\nplt.clf()\n\n\nplt.title('Cb')\nplt.imshow(Cb) #, cmap = 'gray')\nplt.savefig('./YCC/Cb.png') #_gray.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('Cb_gray')\nplt.imshow(Cb, cmap = 'gray')\nplt.savefig('./YCC/Cb_gray.png') #_gray.png')\nplt.pause(1)\nplt.clf()\n\nCr_=ts(Cr).convert(\"RGB\")\nCb_ = ts(Cb).convert(\"RGB\")\nY_ = ts(Y).convert('RGB')\n#CC = cv2.merge((Y,Cr_,Cb_))\n\nplt.title('Cr_RGB')\nplt.imshow(Cr_)\nplt.savefig('./YCC/Cr_RGB.png')\nplt.pause(3)\nplt.clf()\n\nplt.title('Cb_RGB')\nplt.imshow(Cb_)\nplt.savefig('./YCC/Cb_RGB.png')\nplt.pause(3)\nplt.clf()\n\nplt.title('Y_RGB')\nplt.imshow(Y_)\nplt.savefig('./YCC/Y_RGB.png')\nplt.pause(3)\nplt.clf()\n\n\nYCC = cv2.merge((Y,Cr,Cb))\norgYCrCb_2 = cv2.cvtColor(YCC, cv2.COLOR_YCR_CB2BGR)\n\nplt.title('YCrCb_merge')\nplt.imshow(orgYCrCb_2)\nplt.savefig('./YCC/YCC_RGB_merge.png')\nplt.pause(3)\nplt.clf()\n\norgLAB = cv2.cvtColor(image1, cv2.COLOR_BGR2LAB)\nplt.title('orgLAB')\nplt.imshow(orgLAB)\nplt.savefig('./YCC/orgLAB.png')\nplt.pause(1)\nplt.clf()\n\nL, A,B = cv2.split(orgLAB)\nplt.title('L')\nplt.imshow(L) #, cmap = 'gray')\nplt.savefig('./YCC/L.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('L_gray')\nplt.imshow(L, cmap = 'gray')\nplt.savefig('./YCC/L_gray.png')\nplt.pause(1)\nplt.clf()\n\n\nprint(L.shape,A.shape,B.shape)\n\nplt.title('A')\nplt.imshow(A)\nplt.savefig('./YCC/A.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('A_gray')\nplt.imshow(A, cmap ='gray')\nplt.savefig('./YCC/A_gray.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('B')\nplt.imshow(B)\nplt.savefig('./YCC/B.png')\nplt.pause(1)\nplt.clf()\n\nplt.title('B_gray')\nplt.imshow(B, cmap = 'gray')\nplt.savefig('./YCC/B_gray.png')\nplt.pause(1)\nplt.clf()\n\nLAB = cv2.merge((L,A,B))\norgLAB_2 = cv2.cvtColor(LAB, cv2.COLOR_LAB2BGR)\n\nplt.title('LAB_merge')\nplt.imshow(orgLAB_2)\nplt.savefig('./YCC/LAB_merge.png')\nplt.pause(3)\nplt.clf()\n\nX = np.zeros(L.shape,np.uint8)\nprint(X.shape)\n\nplt.title('X')\nplt.imshow(X)\nplt.pause(1)\nplt.clf()\n\nXAB = cv2.merge((X,A,B))\norgXAB_2 = cv2.cvtColor(XAB, cv2.COLOR_LAB2BGR)\n\nplt.title('orgXAB_2')\nplt.imshow(orgXAB_2)\nplt.pause(3)\nplt.clf()\n\ntrans = torchvision.transforms.Compose([\n #torchvision.transforms.Normalize(self.mean, self.std),\n 
#torchvision.transforms.Resize(self.dims),\n #MyAddGaussianNoise(0., 0.1),\n torchvision.transforms.Grayscale()\n ])\nx = ts3(ts(image1))\nplt.title('grayscale')\nplt.imshow(x, cmap = 'gray')\nplt.pause(3)\nplt.clf()\n\norgYCrCb = cv2.cvtColor(image1, cv2.COLOR_BGR2YCR_CB)\nY, Cr,Cb = cv2.split(orgYCrCb)\nplt.title('Y')\nplt.imshow(Y, cmap = 'gray')\nplt.pause(3)\nplt.clf()\n\nxCC = cv2.merge((np.uint8(x),Cr,Cb))\norgxCrCb_2 = cv2.cvtColor(xCC, cv2.COLOR_YCR_CB2BGR)\n\nplt.title('orgxCrCb_2')\nplt.imshow(orgxCrCb_2)\nplt.savefig('./YCC/orgxCrCb_2.png')\nplt.pause(3)\nplt.clf()\n\norgLAB = cv2.cvtColor(image1, cv2.COLOR_BGR2LAB)\nL, A,B = cv2.split(orgLAB)\n\nplt.title('L')\nplt.imshow(L, cmap = 'gray')\nplt.savefig('./YCC/L_gray.png')\nplt.pause(3)\nplt.clf()\n\nplt.title('A')\nplt.imshow(A, cmap = 'gray')\nplt.savefig('./YCC/A_gray.png')\nplt.pause(3)\nplt.clf()\n\nplt.title('B')\nplt.imshow(B, cmap = 'gray')\nplt.savefig('./YCC/B_gray.png')\nplt.pause(3)\nplt.clf()\n\nCC = cv2.merge((Cr,Cb))\n#xAB = cv2.merge((np.uint8(x),Cr,Cb))\nxAB = cv2.merge((np.uint8(x),CC))\norgxAB_2 = cv2.cvtColor(xAB, cv2.COLOR_YCR_CB2BGR)\n\nplt.title('xAB_2')\nplt.imshow(orgxAB_2)\nplt.savefig('./YCC/orgxAB_2.png')\nplt.pause(3)\nplt.clf()\n\"\"\"\n\nimport os\nimport time\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport torchvision\nfrom torchvision.datasets import CIFAR10 #MNIST\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\nimport cv2\n\n#from net_encoder_decoder2D import Encoder, Decoder\n#from net_encoder_decoder1D2DResize import Encoder, Decoder\nfrom net_encoder_decoder_vgg16 import Encoder, Decoder\n\ndef imshow(img,file='', text_=''):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.text(x = 3, y = 2, s = text_, c = \"red\")\n plt.pause(3)\n if file != '':\n plt.savefig(file+'.png')\n plt.close()\n\nfrom pytorch_lightning.callbacks import Callback \nclass MyPrintingCallback(Callback):\n def on_epoch_end(self, trainer, pl_module):\n print('')\n\nclass MyAddGaussianNoise(object):\n def __init__(self, mean=0., std=0.1):\n self.std = std\n self.mean = mean\n \n def __call__(self, tensor):\n return tensor + torch.randn(tensor.size()) * self.std + self.mean\n \n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) \n\nclass rgb2YCrCb(object):\n def __init__(self):\n self.ts = torchvision.transforms.ToPILImage()\n self.ts2 = transform=transforms.ToTensor()\n self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = torchvision.transforms.Compose([\n #torchvision.transforms.Normalize(self.mean, self.std),\n torchvision.transforms.ToPILImage()\n #transforms.ToTensor()\n ])\n pass\n \n def __call__(self, tensor):\n tensor = self.ts3(tensor)\n orgYCrCb = cv2.cvtColor(np.float32(tensor), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n CC = np.array(CC)\n #print(CC.shape)\n return CC\n \n def __repr__(self):\n return self.__class__.__name__\n \nclass rgb2YCrCb_(object):\n def __init__(self):\n self.ts = torchvision.transforms.ToPILImage()\n self.ts2 = transform=transforms.ToTensor()\n self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = torchvision.transforms.Compose([\n #torchvision.transforms.Normalize(self.mean, self.std),\n 
torchvision.transforms.ToPILImage()\n #transforms.ToTensor()\n ]) \n pass\n \n def __call__(self, tensor):\n tensor = self.ts3(tensor)\n orgYCrCb = cv2.cvtColor(np.float32(tensor), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n Y = np.array(Y).reshape(32*2,32*2)\n #print(Y.shape)\n return Y\n\nclass ImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, data_num,train_=True, transform1 = None, transform2 = None,train = True):\n \n self.transform1 = transform1\n self.transform2 = transform2\n self.ts = torchvision.transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = torchvision.transforms.Compose([\n torchvision.transforms.Normalize(self.mean, self.std),\n transforms.ToTensor()\n ])\n self.train = train_\n \n self.data_dir = './'\n self.data_num = data_num\n self.data = []\n self.label = []\n\n # download\n CIFAR10(self.data_dir, train=True, download=True)\n #CIFAR10(self.data_dir, train=False, download=True)\n self.data =CIFAR10(self.data_dir, train=self.train, transform=self.ts2)\n\n def __len__(self):\n return self.data_num\n\n def __getitem__(self, idx):\n out_data = self.data[idx][0]\n out_label_ = self.data[idx][1]\n out_label = torch.from_numpy(np.array(out_label_)).long()\n \n if self.transform1:\n out_data1 = self.transform1(out_data)\n if self.transform2:\n out_data2 = self.transform2(out_data)\n \n #print( out_data1.shape, out_data2.shape)\n #ts(np.array(Y).reshape(64,64))\n return out_data, np.array(out_data1).reshape(1,64,64), np.array(out_data2.reshape(2,64,64)), out_label\n \nclass LitAutoEncoder(pl.LightningModule):\n\n def __init__(self, data_dir='./'):\n super().__init__()\n self.data_dir = data_dir\n self.data_num =50000 #50000\n # Hardcode some dataset specific attributes\n self.num_classes = 10\n self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n self.dims = (32*2, 32*2)\n self.dims2 = (32*4, 32*4)\n self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n \n self.trans2 = torchvision.transforms.Compose([\n torchvision.transforms.Normalize(self.mean, self.std),\n torchvision.transforms.Resize(self.dims)\n ])\n self.trans1 = torchvision.transforms.Compose([\n torchvision.transforms.Normalize(self.mean, self.std),\n torchvision.transforms.Resize(self.dims),\n MyAddGaussianNoise(0., 0.1),\n torchvision.transforms.Grayscale()\n ])\n \n self.trans2 = torchvision.transforms.Compose([\n #torchvision.transforms.Normalize(self.mean, self.std),\n torchvision.transforms.Resize(self.dims),\n rgb2YCrCb(), #CC\n transforms.ToTensor()\n ])\n self.trans1 = torchvision.transforms.Compose([\n #torchvision.transforms.Normalize(self.mean, self.std),\n torchvision.transforms.Resize(self.dims),\n rgb2YCrCb_(), #Y\n transforms.ToTensor(),\n #torchvision.transforms.Grayscale()\n ])\n \n self.encoder = Encoder()\n self.decoder = Decoder()\n \n self.train_acc = pl.metrics.Accuracy()\n self.val_acc = pl.metrics.Accuracy()\n self.test_acc = pl.metrics.Accuracy()\n\n def forward(self, x):\n # in lightning, forward defines the prediction/inference actions\n x, _ = self.encoder(x)\n x = self.decoder(x)\n return x\n\n def training_step(self, batch, batch_idx):\n # training_step defined the train loop. 
It is independent of forward\n _,x,x_ , y = batch\n #print(x.shape, x_.shape)\n z, _ = self.encoder(x)\n x_hat = self.decoder(z)\n loss = F.mse_loss(x_hat, x_)\n self.log('train_loss', loss, prog_bar = True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n _,x, x_, y = batch\n z, _ = self.encoder(x)\n x_hat = self.decoder(z)\n loss = F.mse_loss(x_hat, x_)\n self.log('test_loss', loss, prog_bar = True)\n return loss\n \n def test_step(self, batch, batch_idx):\n # Here we just reuse the validation_step for testing\n return self.validation_step(batch, batch_idx)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) \n return optimizer\n \ndef imshow(img,file='', text_=''):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.text(x = 3, y = 2, s = text_, c = \"red\")\n plt.pause(3)\n if file != '':\n plt.savefig(file+'.png')\n plt.close()\n \nts1 = transforms.Resize((64,64)) \nts = transforms.ToPILImage()\nts2 = transforms.ToTensor()\ntrans2 = transforms.Compose([\n transforms.Resize((64,64)),\n rgb2YCrCb(), #CrCb\n])\ntrans1 = transforms.Compose([\n transforms.Resize((64,64)),\n rgb2YCrCb_(), #Y\n])\nmean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\nts3 = transforms.Compose([\n transforms.Normalize(mean, std),\n #transforms.ToTensor()\n])\n \nautoencoder = LitAutoEncoder()\nPATH = 'example_cifar4L100.ckpt'\npretrained_model = autoencoder.load_from_checkpoint(PATH)\npretrained_model.freeze()\npretrained_model.eval()\n\ndata_num = 50000\ncifar10_full =ImageDataset(data_num, train=True, transform1=trans1, transform2=trans2)\nn_train = int(len(cifar10_full)*0.1)\nn_val = int(len(cifar10_full)*0.1)\nn_test = len(cifar10_full)-n_train -n_val\ncifar10_train, cifar10_val, cifar10_test = torch.utils.data.random_split(cifar10_full, [n_train, n_val, n_test])\n\ntrainloader = DataLoader(cifar10_train, shuffle=True, drop_last = True, batch_size=32, num_workers=0)\nvalloader = DataLoader(cifar10_val, shuffle=False, batch_size=32, num_workers=0)\ntestloader = DataLoader(cifar10_test, shuffle=False, batch_size=32, num_workers=0)\n\nlatent_dim,ver = \"simpleGray2Clolarization\", \"color_plate1\" #####save condition\ndataiter = iter(testloader)\nimages0,images, images1, labels = dataiter.next()\n\n\n\nencode_img,_ = pretrained_model.encoder(images[0:32].to('cpu').reshape(32,1,32*2,32*2)) #3\ndecode_img = pretrained_model.decoder(encode_img)\ndecode_img_cpu = decode_img.cpu()\nimages2 = []\n\nfor i in range(32): #32\n print(i, images[i].shape, decode_img_cpu[i].shape)\n YCC_ = cv2.merge((np.array(images[i].reshape(64,64)),np.array(decode_img_cpu[i].reshape(64,64,2))))\n images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n images2.append(ts3(ts2(images2_/255.)))\nimshow(torchvision.utils.make_grid(images2), 'autoencode_preds_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n\"\"\"\nfor i in range(32): \n plt.title('image2_preds')\n img = images2[i] / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.savefig('./YCC/5piece/image_preds{}.png'.format(i))\n plt.pause(3)\n plt.clf()\n\n plt.title('image_gray')\n img = images[i] / 2 + 0.5 # unnormalize\n #npimg = img.detach().numpy() #img.numpy()\n plt.imshow(img.reshape(64,64), cmap = 'gray')\n plt.savefig('./YCC/5piece/image_gray{}.png'.format(i))\n plt.pause(3)\n plt.clf()\n \n 
plt.title('image_original_norm')\n img = ts3(ts2(ts(images0[i]))) / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.savefig('./YCC/5piece/image_original{}_.png'.format(i))\n plt.pause(3)\n plt.clf()\n \n plt.title('image_original_')\n img = images0[i] / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.savefig('./YCC/5piece/image_original{}.png'.format(i))\n plt.pause(3)\n plt.clf() \n \n plt.title('image_originalx2_')\n img = ts3(ts1(ts2(ts(images0[i])))) / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.savefig('./YCC/5piece/image_originalx2_{}.png'.format(i))\n plt.pause(3)\n plt.clf() \n\"\"\"\npath_= 'YCC'\nYCC = cv2.imread('YCC.jpg')\n#YCC = cv2.imread('color_plate1.jpg')\n\nimage1=cv2.cvtColor(YCC, cv2.COLOR_BGR2RGB)\nplt.title('image1')\nplt.imshow(image1)\nplt.pause(3)\nplt.savefig('./YCC/'+path_+'.png')\nplt.clf()\n\norgYCrCb = cv2.cvtColor(YCC, cv2.COLOR_BGR2YCR_CB)\nY, Cr,Cb = cv2.split(orgYCrCb)\n\nplt.title('images[0]_')\nimg = images[0] / 2 + 0.5 # unnormalize\n#npimg = img.detach().numpy() #img.numpy()\nprint('images[0]',img)\nplt.imshow(img.reshape(64,64), cmap = 'gray')\nplt.savefig('./YCC/'+path_+'_gray.png')\nplt.pause(3)\nplt.clf()\n#images[0] = ts2(Y_)\n\nY_=ts(Y).resize((32*2,32*2))\nplt.title('Y_')\nimg = ts2(Y_)*255. / 2 + 0.5 # unnormalize\nprint('Y_',img)\n#npimg = img.detach().numpy() #img.numpy()\nplt.imshow(img.reshape(64,64), cmap = 'gray')\nplt.savefig('./YCC/'+path_+'original_gray.png')\nplt.pause(3)\nplt.clf()\n \nY_=ts(Y).resize((32*2,32*2))\nplt.title('Y_')\nplt.imshow(Y_, cmap = 'gray')\nplt.savefig('./YCC/'+path_+'_mt_original.png')\nplt.pause(3)\nplt.clf()\n#Y_ = np.array(Y_).reshape(1,64,64)\n\nY_2 = ts2(Y_)*255\n\nencode_img,_ = pretrained_model.encoder(Y_2.to('cpu').reshape(1,1,32*2,32*2)) #3\ndecode_img = pretrained_model.decoder(encode_img)\ndecode_img_cpu = decode_img.cpu()\nprint(Y_2.shape, decode_img_cpu.shape)\nYCC_ = cv2.merge((np.array(Y_2.reshape(64,64)),np.array(decode_img_cpu.reshape(64,64,2))))\nimages2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\nimages2 = ts3(ts2(images2_/255.))\nprint(images2)\nplt.title('preds')\nimg = images2 / 5 + 0.5 # unnormalize\nnpimg = img.detach().numpy() #img.numpy()\nplt.imshow(np.transpose(npimg, (1, 2, 0)))\nplt.savefig('./YCC/'+path_+'_preds_5.png')\nplt.pause(3)\nplt.clf()\n" ]
[ [ "torch.utils.data.DataLoader", "matplotlib.pyplot.savefig", "torch.nn.functional.mse_loss", "torch.utils.data.random_split", "matplotlib.pyplot.close", "numpy.transpose", "torch.cuda.is_available", "matplotlib.pyplot.text", "numpy.array", "matplotlib.pyplot.pause" ], [ "torch.nn.functional.nll_loss", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.utils.data.random_split", "torch.nn.BatchNorm2d", "torch.argmax" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "torch.utils.data.DataLoader", "matplotlib.pyplot.savefig", "torch.nn.functional.mse_loss", "torch.utils.data.random_split", "matplotlib.pyplot.clf", "matplotlib.pyplot.close", "numpy.transpose", "numpy.float32", "matplotlib.pyplot.text", "numpy.array", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
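All three scripts above lean on the same OpenCV idiom: split a YCrCb image into luma Y and chroma (Cr, Cb), let the network predict the chroma from the luma, then merge and convert back to BGR. A minimal sketch of that round-trip on a synthetic image (the random input stands in for real data):

# YCrCb split/merge round-trip as used throughout the scripts above.
import cv2
import numpy as np

bgr = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
ycc = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCR_CB)
Y, Cr, Cb = cv2.split(ycc)

# a colorization model would predict Cr/Cb from Y here; we reuse the originals
rebuilt = cv2.cvtColor(cv2.merge((Y, Cr, Cb)), cv2.COLOR_YCR_CB2BGR)
assert rebuilt.shape == bgr.shape  # round-trip is only approximate on uint8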
JiaxiangBU/xgboost-LightGBM_demo
[ "ea9b443121c8124340b5906340a0b9d5a098ac1a" ]
[ "Xgboost_prac/UCI_CAD/UCI_test.py" ]
[ "import numpy as np\nimport time\nimport pandas as pd\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn import preprocessing\n# GridSearchCV lives in model_selection; sklearn.grid_search was removed in 0.20\nfrom sklearn.model_selection import KFold, GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\ndata_np=np.array(pd.read_csv('./UCI_CAD.csv'))\n\n\nX=np.array([line[:-1] for line in data_np])\ny=np.array([line[-1] for line in data_np])\n\nxgb_model=XGBClassifier(nthread=4,n_estimators=370,\n silent=False,objective='multi:softmax',\n scale_pos_weight=1,max_depth=4,min_child_weight=2,\n seed=1993,gamma=4.4,colsample_bytree=0.1,subsample=0.1,\n learning_rate=0.1)\n\n# # specify your configurations as a dict\n# param_grid_xgboost={'n_estimators':np.arange(300,400,10)}\n# start_time=time.perf_counter()\n# grid_xgb=GridSearchCV(xgb_model,param_grid_xgboost,cv=5,scoring='accuracy')\n# grid_xgb.fit(X,y)\n# endtime=time.perf_counter()\n# print('score',grid_xgb.cv_results_)\n# print('Xgboost_best_estimator_',grid_xgb.best_estimator_)\n# print('Xgboost_best_score_',grid_xgb.best_score_)\n# print('Xgboost_best_params_',grid_xgb.best_params_)\n# print(\"run_time\",endtime-start_time)\n\nstart_time=time.perf_counter()  # time.clock() was removed in Python 3.8\nscore_all=0\nkf=KFold(n_splits=5,shuffle=True)\nfor train,test in kf.split(X):\n print(len(train),len(test))\n X_train=X[train]\n X_test=X[test]\n y_train=y[train]\n y_test=y[test]\n xgb_model.fit(X_train,y_train)\n preds=xgb_model.predict(X_test)\n score=accuracy_score(y_test,preds)\n print(\"score:\",score)\n score_all=score_all+score\nprint(\"score_all\",score_all/5)\nendtime=time.perf_counter()\nprint(\"run_time\",endtime-start_time)\n\n" ]
[ [ "numpy.array", "pandas.read_csv", "sklearn.model_selection.KFold", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
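The grid search commented out in the script above was written against the removed sklearn.grid_search module; the same search under the current sklearn.model_selection API looks like this, reusing xgb_model, X and y from the script (the parameter range is illustrative):

# Modern equivalent of the commented-out grid search above.
import numpy as np
from sklearn.model_selection import GridSearchCV

param_grid = {'n_estimators': np.arange(300, 400, 10)}
grid = GridSearchCV(xgb_model, param_grid, cv=5, scoring='accuracy')
grid.fit(X, y)
print(grid.best_score_, grid.best_params_)  # grid_scores_ is now cv_results_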
iryzhkov/stock-trading-backend
[ "7161026b7b4deb78a934b66550c85a27c6b32933", "7161026b7b4deb78a934b66550c85a27c6b32933" ]
[ "tests/agent/test_polynomial_model.py", "stock_trading_backend/agent/neural_network_model.py" ]
[ "\"\"\"Unit tests for PolynomialModel class\n\"\"\"\nimport os\nimport unittest\n\nimport pandas as pd\n\nfrom stock_trading_backend.agent import PolynomialModel\n\n\nclass TestPolynomialModel(unittest.TestCase):\n \"\"\"Unit tests for PolynomialModel class.\n \"\"\"\n def test_initializes(self):\n \"\"\"Checks if model initializes properly.\n \"\"\"\n model = PolynomialModel(degree=5)\n self.assertEqual(5, model.degree)\n with self.assertRaises(ValueError):\n _ = PolynomialModel(degree=0)\n\n def test_save_and_load(self):\n \"\"\"Checks if saving and loading functions work properly.\n \"\"\"\n file_path = \"data/test/test.pkl\"\n model = PolynomialModel()\n observation = pd.Series([1, 2, 3], [\"balance\", \"net_worth\", \"owned\"])\n predictions_1 = model.predict(observation, [[0, 1]] * 5)\n model.save(file_path)\n model.load(file_path)\n predictions_2 = model.predict(observation, [[0, 1]] * 5)\n self.assertTrue(all(predictions_1 == predictions_2))\n os.remove(file_path)\n\n def test_predict(self):\n \"\"\"Checks if predict function works properly.\n \"\"\"\n model = PolynomialModel()\n observation = pd.Series([1, 2, 3], [\"balance\", \"net_worth\", \"owned\"])\n predictions = model.predict(observation, [[0, 1]] * 5)\n self.assertEqual(5, len(predictions))\n\n def test_train(self):\n \"\"\"Checks if train function works properly.\n \"\"\"\n model = PolynomialModel(degree=2)\n observations = pd.DataFrame([[1, 2, 3]] * 10, columns=[\"balance\", \"net_worth\", \"owned\"])\n actions = [[0]] * 5 + [[1]] * 5\n expected_values = [[0]] * 5 + [[1]] * 5\n losses = [model.train(observations, actions, expected_values) for i in range(10)]\n self.assertTrue(losses[0] > losses[-1])\n", "\"\"\"Neural network model class used by agents.\n\"\"\"\nfrom torch import nn, optim\n\nimport torch\nimport torch.nn.functional as F\n\nfrom stock_trading_backend.agent.model import Model\n\n\nclass NNModel(nn.Module):\n \"\"\"Torch neural network model.\n \"\"\"\n def __init__(self, num_inputs, num_hidden_layers, num_inner_features):\n \"\"\"Initializer for the neural network model.\n\n Args:\n num_inputs: the dimension of input data.\n num_hidden_layers: the number of hidden layers.\n num_inner_features: the number of features in the hidden layers\n \"\"\"\n super(NNModel, self).__init__()\n self.input_layer = nn.Linear(num_inputs, num_inner_features)\n hidden_layers = []\n for _ in range(num_hidden_layers):\n hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))\n hidden_layers.append(nn.ReLU())\n self.hidden_layers = nn.Sequential(*hidden_layers)\n self.output_layer = nn.Linear(num_inner_features, 1)\n\n def forward(self, input_tensor):\n \"\"\"Forward pass on the neural network model.\n\n Args:\n input_tensor: the input tensor.\n\n Returns:\n Tensor with model results.\n \"\"\"\n output = F.relu(self.input_layer(input_tensor))\n output = self.hidden_layers(output)\n output = self.output_layer(output)\n return output\n\n\nclass NeuralNetworkModel(Model):\n \"\"\"Neural network model class.\n \"\"\"\n name = \"neural_network_model\"\n\n def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100):\n \"\"\"Initializer for model class.\n\n Args:\n learning_rate: the learning rate of the model.\n num_hidden_layers: number of hidden layers in the network.\n num_inner_features: number of features in the hidden layers.\n \"\"\"\n super(NeuralNetworkModel, self).__init__()\n self.model = None\n self.optimizer = None\n self.criterion = nn.MSELoss()\n self.learning_rate = 
learning_rate\n self.num_hidden_layers = num_hidden_layers\n self.num_inner_features = num_inner_features\n self.id_str = \"{}_{}_{}_{}\".format(self.name, learning_rate, num_hidden_layers,\n num_inner_features)\n\n def _init_model(self, num_inputs):\n \"\"\"Initializes the internal neural network model.\n\n Args:\n num_inputs: number of inputs that model will have.\n \"\"\"\n self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n def _predict(self, state_action_tensor):\n \"\"\"Use provided information to make a prediction.\n\n Args:\n state_action_tensor: pytorch tensor with state-action values.\n\n Returns:\n Predicted values for observation-action tensors.\n \"\"\"\n if self.model is None:\n self._init_model(state_action_tensor.shape[1])\n return self.model(state_action_tensor).detach().reshape(-1)\n\n def _train(self, state_action_tensor, expected_values_tensor):\n \"\"\"Train the model for 1 epoch.\n\n Args:\n state_action_tensor: pytorch tensor with state-action values.\n expected_values_tensor: pytorch tensor with expected values for each state-action.\n\n Returns:\n The loss before training.\n \"\"\"\n if self.model is None:\n self._init_model(state_action_tensor.shape[1])\n\n self.optimizer.zero_grad()\n output = self.model(state_action_tensor)\n loss = self.criterion(output, expected_values_tensor)\n loss_value = loss.data.item()\n loss.backward()\n self.optimizer.step()\n return loss_value\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ], [ "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.Sequential", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
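A quick shape sanity-check for the NNModel module defined above; the input width and batch size are arbitrary, and the import path assumes the file listing shown for this repo:

# Hypothetical smoke test for NNModel: one hidden block, scalar output head.
import torch
from stock_trading_backend.agent.neural_network_model import NNModel

model = NNModel(num_inputs=8, num_hidden_layers=1, num_inner_features=100)
batch = torch.randn(4, 8)      # 4 state-action rows, 8 features each
out = model(batch)
assert out.shape == (4, 1)     # one predicted value per row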
licanisme/Paddle
[ "d11c140e280880b9d031fa38361f3230aef6cf9c", "d11c140e280880b9d031fa38361f3230aef6cf9c", "d11c140e280880b9d031fa38361f3230aef6cf9c", "d11c140e280880b9d031fa38361f3230aef6cf9c", "d11c140e280880b9d031fa38361f3230aef6cf9c" ]
[ "python/paddle/incubate/hapi/vision/transforms/functional.py", "python/paddle/fluid/tests/unittests/test_fleet_graph_execution_meta_optimizer.py", "python/paddle/fluid/io.py", "python/paddle/incubate/hapi/tests/test_transforms.py", "python/paddle/fluid/tests/unittests/test_fake_quantize_op.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport collections\nimport random\nimport math\n\nimport cv2\nimport numbers\nimport numpy as np\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n__all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale']\n\n\ndef flip(image, code):\n \"\"\"\n Accordding to the code (the type of flip), flip the input image\n\n Args:\n image: Input image, with (H, W, C) shape\n code: Code that indicates the type of flip.\n -1 : Flip horizontally and vertically\n 0 : Flip vertically\n 1 : Flip horizontally\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from paddle.incubate.hapi.vision.transforms import functional as F\n\n fake_img = np.random.rand(224, 224, 3)\n\n # flip horizontally and vertically\n F.flip(fake_img, -1)\n\n # flip vertically\n F.flip(fake_img, 0)\n\n # flip horizontally\n F.flip(fake_img, 1)\n \"\"\"\n return cv2.flip(image, flipCode=code)\n\n\ndef resize(img, size, interpolation=cv2.INTER_LINEAR):\n \"\"\"\n resize the input data to given size\n\n Args:\n input: Input data, could be image or masks, with (H, W, C) shape\n size: Target size of input data, with (height, width) shape.\n interpolation: Interpolation method.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from paddle.incubate.hapi.vision.transforms import functional as F\n\n fake_img = np.random.rand(256, 256, 3)\n\n F.resize(fake_img, 224)\n\n F.resize(fake_img, (200, 150))\n \"\"\"\n\n if isinstance(interpolation, Sequence):\n interpolation = random.choice(interpolation)\n\n if isinstance(size, int):\n h, w = img.shape[:2]\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return cv2.resize(img, (ow, oh), interpolation=interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return cv2.resize(img, (ow, oh), interpolation=interpolation)\n else:\n return cv2.resize(img, size[::-1], interpolation=interpolation)\n\n\ndef pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):\n \"\"\"Pads the given CV Image on all sides with speficified padding mode and fill value.\n\n Args:\n img (np.ndarray): Image to be padded.\n padding (int|tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n ``constant`` means padding with a constant value, this value is specified with fill. \n ``edge`` means padding with the last value at the edge of the image. \n ``reflect`` means padding with reflection of image (without repeating the last value on the edge) \n padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode \n will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.\n ``symmetric`` means padding with reflection of image (repeating the last value on the edge)\n padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode \n will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.\n\n Returns:\n numpy ndarray: Padded image.\n\n Examples:\n \n .. code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import pad\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = pad(fake_img, 2)\n print(fake_img.shape)\n\n \"\"\"\n\n if not isinstance(padding, (numbers.Number, list, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, list, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\n \"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Expected padding mode to be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode)\n\n PAD_MOD = {\n 'constant': cv2.BORDER_CONSTANT,\n 'edge': cv2.BORDER_REPLICATE,\n 'reflect': cv2.BORDER_DEFAULT,\n 'symmetric': cv2.BORDER_REFLECT\n }\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, collections.Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, collections.Sequence) and len(padding) == 4:\n pad_left, pad_top, pad_right, pad_bottom = padding\n\n if isinstance(fill, numbers.Number):\n fill = (fill, ) * (2 * len(img.shape) - 3)\n\n if padding_mode == 'constant':\n assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \\\n 'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))\n\n img = cv2.copyMakeBorder(\n src=img,\n top=pad_top,\n bottom=pad_bottom,\n left=pad_left,\n right=pad_right,\n borderType=PAD_MOD[padding_mode],\n value=fill)\n\n return img\n\n\ndef rotate(img,\n angle,\n interpolation=cv2.INTER_LINEAR,\n expand=False,\n center=None):\n \"\"\"Rotates the image by angle.\n\n Args:\n img (numpy.ndarray): Image to be rotated.\n angle (float|int): In degrees clockwise order.\n interpolation (int, optional): Interpolation method.\n expand (bool|optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple|optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n Returns:\n numpy ndarray: Rotated image.\n\n Examples:\n \n .. 
code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import rotate\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = rotate(fake_img, 10)\n print(fake_img.shape)\n \"\"\"\n dtype = img.dtype\n\n h, w, _ = img.shape\n point = center or (w / 2, h / 2)\n M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)\n\n if expand:\n if center is None:\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n M[0, 2] += (nW / 2) - point[0]\n M[1, 2] += (nH / 2) - point[1]\n\n dst = cv2.warpAffine(img, M, (nW, nH))\n else:\n xx = []\n yy = []\n for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]),\n np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])):\n target = np.dot(M, point)\n xx.append(target[0])\n yy.append(target[1])\n nh = int(math.ceil(max(yy)) - math.floor(min(yy)))\n nw = int(math.ceil(max(xx)) - math.floor(min(xx)))\n\n M[0, 2] += (nw - w) / 2\n M[1, 2] += (nh - h) / 2\n dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation)\n else:\n dst = cv2.warpAffine(img, M, (w, h), flags=interpolation)\n return dst.astype(dtype)\n\n\ndef to_grayscale(img, num_output_channels=1):\n \"\"\"Converts image to grayscale version of image.\n\n Args:\n img (numpy.ndarray): Image to be converted to grayscale.\n\n Returns:\n numpy.ndarray: Grayscale version of the image.\n if num_output_channels == 1, returned image is single channel\n if num_output_channels == 3, returned image is 3 channel with r == g == b\n \n Examples:\n \n .. code-block:: python\n\n import numpy as np\n\n from paddle.incubate.hapi.vision.transforms.functional import to_grayscale\n\n fake_img = np.random.rand(500, 500, 3).astype('float32')\n\n fake_img = to_grayscale(fake_img)\n print(fake_img.shape)\n \"\"\"\n\n if num_output_channels == 1:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n elif num_output_channels == 3:\n img = cv2.cvtColor(\n cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)\n else:\n raise ValueError('num_output_channels should be either 1 or 3')\n\n return img\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport paddle\nimport os\nfrom launch_function_helper import launch_func\n\n\nclass TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):\n def test_graph_execution_optimizer_not_apply(self):\n node_a = {\n \"PADDLE_TRAINER_ID\": \"0\",\n \"PADDLE_CURRENT_ENDPOINT\": \"127.0.0.1:36003\",\n \"PADDLE_TRAINERS_NUM\": \"2\",\n \"PADDLE_TRAINER_ENDPOINTS\": \"127.0.0.1:36003,127.0.0.1:36004\",\n \"http_proxy\": \"\",\n \"https_proxy\": \"\"\n }\n\n node_b = {\n \"PADDLE_TRAINER_ID\": \"1\",\n \"PADDLE_CURRENT_ENDPOINT\": \"127.0.0.1:36004\",\n \"PADDLE_TRAINERS_NUM\": \"2\",\n \"PADDLE_TRAINER_ENDPOINTS\": \"127.0.0.1:36003,127.0.0.1:36004\",\n \"http_proxy\": \"\",\n \"https_proxy\": \"\"\n }\n\n def node_func():\n import paddle.fleet as fleet\n import paddle.fluid.incubate.fleet.base.role_maker as role_maker\n role = role_maker.PaddleCloudRoleMaker(is_collective=True)\n fleet.init(role)\n input_x = paddle.fluid.layers.data(\n name=\"x\", shape=[32], dtype='float32')\n input_y = paddle.fluid.layers.data(\n name=\"y\", shape=[1], dtype='int64')\n\n fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')\n fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')\n prediction = paddle.fluid.layers.fc(input=[fc_2],\n size=2,\n act='softmax')\n cost = paddle.fluid.layers.cross_entropy(\n input=prediction, label=input_y)\n avg_cost = paddle.fluid.layers.mean(x=cost)\n\n strategy = paddle.fleet.DistributedStrategy()\n optimizer = paddle.optimizer.SGD(learning_rate=0.01)\n optimizer = fleet.distributed_optimizer(\n optimizer, strategy=strategy)\n optimizer.minimize(avg_cost)\n\n proc_a = launch_func(node_func, node_a)\n proc_a.start()\n proc_b = launch_func(node_func, node_b)\n proc_b.start()\n proc_a.join()\n proc_b.join()\n\n def test_graph_execution_optimizer(self):\n node_a = {\n \"PADDLE_TRAINER_ID\": \"0\",\n \"PADDLE_CURRENT_ENDPOINT\": \"127.0.0.1:36001\",\n \"PADDLE_TRAINERS_NUM\": \"2\",\n \"PADDLE_TRAINER_ENDPOINTS\": \"127.0.0.1:36001,127.0.0.1:36002\",\n \"http_proxy\": \"\",\n \"https_proxy\": \"\"\n }\n\n node_b = {\n \"PADDLE_TRAINER_ID\": \"1\",\n \"PADDLE_CURRENT_ENDPOINT\": \"127.0.0.1:36002\",\n \"PADDLE_TRAINERS_NUM\": \"2\",\n \"PADDLE_TRAINER_ENDPOINTS\": \"127.0.0.1:36001,127.0.0.1:36002\",\n \"http_proxy\": \"\",\n \"https_proxy\": \"\"\n }\n\n def node_func():\n import paddle.fleet as fleet\n import paddle.fluid.incubate.fleet.base.role_maker as role_maker\n role = role_maker.PaddleCloudRoleMaker(is_collective=True)\n fleet.init(role)\n input_x = paddle.fluid.layers.data(\n name=\"x\", shape=[32], dtype='float32')\n input_y = paddle.fluid.layers.data(\n name=\"y\", shape=[1], dtype='int64')\n\n fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')\n fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')\n prediction = paddle.fluid.layers.fc(input=[fc_2],\n size=2,\n act='softmax')\n cost = paddle.fluid.layers.cross_entropy(\n input=prediction, 
label=input_y)\n avg_cost = paddle.fluid.layers.mean(x=cost)\n\n strategy = paddle.fleet.DistributedStrategy()\n strategy.nccl_comm_num = 2\n strategy.sync_nccl_allreduce = True\n optimizer = paddle.optimizer.SGD(learning_rate=0.01)\n optimizer = fleet.distributed_optimizer(\n optimizer, strategy=strategy)\n optimizer.minimize(avg_cost)\n exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace())\n exe.run(paddle.fluid.default_startup_program())\n\n import numpy as np\n\n def gen_data():\n return {\n \"x\": np.random.random(size=(128, 32)).astype('float32'),\n \"y\": np.random.randint(\n 2, size=(128, 1)).astype('int64')\n }\n\n for i in range(10):\n cost_val = exe.run(feed=gen_data(), fetch_list=[avg_cost.name])\n print(\"cost of step[{}] = {}\".format(i, cost_val))\n\n proc_a = launch_func(node_func, node_a)\n proc_a.start()\n proc_b = launch_func(node_func, node_b)\n proc_b.start()\n proc_a.join()\n proc_b.join()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport errno\nimport warnings\nimport six\nimport logging\nimport pickle\nimport contextlib\nfrom functools import reduce\n\nimport numpy as np\n\nimport paddle\nimport paddle.reader\nfrom paddle.reader import *\nfrom paddle.fluid import layers\nfrom paddle.fluid.executor import Executor, global_scope\nfrom paddle.fluid.evaluator import Evaluator\nfrom paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable, \\\n program_guard, dygraph_not_support\nfrom .wrapped_decorator import signature_safe_contextmanager\nfrom paddle.fluid.compiler import CompiledProgram\nfrom paddle.fluid.log_helper import get_logger\nfrom . import reader\nfrom . import unique_name\nfrom .reader import *\nfrom . import dataloader\nfrom .dataloader import *\nfrom . import core\nfrom .. import compat as cpt\n\nbatch = paddle.batch\n\n__all__ = [\n 'save_vars',\n 'save_params',\n 'save_persistables',\n 'load_vars',\n 'load_params',\n 'load_persistables',\n 'save_inference_model',\n 'load_inference_model',\n 'batch',\n 'save',\n 'load',\n 'load_program_state',\n 'set_program_state',\n 'get_program_parameter',\n 'get_program_persistable_vars',\n] + reader.__all__ + paddle.reader.__all__\n\n_logger = get_logger(\n __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')\n\n\ndef is_parameter(var):\n \"\"\"\n Check whether the given variable is an instance of Parameter.\n\n Args:\n var(Variable): The variable to be checked.\n\n Returns:\n bool: True if the given `var` is an instance of Parameter,\n False if not.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n param = fluid.default_main_program().global_block().var('fc.w')\n res = fluid.io.is_parameter(param)\n \"\"\"\n return isinstance(var, Parameter)\n\n\ndef is_persistable(var):\n \"\"\"\n Check whether the given variable is persistable.\n\n Args:\n var(Variable): The variable to be checked.\n\n Returns:\n bool: True if the given `var` is persistable\n False if not.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n param = fluid.default_main_program().global_block().var('fc.b')\n res = fluid.io.is_persistable(param)\n \"\"\"\n if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \\\n var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \\\n var.desc.type() == core.VarDesc.VarType.READER:\n return False\n return var.persistable\n\n\ndef is_belong_to_optimizer(var):\n if not (isinstance(var, Parameter) or var.desc.need_check_feed()):\n return is_persistable(var)\n\n return False\n\n\n@dygraph_not_support\ndef get_program_parameter(program):\n \"\"\"\n :api_attr: Static Graph\n\n Get all the parameters from Program.\n\n Args:\n var(Program): The Program to get parameters\n\n Returns:\n list: The list contains all parameters in the program\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n data = fluid.data(name=\"img\", shape=[64, 784])\n w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')\n b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')\n list_para = fluid.io.get_program_parameter( fluid.default_main_program() )\n \"\"\"\n return list(filter(is_parameter, program.list_vars()))\n\n\n@dygraph_not_support\ndef get_program_persistable_vars(program):\n \"\"\"\n :api_attr: Static Graph\n\n Get all the persistable vars from Program.\n\n Args:\n var(Program): The Program to get persistable vars\n\n Returns:\n list: The list contains all persistable vars in the program\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n data = fluid.data(name=\"img\", shape=[64, 784])\n w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')\n b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')\n list_para = fluid.io.get_program_persistable_vars( fluid.default_main_program() )\n \"\"\"\n return list(filter(is_persistable, program.list_vars()))\n\n\ndef _clone_var_in_block_(block, var):\n assert isinstance(var, Variable)\n if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR:\n return block.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n lod_level=var.lod_level,\n persistable=True)\n else:\n return block.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n persistable=True)\n\n\n@signature_safe_contextmanager\ndef _load_program_scope(main=None, startup=None, scope=None):\n prog = main if main else paddle.fluid.Program()\n startup_prog = startup if startup else paddle.fluid.Program()\n scope = scope if scope else paddle.fluid.core.Scope()\n with paddle.fluid.scope_guard(scope):\n with paddle.fluid.program_guard(prog, startup_prog):\n with paddle.fluid.unique_name.guard():\n with paddle.fluid.framework._dygraph_guard(None):\n yield\n\n\ndef _get_valid_program(main_program):\n if main_program is None:\n main_program = default_main_program()\n elif isinstance(main_program, CompiledProgram):\n main_program = main_program._program\n if main_program is None:\n raise TypeError(\n \"The type of input main_program is invalid, expected type is Program, but received None\"\n )\n warnings.warn(\n \"The input is a CompiledProgram, this is not recommended.\")\n if not isinstance(main_program, Program):\n raise TypeError(\n \"The type of input main_program is invalid, expected type is fluid.Program, but received %s\"\n % type(main_program))\n return main_program\n\n\n@dygraph_not_support\ndef save_vars(executor,\n dirname,\n main_program=None,\n vars=None,\n predicate=None,\n filename=None):\n \"\"\"\n :api_attr: Static Graph\n\n This API saves specific variables in the `Program` to files.\n\n There are two ways to specify the variables to be saved: set variables in\n a list and assign it to the `vars`, or use the `predicate` function to select\n variables that make `predicate(variable) == True`. The first way has a higher priority.\n\n The `dirname` is used to specify the folder where to save variables.\n If you prefer to save variables in separate files in the `dirname` folder,\n do not set `filename`. If you prefer to save all variables in a single file,\n use `filename` to specify it.
Otherwise, let `filename` be None.\n Default: None\n\n Returns:\n str: When saving parameters to a file, returns None.\n When saving parameters to memory, returns a binary string containing parameters.\n\n Raises:\n TypeError: If `main_program` is not an instance of Program nor None.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_prog = fluid.Program()\n startup_prog = fluid.Program()\n with fluid.program_guard(main_prog, startup_prog):\n data = fluid.layers.data(name=\"img\", shape=[64, 784], append_batch_size=False)\n w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')\n b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')\n hidden_w = fluid.layers.matmul(x=data, y=w)\n hidden_b = fluid.layers.elementwise_add(hidden_w, b)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n # The first usage: use `vars` to set the saved variables.\n var_list = [w, b]\n path = \"./my_paddle_vars\"\n fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,\n filename=\"vars_file\")\n # w and b will be save in a file named \"var_file\".\n\n # The second usage: use `predicate` to select the saved variable.\n def name_has_fc(var):\n res = \"fc\" in var.name\n return res\n param_path = \"./my_paddle_model\"\n fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate = name_has_fc)\n # all variables whose names contain \"fc \" are saved.\n \"\"\"\n save_to_memory = False\n if dirname is None and filename is None:\n save_to_memory = True\n\n main_program = _get_valid_program(main_program)\n\n if vars is None:\n return save_vars(\n executor,\n main_program=main_program,\n dirname=dirname,\n vars=list(filter(predicate, main_program.list_vars())),\n filename=filename)\n else:\n params_var_name = unique_name.generate(\"saved_params\")\n # give warning when there is no var in model\n if len(list(vars)) == 0:\n warnings.warn(\n \"no variable in your model, please ensure there are any variables in your model to save\"\n )\n return None\n\n save_program = Program()\n save_block = save_program.global_block()\n\n save_var_map = {}\n for each_var in vars:\n # NOTE: don't save the variable which type is RAW\n if each_var.type == core.VarDesc.VarType.RAW:\n continue\n new_var = _clone_var_in_block_(save_block, each_var)\n if filename is None and save_to_memory is False:\n save_file_path = os.path.join(\n os.path.normpath(dirname), new_var.name)\n save_block.append_op(\n type='save',\n inputs={'X': [new_var]},\n outputs={},\n attrs={'file_path': os.path.normpath(save_file_path)})\n else:\n save_var_map[new_var.name] = new_var\n\n if filename is not None or save_to_memory:\n save_var_list = []\n for name in sorted(save_var_map.keys()):\n save_var_list.append(save_var_map[name])\n\n save_path = str()\n if save_to_memory is False:\n save_path = os.path.join(os.path.normpath(dirname), filename)\n\n saved_params = save_block.create_var(\n type=core.VarDesc.VarType.RAW, name=params_var_name)\n saved_params.desc.set_persistable(True)\n save_block.append_op(\n type='save_combine',\n inputs={'X': save_var_list},\n outputs={'Y': saved_params},\n attrs={\n 'file_path': save_path,\n 'save_to_memory': save_to_memory\n })\n\n # NOTE(zhiqiu): save op will add variable kLookupTablePath in save_program.desc,\n # which leads to diff on save_program and its desc. 
Call _sync_with_cpp\n # to keep consistency.\n save_program._sync_with_cpp()\n executor.run(save_program)\n if save_to_memory:\n return global_scope().find_var(params_var_name).get_bytes()\n\n\n@dygraph_not_support\ndef save_params(executor, dirname, main_program=None, filename=None):\n \"\"\"\n :api_attr: Static Graph\n\n This operator saves all parameters from the :code:`main_program` to\n the folder :code:`dirname` or file :code:`filename`. You can refer to\n :ref:`api_guide_model_save_reader_en` for more details.\n\n Use the :code:`dirname` to specify the saving folder. If you would like to\n save parameters in separate files, set :code:`filename` None; if you would\n like to save all parameters in a single file, use :code:`filename` to specify\n the file name.\n\n Note:\n Some variables are not Parameter while they are necessary for\n training, such as learning rate, global step, etc. So you can NOT save\n and continue your training just by :ref:`api_fluid_io_save_params`\n and :ref:`api_fluid_io_load_params`. Please use :ref:`api_fluid_io_save_persistables`\n and :ref:`api_fluid_io_load_persistables` instead.\n\n If you want to save your model for the inference, please use the\n :ref:`api_fluid_io_save_inference_model`. You can refer to\n :ref:`api_guide_model_save_reader_en` for more details.\n\n Args:\n executor(Executor): The executor to run for saving parameters, You can\n refer to :ref:`api_guide_executor_en`.\n dirname(str, optional): The saving directory path.\n When you need to save the parameter to the memory, set it to None.\n main_program(Program, optional): The program whose parameters will be\n saved. You can refer to\n :ref:`api_guide_Program_en` for more\n details. If it is None, the default main\n program will be used.\n Default: None\n filename(str, optional): The file to save all parameters. If you prefer\n to save parameters in different files, set it\n to None.\n Default: None\n\n Returns:\n str: When saving parameters to a file, returns None.\n When saving parameters to memory, returns a binary string containing parameters.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n params_path = \"./my_paddle_model\"\n image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())\n predict = fluid.layers.fc(input=image, size=10, act='softmax')\n\n loss = fluid.layers.cross_entropy(input=predict, label=label)\n avg_loss = fluid.layers.mean(loss)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n fluid.io.save_params(executor=exe, dirname=params_path)\n # The parameters weights and bias of the fc layer in the network are going to\n # be saved in different files in the path \"./my_paddle_model\"\n \"\"\"\n return save_vars(\n executor,\n dirname=dirname,\n main_program=main_program,\n vars=None,\n predicate=is_parameter,\n filename=filename)\n\n\ndef _save_distributed_persistables(executor, dirname, main_program):\n \"\"\"\n save_persistables for distributed training.\n the method will do things listed below:\n 1.save part of persistable variables on trainer.\n 2.receive \"remote prefetch variables\" from parameter servers and merge them.\n 3.save \"distributed lookup table\" on parameter servers.\n 4.receive \"optimizer variables\" from parameter servers and merge them.\n\n Args:\n executor(Executor): The executor to run for saving parameters.\n dirname(str): The saving directory path.\n main_program(Program): The program whose parameters will be\n saved. the main_program must be the trainer_program\n get after transpiler.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n exe = fluid.Executor(fluid.CPUPlace())\n param_path = \"./my_paddle_model\"\n t = distribute_transpiler.DistributeTranspiler()\n t.transpile(...)\n train_program = t.get_trainer_program()\n _save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)\n \"\"\"\n\n def __save_remote_params(executor, dirname, remote_params_map):\n \"\"\"\n receive params on pserver through rpc.\n if the params are be sliced, will concat them to one, then save it.\n \"\"\"\n if not remote_params_map:\n return\n\n prog = Program()\n block = prog.global_block()\n\n # recv optimize vars from pserver\n for name, remote_params in remote_params_map.items():\n origin = remote_params[0].origin\n is_slice = remote_params[0].is_slice\n\n slices = [None] * len(remote_params)\n slice_varnames = [None] * len(remote_params)\n remote_varnames = [None] * len(remote_params)\n endpoints = [None] * len(remote_params)\n\n for idx, optimizer in enumerate(remote_params):\n block_id = optimizer.block_id\n slice = optimizer.slice\n endpoint = optimizer.endpoint\n\n index = block_id if is_slice else idx\n slices[index] = slice\n slice_varnames[index] = \"{}.slice.{}\".format(slice.name, idx)\n remote_varnames[index] = slice.name\n endpoints[index] = endpoint\n\n slice_shapes = []\n for slice in slices:\n tmp = [str(dim) for dim in slice.shape]\n slice_shapes.append(\",\".join(tmp))\n\n block.append_op(\n type='recv_save',\n attrs={\n \"trainer_id\": 0,\n \"shape\": origin.shape,\n \"slice_shapes\": slice_shapes,\n \"slice_varnames\": slice_varnames,\n \"remote_varnames\": remote_varnames,\n \"endpoints\": endpoints,\n \"file_path\": os.path.join(dirname, origin.name)\n })\n\n executor.run(prog)\n\n def __save_distributed_lookup_tables(executor, dirname,\n distributed_lookup_table, endpoints):\n \"\"\"\n because the distributed 
lookup table may be too huge to merge and save in one place,\n it is saved on each parameter server independently.\n\n the save directory is dirname/\"__lookup_table__\".\n\n \"\"\"\n prog = Program()\n block = prog.global_block()\n\n # if there is a lookup table, trainer 0 will notify all pservers to save.\n lookup_table_filename = os.path.join(dirname, \"__lookup_table__\")\n attrs = {}\n attrs['epmap'] = endpoints\n attrs['dir'] = lookup_table_filename\n attrs['lookup_table'] = distributed_lookup_table\n block.append_op(\n type='checkpoint_notify', inputs={}, outputs={}, attrs=attrs)\n executor.run(prog)\n\n def __exclude_vars(exclude_var_names=[]):\n def is_valid(var):\n if var.name in exclude_var_names:\n return False\n if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \\\n var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \\\n var.desc.type() == core.VarDesc.VarType.READER:\n return False\n return var.persistable\n\n return is_valid\n\n if not isinstance(main_program, Program):\n raise TypeError(\"'main_program' should be an instance of Program.\")\n\n if not main_program._is_distributed:\n raise ValueError(\n \"'_save_distributed_persistables' is designed only for distributed training.\"\n )\n\n remote_params_map = main_program._parameters_on_pservers.get_distributed_vars_by_vtypes(\n [\"Optimizer\", \"RemotePrefetch\"], groupby=True)\n\n exclude_var_names = []\n if remote_params_map:\n exclude_var_names.extend(remote_params_map.keys())\n\n if main_program._distributed_lookup_table:\n if isinstance(main_program._distributed_lookup_table, list):\n exclude_var_names.extend(main_program._distributed_lookup_table)\n else:\n exclude_var_names.append(main_program._distributed_lookup_table)\n\n local_vars = list(\n filter(__exclude_vars(exclude_var_names), main_program.list_vars()))\n save_vars(\n executor, main_program=main_program, dirname=dirname, vars=local_vars)\n\n if main_program._is_chief:\n if remote_params_map:\n __save_remote_params(executor, dirname, remote_params_map)\n if main_program._distributed_lookup_table:\n __save_distributed_lookup_tables(\n executor, dirname, main_program._distributed_lookup_table,\n main_program._endpoints)\n\n\n@dygraph_not_support\ndef save_persistables(executor, dirname, main_program=None, filename=None):\n \"\"\"\n :api_attr: Static Graph\n\n This operator saves all persistable variables from :code:`main_program` to \n the folder :code:`dirname` or file :code:`filename`. You can refer to \n :ref:`api_guide_model_save_reader_en` for more details.\n\n The :code:`dirname` is used to specify the folder where persistable variables\n are going to be saved. If you would like to save variables in separate\n files, set :code:`filename` None; if you would like to save all variables in a\n single file, use :code:`filename` to specify the file name.\n\n Args:\n executor(Executor): The executor to run for saving persistable variables.\n You can refer to :ref:`api_guide_executor_en` for\n more details.\n\n dirname(str, optional): The saving directory path.\n When you need to save the parameter to the memory, set it to None.\n main_program(Program, optional): The program whose persistable variables will\n be saved. You can refer to \n :ref:`api_guide_Program_en` for more details.\n If it is None, the default main program will\n be used.\n Default: None.\n filename(str, optional): The file to save all variables.
If you prefer to\n save variables in different files, set it to None.\n Default: None.\n\n Returns:\n str: When saving parameters to a file, returns None.\n When saving parameters to memory, returns a binary string containing parameters.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n dir_path = \"./my_paddle_model\"\n file_name = \"persistables\"\n image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())\n\n predict = fluid.layers.fc(input=image, size=10, act='softmax')\n loss = fluid.layers.cross_entropy(input=predict, label=label)\n avg_loss = fluid.layers.mean(loss)\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n fluid.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)\n # The persistables variables weights and bias in the fc layer of the network\n # are going to be saved in the same file named \"persistables\" in the path\n # \"./my_paddle_model\"\n \"\"\"\n if main_program and main_program._is_distributed:\n return _save_distributed_persistables(\n executor, dirname=dirname, main_program=main_program)\n else:\n return save_vars(\n executor,\n dirname=dirname,\n main_program=main_program,\n vars=None,\n predicate=is_persistable,\n filename=filename)\n\n\ndef load_vars(executor,\n dirname,\n main_program=None,\n vars=None,\n predicate=None,\n filename=None):\n \"\"\"\n :api_attr: Static Graph\n\n This API loads variables from files by executor.\n\n There are two ways to specify the variables to be loaded: the first way, set\n variables in a list and assign it to the `vars`; the second way, use the\n `predicate` function to select variables that make `predicate(variable) == True`.\n The first way has a higher priority.\n\n The `dirname` is used to specify the folder where to load variables.\n If variables were saved in separate files in the folder `dirname`,\n set `filename` None. If all variables were saved in a single file,\n use `filename` to specify it.\n\n Args:\n executor(Executor): The executor to run for loading variables.\n dirname(str): The folder where to load the variables.\n main_program(Program, optional): The program whose variables will be loaded.\n If it is None, the default main program will\n be used automatically.\n Default: None\n vars(list[Variable], optional): The list that contains all variables to be loaded.\n Default: None\n predicate(function, optional): The function selects variables that make\n `predicate(variable) == True`.\n Default: None\n filename(str, optional): The file which saved all required variables. If variables\n were saved in separate files, set it to be None.\n Default: None\n\n Returns:\n None\n\n Raises:\n TypeError: If `main_program` is not an instance of Program nor None.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n main_prog = fluid.Program()\n startup_prog = fluid.Program()\n with fluid.program_guard(main_prog, startup_prog):\n data = fluid.layers.data(name=\"img\", shape=[64, 784], append_batch_size=False)\n w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')\n b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')\n hidden_w = fluid.layers.matmul(x=data, y=w)\n hidden_b = fluid.layers.elementwise_add(hidden_w, b)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n # The first usage: using `vars` to specify the variables.\n path = \"./my_paddle_vars\"\n var_list = [w, b]\n fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,\n filename=\"vars_file\")\n fluid.io.load_vars(executor=exe, dirname=path, vars=var_list,\n filename=\"vars_file\")\n # w and b will be loaded, and they are supposed to\n # be saved in the same file named 'var_file' in the path \"./my_paddle_vars\".\n\n # The second usage: using the `predicate` function to select variables\n param_path = \"./my_paddle_model\"\n def name_has_fc(var):\n res = \"fc\" in var.name\n return res\n fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog,\n vars=None, predicate=name_has_fc)\n fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog,\n vars=None, predicate=name_has_fc)\n # Load All variables in the `main_program` whose name includes \"fc\".\n # And all the variables are supposed to be saved in separate files.\n\n \"\"\"\n vars_from_memory = False\n if dirname is not None:\n dirname = os.path.normpath(dirname)\n else:\n vars_from_memory = True\n\n if vars is None:\n if main_program is None:\n main_program = default_main_program()\n if not isinstance(main_program, Program):\n raise TypeError(\n \"The type of input main_program is invalid, expected type is fluid.Program, but received %s\"\n % type(main_program))\n\n load_vars(\n executor,\n dirname=dirname,\n main_program=main_program,\n vars=list(filter(predicate, main_program.list_vars())),\n filename=filename)\n else:\n load_prog = Program()\n load_block = load_prog.global_block()\n\n if main_program is None:\n main_program = default_main_program()\n\n if not isinstance(main_program, Program):\n raise TypeError(\n \"The type of input main_program is invalid, expected type is fluid.Program, but received %s\"\n % type(main_program))\n\n # save origin param shape\n orig_para_shape = {}\n load_var_map = {}\n\n check_vars = []\n sparse_vars = []\n\n for each_var in vars:\n assert isinstance(each_var, Variable)\n\n if each_var.type == core.VarDesc.VarType.RAW:\n continue\n\n if isinstance(each_var, Parameter):\n orig_para_shape[each_var.name] = tuple(each_var.desc.get_shape(\n ))\n\n if each_var.type == core.VarDesc.VarType.SELECTED_ROWS:\n sparse_vars.append(each_var)\n continue\n\n new_var = _clone_var_in_block_(load_block, each_var)\n check_vars.append(each_var)\n\n if filename is None:\n if dirname is None:\n raise ValueError(\n \"The directory path and params cannot be None at the same time.\"\n )\n load_block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [new_var]},\n attrs={'file_path': os.path.join(dirname, new_var.name)})\n else:\n load_var_map[new_var.name] = new_var\n\n for each_var in sparse_vars:\n assert isinstance(each_var, Variable)\n\n if filename is not None:\n raise ValueError(\n \"SelectedRows can not be load with load_combine\")\n\n new_var = _clone_var_in_block_(load_block, 
each_var)\n\n var_path = os.path.join(dirname, new_var.name)\n if not os.path.exists(var_path):\n raise ValueError(\"SelectedRows var {} can not find at {}\".\n format(new_var.name, var_path))\n\n if os.path.isfile(var_path):\n load_block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [new_var]},\n attrs={'file_path': os.path.join(dirname, new_var.name)})\n else:\n blocks = []\n block_paths = os.listdir(var_path)\n\n for block in block_paths:\n if block.startswith(new_var.name):\n blocks.append(block)\n\n slices = []\n for block in blocks:\n slice = load_block.create_var(\n name=block,\n type=new_var.type,\n shape=new_var.shape,\n dtype=new_var.dtype,\n persistable=False)\n slices.append(slice)\n\n file_path = os.path.join(var_path, block, \"Param\")\n load_block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [slice]},\n attrs={'file_path': file_path})\n\n load_block.append_op(\n type='lookup_sparse_table_merge',\n inputs={'X': slices},\n outputs={'Out': new_var},\n attrs={})\n\n if filename is not None:\n load_var_list = []\n for name in sorted(load_var_map.keys()):\n load_var_list.append(load_var_map[name])\n\n if vars_from_memory is False:\n filename = os.path.join(dirname, filename)\n\n load_block.append_op(\n type='load_combine',\n inputs={},\n outputs={\"Out\": load_var_list},\n attrs={\n 'file_path': filename,\n 'model_from_memory': vars_from_memory\n })\n executor.run(load_prog)\n\n # check var shape\n for each_var in check_vars:\n if not isinstance(each_var, Parameter):\n continue\n var_temp = paddle.fluid.global_scope().find_var(each_var.name)\n assert var_temp != None, \"can't not find var: \" + each_var.name\n new_shape = (np.array(var_temp.get_tensor())).shape\n assert each_var.name in orig_para_shape, each_var.name + \"MUST in var list\"\n orig_shape = orig_para_shape.get(each_var.name)\n if new_shape != orig_shape:\n raise RuntimeError(\n \"Variable's shape does not match, the Program requires a parameter with the shape of ({}), \"\n \"while the loaded parameter (namely [ {} ]) has a shape of ({}).\".\n format(orig_shape, each_var.name, new_shape))\n\n\n@dygraph_not_support\ndef load_params(executor, dirname, main_program=None, filename=None):\n \"\"\"\n :api_attr: Static Graph\n\n This API filters out all parameters from the give ``main_program``\n and then tries to load these parameters from the directory ``dirname`` or\n the file ``filename``.\n\n Use the ``dirname`` to specify the directory where parameters were saved. If\n parameters were saved in separate files under the directory `dirname`, set\n ``filename`` as None; if all parameters were saved in a single file, use\n ``filename`` to specify the file name.\n\n **Note**:\n Some variables are not Parameter while they are necessary for\n training, such as learning rate, global step, etc. So you cannot save and\n continue your training just by using :ref:`api_fluid_io_save_params` and\n :ref:`api_fluid_io_load_params`. Please use :ref:`api_fluid_io_save_persistables`\n and :ref:`api_fluid_io_load_persistables` instead.\n\n If you want to load the pre-trained model structure and parameters\n for the inference, please use the :ref:`api_fluid_io_load_inference_model` API. You can\n refer to :ref:`api_guide_model_save_reader_en` for more details.\n\n Args:\n executor(Executor): The executor used for loading parameters.\n See :ref:`api_guide_executor_en` for more details about it.\n dirname(str): The directory path.\n main_program(Program, optional): The program whose parameters will be\n loaded. 
If it is None, the ``default_main_program``\n will be used automatically. See :ref:`api_guide_Program_en`\n for more about ``Program``.\n Default: None.\n filename(str, optional): The file which saved all parameters. If parameters\n were saved in separated files, set it to None.\n Default: None.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n exe = fluid.Executor(fluid.CPUPlace())\n param_path = \"./my_paddle_model\"\n prog = fluid.default_main_program()\n fluid.io.load_params(executor=exe, dirname=param_path,\n main_program=None)\n \"\"\"\n load_vars(\n executor,\n dirname=dirname,\n main_program=main_program,\n predicate=is_parameter,\n filename=filename)\n\n\n@dygraph_not_support\ndef load_persistables(executor, dirname, main_program=None, filename=None):\n \"\"\"\n :api_attr: Static Graph\n \n This API filters out all variables with ``persistable==True`` from the\n given ``main_program`` and then tries to load these variables from the\n directory ``dirname`` or the file ``filename``.\n\n Use the ``dirname`` to specify the directory where persistable variables\n (refer to :ref:`api_guide_model_save_reader_en`) were saved. If variables\n were saved in separate files, set ``filename`` as None; if all variables\n were saved in a single file, use ``filename`` to specify the file name.\n\n Args:\n executor(Executor): The executor used for loading persistable variables.\n See :ref:`api_guide_executor_en` for more details about it.\n dirname(str): The directory path.\n main_program(Program, optional): The program whose persistable variables will\n be loaded. If it is None, the ``default_main_program``\n will be used automatically. See :ref:`api_guide_Program_en`\n for more about ``Program``.\n Default: None.\n filename(str, optional): The file which saved all persistable variables. If variables\n were saved in separated files, set it to None.\n Default: None.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n exe = fluid.Executor(fluid.CPUPlace())\n param_path = \"./my_paddle_model\"\n prog = fluid.default_main_program()\n fluid.io.load_persistables(executor=exe, dirname=param_path,\n main_program=None)\n \"\"\"\n\n if main_program and main_program._is_distributed:\n _load_distributed_persistables(\n executor, dirname=dirname, main_program=main_program)\n else:\n load_vars(\n executor,\n dirname=dirname,\n main_program=main_program,\n predicate=is_persistable,\n filename=filename)\n\n\ndef _load_distributed_persistables(executor, dirname, main_program=None):\n \"\"\"\n customized load_persistables for distributed training.\n it should be used on parameter server,\n\n Args:\n executor(Executor): The executor to run for saving parameters.\n dirname(str): The load directory path.\n main_program(Program): The program whose parameters will be\n loaded. the main_program must be the pserver_program\n get after transpiler.\n\n Returns:\n None\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n exe = fluid.Executor(fluid.CPUPlace())\n param_path = \"./my_paddle_model\"\n t = distribute_transpiler.DistributeTranspiler()\n t.transpile(...)\n pserver_prog = t.get_pserver_program(...)\n _load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)\n \"\"\"\n\n def __is_distributed_part_var(varname):\n trainer_idx = varname.find(\".trainer_\")\n block_idx = varname.find(\".block\")\n return trainer_idx or block_idx\n\n def __load_persistable_vars(executor, dirname, need_load_vars):\n load_prog = Program()\n load_block = load_prog.global_block()\n need_delete_vars = []\n\n for param in need_load_vars:\n origin_var = param.origin\n slice_var = param.slice\n is_slice = param.is_slice\n offset = param.offset\n\n if is_slice:\n slice = load_block.create_var(\n name=slice_var.name,\n type=slice_var.type,\n shape=slice_var.shape,\n dtype=slice_var.dtype,\n persistable=True)\n\n load_block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [slice]},\n attrs={\n 'file_path': os.path.join(dirname, origin_var.name),\n 'seek': offset,\n 'shape': slice.shape\n })\n else:\n origin = load_block.create_var(\n name=\"{}\".format(origin_var.name),\n type=origin_var.type,\n shape=origin_var.shape,\n dtype=origin_var.dtype,\n persistable=True)\n load_block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [origin]},\n attrs={\n 'file_path': os.path.join(dirname, origin_var.name)\n })\n\n load_block.append_op(\n type='delete_var',\n inputs={'X': need_delete_vars}, )\n\n executor.run(load_prog)\n\n if not isinstance(main_program, Program):\n raise TypeError(\"'main_program' should be an instance of Program.\")\n\n if not main_program._is_distributed:\n raise ValueError(\n \"'_load_distributed_persistables' just be designed for distributed training.\"\n )\n\n if not main_program._ps_endpoint:\n raise ValueError(\n \"'_load_distributed_persistables' need current_endpoint set in DistributeTranspiler.transpile\"\n )\n\n need_load_vars = main_program._parameters_on_pservers.get_distributed_vars_by_ep(\n main_program._ps_endpoint)\n __load_persistable_vars(executor, dirname, need_load_vars)\n\n\ndef prepend_feed_ops(inference_program,\n feed_target_names,\n feed_holder_name='feed'):\n if len(feed_target_names) == 0:\n return\n\n global_block = inference_program.global_block()\n feed_var = global_block.create_var(\n name=feed_holder_name,\n type=core.VarDesc.VarType.FEED_MINIBATCH,\n persistable=True)\n\n for i, name in enumerate(feed_target_names):\n if not global_block.has_var(name):\n raise ValueError(\n \"The feeded_var_names[{i}]: '{name}' doesn't exist in pruned inference program. 
\"\n \"Please check whether '{name}' is a valid feed_var name, or remove it from feeded_var_names \"\n \"if '{name}' is not involved in the target_vars calculation.\".\n format(\n i=i, name=name))\n out = global_block.var(name)\n global_block._prepend_op(\n type='feed',\n inputs={'X': [feed_var]},\n outputs={'Out': [out]},\n attrs={'col': i})\n\n\ndef append_fetch_ops(inference_program,\n fetch_target_names,\n fetch_holder_name='fetch'):\n global_block = inference_program.global_block()\n fetch_var = global_block.create_var(\n name=fetch_holder_name,\n type=core.VarDesc.VarType.FETCH_LIST,\n persistable=True)\n\n for i, name in enumerate(fetch_target_names):\n global_block.append_op(\n type='fetch',\n inputs={'X': [name]},\n outputs={'Out': [fetch_var]},\n attrs={'col': i})\n\n\n@dygraph_not_support\ndef save_inference_model(dirname,\n feeded_var_names,\n target_vars,\n executor,\n main_program=None,\n model_filename=None,\n params_filename=None,\n export_for_deployment=True,\n program_only=False):\n \"\"\"\n :api_attr: Static Graph\n\n Prune the given `main_program` to build a new program especially for inference,\n and then save it and all related parameters to given `dirname` .\n If you just want to save parameters of your trained model, please use the\n :ref:`api_fluid_io_save_params` . You can refer to :ref:`api_guide_model_save_reader_en`\n for more details.\n\n Note:\n The :code:`dirname` is used to specify the folder where inference model\n structure and parameters are going to be saved. If you would like to save params of\n Program in separate files, set `params_filename` None; if you would like to save all\n params of Program in a single file, use `params_filename` to specify the file name.\n\n Args:\n dirname(str): The directory path to save the inference model.\n feeded_var_names(list[str]): list of string. Names of variables that need to be fed\n data during inference.\n target_vars(list[Variable]): list of Variable. Variables from which we can get\n inference results.\n executor(Executor): The executor that saves the inference model. You can refer\n to :ref:`api_guide_executor_en` for more details.\n main_program(Program, optional): The original program, which will be pruned to\n build the inference model. If is set None,\n the global default :code:`_main_program_` will be used.\n Default: None.\n model_filename(str, optional): The name of file to save the inference program\n itself. If is set None, a default filename\n :code:`__model__` will be used.\n params_filename(str, optional): The name of file to save all related parameters.\n If it is set None, parameters will be saved\n in separate files .\n export_for_deployment(bool): If True, programs are modified to only support\n direct inference deployment. Otherwise,\n more information will be stored for flexible\n optimization and re-training. Currently, only\n True is supported.\n Default: True.\n program_only(bool, optional): If True, It will save inference program only, and do not\n save params of Program.\n Default: False.\n\n Returns:\n The fetch variables' name list\n\n Return Type:\n list\n\n Raises:\n ValueError: If `feed_var_names` is not a list of basestring, an exception is thrown.\n ValueError: If `target_vars` is not a list of Variable, an exception is thrown.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n path = \"./infer_model\"\n\n # User defined network, here a softmax regression example\n image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())\n predict = fluid.layers.fc(input=image, size=10, act='softmax')\n\n loss = fluid.layers.cross_entropy(input=predict, label=label)\n avg_loss = fluid.layers.mean(loss)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n\n # Feed data and train process\n\n # Save inference model. Note we don't save label and loss in this example\n fluid.io.save_inference_model(dirname=path,\n feeded_var_names=['img'],\n target_vars=[predict],\n executor=exe)\n\n # In this example, the save_inference_model call will prune the default\n # main program according to the network's input node (img) and output node (predict).\n # The pruned inference program is going to be saved in the \"./infer_model/__model__\"\n # and parameters are going to be saved in separate files under folder\n # \"./infer_model\".\n\n \"\"\"\n if isinstance(feeded_var_names, six.string_types):\n feeded_var_names = [feeded_var_names]\n elif export_for_deployment:\n if len(feeded_var_names) > 0:\n # TODO(paddle-dev): polish these code blocks\n if not (bool(feeded_var_names) and all(\n isinstance(name, six.string_types)\n for name in feeded_var_names)):\n raise ValueError(\"'feed_var_names' should be a list of str.\")\n\n if isinstance(target_vars, Variable):\n target_vars = [target_vars]\n elif export_for_deployment:\n if not (bool(target_vars) and\n all(isinstance(var, Variable) for var in target_vars)):\n raise ValueError(\"'target_vars' should be a list of Variable.\")\n\n main_program = _get_valid_program(main_program)\n\n # remind user to set auc_states to zeros if the program contains auc op\n all_ops = main_program.global_block().ops\n for op in all_ops:\n # clear device of Op\n device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()\n op._set_attr(device_attr_name, \"\")\n if op.type == 'auc':\n warnings.warn(\n \"please ensure that you have set the auc states to zeros before saving inference model\"\n )\n break\n\n # fix the bug that the activation op's output as target will be pruned.\n # will affect the inference performance.\n # TODO(Superjomn) add an IR pass to remove 1-scale op.\n with program_guard(main_program):\n uniq_target_vars = []\n for i, var in enumerate(target_vars):\n if isinstance(var, Variable):\n var = layers.scale(\n var, 1., name=\"save_infer_model/scale_{}\".format(i))\n uniq_target_vars.append(var)\n target_vars = uniq_target_vars\n target_var_name_list = [var.name for var in target_vars]\n\n # when a pserver and a trainer are running on the same machine, mkdir may conflict\n save_dirname = dirname\n try:\n save_dirname = os.path.normpath(dirname)\n os.makedirs(save_dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n if model_filename is not None:\n model_basename = os.path.basename(model_filename)\n else:\n model_basename = \"__model__\"\n model_basename = os.path.join(save_dirname, model_basename)\n\n # When export_for_deployment is true, we modify the program online so that\n # it can only be loaded for inference directly.
If it's false, the whole\n # original program and related meta are saved so that future usage can be\n # more flexible.\n\n origin_program = main_program.clone()\n\n if export_for_deployment:\n main_program = main_program.clone()\n global_block = main_program.global_block()\n need_to_remove_op_index = []\n for i, op in enumerate(global_block.ops):\n op.desc.set_is_target(False)\n if op.type == \"feed\" or op.type == \"fetch\":\n need_to_remove_op_index.append(i)\n\n for index in need_to_remove_op_index[::-1]:\n global_block._remove_op(index)\n\n main_program.desc.flush()\n\n main_program = main_program._prune_with_input(\n feeded_var_names=feeded_var_names, targets=target_vars)\n main_program = main_program._inference_optimize(prune_read_op=True)\n fetch_var_names = [v.name for v in target_vars]\n\n prepend_feed_ops(main_program, feeded_var_names)\n append_fetch_ops(main_program, fetch_var_names)\n\n main_program.desc._set_version()\n paddle.fluid.core.save_op_compatible_info(main_program.desc)\n with open(model_basename, \"wb\") as f:\n f.write(main_program.desc.serialize_to_string())\n else:\n # TODO(panyx0718): Save more information so that it can also be used\n # for training and more flexible post-processing.\n with open(model_basename + \".main_program\", \"wb\") as f:\n f.write(main_program.desc.serialize_to_string())\n\n if program_only:\n warnings.warn(\n \"save_inference_model specified the param `program_only` to True, It will not save params of Program.\"\n )\n return target_var_name_list\n\n main_program._copy_dist_param_info_from(origin_program)\n\n if params_filename is not None:\n params_filename = os.path.basename(params_filename)\n\n save_persistables(executor, save_dirname, main_program, params_filename)\n return target_var_name_list\n\n\n@dygraph_not_support\ndef load_inference_model(dirname,\n executor,\n model_filename=None,\n params_filename=None,\n pserver_endpoints=None):\n \"\"\"\n :api_attr: Static Graph\n\n Load the inference model from a given directory. By this API, you can get the model\n structure(Inference Program) and model parameters. If you just want to load\n parameters of the pre-trained model, please use the :ref:`api_fluid_io_load_params` API.\n You can refer to :ref:`api_guide_model_save_reader_en` for more details.\n\n Args:\n dirname(str): One of the following:\n - The given directory path.\n - Set to None when reading the model from memory.\n executor(Executor): The executor to run for loading inference model.\n See :ref:`api_guide_executor_en` for more details about it.\n model_filename(str, optional): One of the following:\n - The name of file to load the inference program.\n - If it is None, the default filename ``__model__`` will be used.\n - When ``dirname`` is ``None``, it must be set to a string containing model.\n Default: ``None``.\n params_filename(str, optional): It is only used for the case that all\n parameters were saved in a single binary file. One of the following:\n - The name of file to load all parameters. \n - When ``dirname`` is ``None``, it must be set to a string containing all the parameters.\n - If parameters were saved in separate files, set it as ``None``.\n Default: ``None``.\n\n pserver_endpoints(list, optional): It is only needed by the distributed inference.\n If using a distributed look up table during the training,\n this table is also needed by the inference process. 
Its value is\n            a list of pserver endpoints.\n\n    Returns:\n        list: The return of this API is a list with three elements:\n        (program, feed_target_names, fetch_targets). The `program` is a\n        ``Program`` (refer to :ref:`api_guide_Program_en`), which is used for inference.\n        The `feed_target_names` is a list of ``str``, which contains names of variables\n        that need to feed data in the inference program. The `fetch_targets` is a list of\n        ``Variable`` (refer to :ref:`api_guide_Program_en`). It contains variables from which\n        we can get inference results.\n\n    Raises:\n        ValueError: If `dirname` is not an existing directory.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle.fluid as fluid\n            import numpy as np\n\n            # Build the model\n            main_prog = fluid.Program()\n            startup_prog = fluid.Program()\n            with fluid.program_guard(main_prog, startup_prog):\n                data = fluid.layers.data(name=\"img\", shape=[64, 784], append_batch_size=False)\n                w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')\n                b = fluid.layers.create_parameter(shape=[200], dtype='float32')\n                hidden_w = fluid.layers.matmul(x=data, y=w)\n                hidden_b = fluid.layers.elementwise_add(hidden_w, b)\n                place = fluid.CPUPlace()\n                exe = fluid.Executor(place)\n                exe.run(startup_prog)\n\n            # Save the inference model\n            path = \"./infer_model\"\n            fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],\n                         target_vars=[hidden_b], executor=exe, main_program=main_prog)\n\n            # Demo one. No need to set the distributed look up table, because the\n            # training doesn't use a distributed look up table.\n            [inference_program, feed_target_names, fetch_targets] = (\n                fluid.io.load_inference_model(dirname=path, executor=exe))\n            tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)\n            results = exe.run(inference_program,\n                          feed={feed_target_names[0]: tensor_img},\n                          fetch_list=fetch_targets)\n\n            # Demo two. 
If the training uses a distributed look up table, the pserver\n # endpoints list should be supported when loading the inference model.\n # The below is just an example.\n endpoints = [\"127.0.0.1:2023\",\"127.0.0.1:2024\"]\n [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (\n fluid.io.load_inference_model(dirname=path,\n executor=exe,\n pserver_endpoints=endpoints))\n\n # In this example, the inference program was saved in the file\n # \"./infer_model/__model__\" and parameters were saved in\n # separate files under the directory \"./infer_model\".\n # By the inference program, feed_target_names and\n # fetch_targets, we can use an executor to run the inference\n # program for getting the inference result.\n \"\"\"\n load_from_memory = False\n if dirname is not None:\n load_dirname = os.path.normpath(dirname)\n if not os.path.isdir(load_dirname):\n raise ValueError(\"There is no directory named '%s'\" % dirname)\n\n if model_filename is None:\n model_filename = '__model__'\n\n model_filename = os.path.join(load_dirname,\n os.path.basename(model_filename))\n\n if params_filename is not None:\n params_filename = os.path.basename(params_filename)\n\n with open(model_filename, \"rb\") as f:\n program_desc_str = f.read()\n else:\n load_from_memory = True\n if params_filename is None:\n raise ValueError(\n \"The path of params cannot be None when the directory path is None.\"\n )\n load_dirname = dirname\n program_desc_str = model_filename\n params_filename = params_filename\n\n program = Program.parse_from_string(program_desc_str)\n if not core._is_program_version_supported(program._version()):\n raise ValueError(\"Unsupported program version: %d\\n\" %\n program._version())\n # Binary data also need versioning.\n load_persistables(executor, load_dirname, program, params_filename)\n\n if pserver_endpoints:\n program = _endpoints_replacement(program, pserver_endpoints)\n\n feed_target_names = program.desc.get_feed_target_names()\n fetch_target_names = program.desc.get_fetch_target_names()\n fetch_targets = [\n program.global_block().var(name) for name in fetch_target_names\n ]\n\n return [program, feed_target_names, fetch_targets]\n\n\ndef _endpoints_replacement(program, endpoints):\n ENDPOINT_MAP = \"epmap\"\n for op in program.global_block().ops:\n if op.has_attr(ENDPOINT_MAP):\n op.set_attr(ENDPOINT_MAP, endpoints)\n program._sync_with_cpp()\n return program\n\n\ndef get_parameter_value(para, executor):\n \"\"\"\n Get the LoDTensor value of the given parameter.\n\n Args:\n para(Parameter): The parameter to get value from.\n executor(Executor): The executor to run for retrieving the value.\n\n Returns:\n numpy.array: The given parameter's values.\n\n Raises:\n AssertionError: If the `para` is not an instance of Parameter.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n exe = fluid.Executor(fluid.CPUPlace())\n param = fluid.default_main_program().global_block().var('fc.w')\n p = fluid.io.get_parameter_value(param, exe)\n\n \"\"\"\n assert is_parameter(para), \"The input variable is not parameter.\"\n\n get_program = Program()\n block = get_program.global_block()\n new_var = _clone_var_in_block_(block, para)\n return executor.run(get_program, feed={}, fetch_list=[new_var])[0]\n\n\ndef get_parameter_value_by_name(name, executor, program=None):\n \"\"\"\n Get the LoDTensor value of a certain parameter by its name.\n\n Args:\n name(str): The parameter's name.\n executor(Executor): The executor to run for retrieving the value.\n program(Program | None): The program where to find the parameter.\n If it's set to be None, the function will\n try to find the parameter in the default\n main program.\n\n Returns:\n numpy.array: The parameter's values.\n\n Raises:\n TypeError: If given `name` is not an instance of basestring.\n TypeError: If the parameter with the given name doesn't exist.\n AssertionError: If there is a variable named `name` in the\n given program but it is not a Parameter.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n exe = fluid.Executor(fluid.CPUPlace())\n p = fluid.io.get_parameter_value('fc.w', exe)\n \"\"\"\n if program is None:\n program = default_main_program()\n var = program.global_block().var(name)\n return get_parameter_value(var, executor)\n\n\ndef _save_persistable_nodes(executor, dirname, graph):\n \"\"\"\n Save persistable nodes to the given directory by the executor.\n\n Args:\n executor(Executor): The executor to run for saving node values.\n dirname(str): The directory path.\n graph(IrGraph): All the required persistable nodes in the graph will be saved.\n \"\"\"\n persistable_node_names = set()\n persistable_nodes = []\n all_persistable_nodes = graph.all_persistable_nodes()\n for node in all_persistable_nodes:\n name = cpt.to_text(node.name())\n if name not in persistable_node_names:\n persistable_node_names.add(name)\n persistable_nodes.append(node)\n program = Program()\n var_list = []\n for node in persistable_nodes:\n var_desc = node.var()\n if var_desc.type() == core.VarDesc.VarType.RAW or \\\n var_desc.type() == core.VarDesc.VarType.READER:\n continue\n var = program.global_block().create_var(\n name=var_desc.name(),\n shape=var_desc.shape(),\n dtype=var_desc.dtype(),\n type=var_desc.type(),\n lod_level=var_desc.lod_level(),\n persistable=var_desc.persistable())\n var_list.append(var)\n save_vars(executor=executor, dirname=dirname, vars=var_list)\n\n\ndef _load_persistable_nodes(executor, dirname, graph):\n \"\"\"\n Load persistable node values from the given directory by the executor.\n\n Args:\n executor(Executor): The executor to run for loading node values.\n dirname(str): The directory path.\n graph(IrGraph): All the required persistable nodes in the graph will be loaded.\n \"\"\"\n persistable_node_names = set()\n persistable_nodes = []\n all_persistable_nodes = graph.all_persistable_nodes()\n for node in all_persistable_nodes:\n name = cpt.to_text(node.name())\n if name not in persistable_node_names:\n persistable_node_names.add(name)\n persistable_nodes.append(node)\n program = Program()\n var_list = []\n\n def _exist(var):\n return os.path.exists(os.path.join(dirname, var.name))\n\n for node in persistable_nodes:\n var_desc = node.var()\n if var_desc.type() == core.VarDesc.VarType.RAW or \\\n var_desc.type() == 
core.VarDesc.VarType.READER:\n            continue\n        var = program.global_block().create_var(\n            name=var_desc.name(),\n            shape=var_desc.shape(),\n            dtype=var_desc.dtype(),\n            type=var_desc.type(),\n            lod_level=var_desc.lod_level(),\n            persistable=var_desc.persistable())\n        if _exist(var):\n            var_list.append(var)\n        else:\n            _logger.warn(\"Cannot find the var %s!!!\" % (node.name()))\n    load_vars(executor=executor, dirname=dirname, vars=var_list)\n\n\n@dygraph_not_support\ndef save(program, model_path):\n    \"\"\"\n    :api_attr: Static Graph\n\t:alias_main: paddle.save\n\t:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save\n\t:old_api: paddle.fluid.save\n\n    This function saves the parameters, optimizer information and network description to model_path.\n\n    The parameters contain all the trainable Variables and are saved to a file with the suffix \".pdparams\".\n    The optimizer information contains all the variables used by the optimizer. For the Adam optimizer, this includes beta1, beta2, momentum etc. All the information is saved to a file with the suffix \".pdopt\". (If the optimizer has no variables to save, as with SGD, this file is not generated).\n    The network description is the description of the program. It is only used for deployment, and is saved to a file with the suffix \".pdmodel\".\n\n    Args:\n        program(Program) : The program to be saved.\n        model_path(str): The file prefix used to save the program. The format is \"dirname/file_prefix\". If file_prefix is an empty string, an exception is raised.\n\n    Returns:\n        None\n\n    Examples:\n        .. code-block:: python\n\n            import paddle.fluid as fluid\n\n            prog = fluid.default_main_program()\n            fluid.save( prog, \"./temp\")\n\n    \"\"\"\n\n    base_name = os.path.basename(model_path)\n    assert base_name != \"\", \\\n        \"The input model_path MUST be format of dirname/filename [dirname\\\\filename in Windows system], but received model_path is empty string.\"\n\n    dir_name = os.path.dirname(model_path)\n    if dir_name and not os.path.exists(dir_name):\n        os.makedirs(dir_name)\n\n    def get_tensor(var):\n        t = global_scope().find_var(var.name).get_tensor()\n        return np.array(t)\n\n    parameter_list = list(filter(is_parameter, program.list_vars()))\n    param_dict = {p.name: get_tensor(p) for p in parameter_list}\n    with open(model_path + \".pdparams\", 'wb') as f:\n        pickle.dump(param_dict, f, protocol=2)\n\n    optimizer_var_list = list(\n        filter(is_belong_to_optimizer, program.list_vars()))\n\n    opt_dict = {p.name: get_tensor(p) for p in optimizer_var_list}\n    with open(model_path + \".pdopt\", 'wb') as f:\n        pickle.dump(opt_dict, f, protocol=2)\n\n    main_program = program.clone()\n    program.desc.flush()\n    main_program.desc._set_version()\n    paddle.fluid.core.save_op_compatible_info(program.desc)\n\n    with open(model_path + \".pdmodel\", \"wb\") as f:\n        f.write(program.desc.serialize_to_string())\n\n\n@dygraph_not_support\ndef load(program, model_path, executor=None, var_list=None):\n    \"\"\"\n    :api_attr: Static Graph\n\t:alias_main: paddle.load\n\t:alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load\n\t:old_api: paddle.fluid.io.load\n\n    This function gets parameters and optimizer information from the program, and then gets the corresponding values from file.\n    An exception is thrown if the shape or dtype of the parameters does not match.\n\n    This function can also load model files saved with [ save_params, save_persistables, save_vars ].\n    var_list can not be None when loading a single model file\n    ( filename is not None when save_params, save_persistables or save_vars is called ).\n\n    Args:\n        program(Program): The program to be 
loaded\n        model_path(str): The file prefix that stores the program\n        executor(Executor, optional): The executor used to initialize the parameters\n                                      when the startup program has not been run.\n        var_list(list, optional): The variable list used to load a single model file saved with\n                                  [ save_params, save_persistables, save_vars ].\n                                  Default: None\n\n    Returns:\n        None\n\n    Examples:\n        .. code-block:: python\n\n            import paddle.fluid as fluid\n\n            prog = fluid.default_main_program()\n            fluid.save( prog, \"./temp\")\n\n            fluid.load( prog, \"./temp\")\n\n    \"\"\"\n\n    assert executor is None or isinstance(executor, Executor)\n\n    model_prefix = model_path\n    if model_prefix.endswith(\".pdparams\"):\n        model_prefix = model_prefix[:-9]\n    elif model_prefix.endswith(\".pdopt\"):\n        model_prefix = model_prefix[:-6]\n    elif model_prefix.endswith(\".pdmodel\"):\n        model_prefix = model_prefix[:-8]\n\n    parameter_file_name = model_prefix + \".pdparams\"\n\n    if not os.path.exists(parameter_file_name):\n        # model file saved by fluid.save not found, try to load model file saved with\n        # [save_vars, save_params, save_persistables]\n        _logger.debug(\n            \"{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]\".\n            format(parameter_file_name))\n        if executor is None:\n            raise ValueError(\n                \"executor is required when loading model file saved with [ save_params, save_persistables, save_vars ]\"\n            )\n        if os.path.isdir(model_path):\n            binary_file_set = set()\n            for root, dirs, files in os.walk(model_path, topdown=False):\n                for f in files:\n                    binary_file_set.add(\n                        os.path.join(root, f).replace(\"\\\\\", \"/\"))\n            program_var_list = list(program.list_vars())\n            loaded_var_list = []\n            for var in program_var_list:\n                var_path = os.path.join(model_path, var.name).replace(\"\\\\\", \"/\")\n                if var_path in binary_file_set:\n                    loaded_var_list.append(var)\n                    binary_file_set.remove(var_path)\n            if len(binary_file_set) > 0:\n                unused_var_list = \" \".join(list(binary_file_set))\n                _logger.warning(\"variable file [ %s ] not used\" %\n                                unused_var_list)\n            try:\n                load_vars(\n                    executor=executor, dirname=model_path, vars=loaded_var_list)\n            except RuntimeError as e:\n                _logger.error(e)\n                raise e\n            except:\n                raise RuntimeError(\n                    \"Failed to load model file, please make sure model file is saved with the \"\n                    \"following APIs: save_params, save_persistables, save_vars\")\n\n            return\n        elif os.path.isfile(model_path):\n            if var_list is None:\n                raise ValueError(\n                    \"var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]\"\n                )\n            program_var_list = program.list_vars()\n            program_var_name_set = set([var.name for var in program_var_list])\n\n            # check that all the variables are included in the program\n            for var in var_list:\n                if var.name not in program_var_name_set:\n                    raise LookupError(\n                        \"loaded var [{}] is not in program variable list\".format(\n                            var.name))\n\n            dir_name, file_name = os.path.split(model_path)\n            try:\n                load_vars(\n                    executor=executor,\n                    dirname=dir_name,\n                    vars=var_list,\n                    filename=file_name)\n            except RuntimeError as e:\n                _logger.error(e)\n                raise e\n            except:\n                raise RuntimeError(\"Failed to load model file, please make sure model file is saved with \" \\
\" \\\n                                   \"the following APIs: [ save_params, save_persistables, save_vars ]. When these APIs are called, filename CANNOT be None\")\n\n            return\n\n    def set_var(var, ndarray):\n        t = global_scope().find_var(var.name).get_tensor()\n        p = t._place()\n        if p.is_cpu_place():\n            place = paddle.fluid.CPUPlace()\n        elif p.is_cuda_pinned_place():\n            place = paddle.fluid.CUDAPinnedPlace()\n        else:\n            p = paddle.fluid.core.Place()\n            p.set_place(t._place())\n            place = paddle.fluid.CUDAPlace(p.gpu_device_id())\n\n        t.set(ndarray, place)\n\n    parameter_list = list(filter(is_parameter, program.list_vars()))\n\n    if executor:\n        paddle.fluid.core._create_loaded_parameter(parameter_list,\n                                                   global_scope(),\n                                                   executor._default_executor)\n    with open(parameter_file_name, 'rb') as f:\n        load_dict = pickle.load(f) if six.PY2 else pickle.load(\n            f, encoding='latin1')\n    for v in parameter_list:\n        assert v.name in load_dict, \\\n            \"Can not find [{}] in model file [{}]\".format(\n                v.name, parameter_file_name)\n        set_var(v, load_dict[v.name])\n\n    optimizer_var_list = list(\n        filter(is_belong_to_optimizer, program.list_vars()))\n\n    if len(optimizer_var_list) > 0:\n        opt_file_name = model_prefix + \".pdopt\"\n        assert os.path.exists(opt_file_name), \\\n            \"Optimizer file [{}] does not exist\".format(opt_file_name)\n\n        if executor:\n            paddle.fluid.core._create_loaded_parameter(\n                optimizer_var_list, global_scope(), executor._default_executor)\n\n        with open(opt_file_name, 'rb') as f:\n            load_dict = pickle.load(f) if six.PY2 else pickle.load(\n                f, encoding='latin1')\n        for v in optimizer_var_list:\n            assert v.name in load_dict, \\\n                \"Can not find [{}] in model file [{}]\".format(\n                    v.name, opt_file_name)\n            set_var(v, load_dict[v.name])\n\n\ndef load_program_state(model_path, var_list=None):\n    \"\"\"\n    :api_attr: Static Graph\n\n    Load program state from local file\n\n    Args:\n        model_path(str): The file prefix that stores the program\n        var_list(list, optional): The variable list to load, saved with\n                                  [ save_params, save_persistables, save_vars ].\n                                  Default: None.\n                                  The var_list is only used to get names and\n                                  will not be modified.\n    Returns:\n        state_dict(dict): the dict that stores Parameter and optimizer information\n\n    Examples:\n        .. 
code-block:: python\n\n            import paddle.fluid as fluid\n            x = fluid.data( name=\"x\", shape=[10, 10], dtype='float32')\n            y = fluid.layers.fc( x, 10)\n            z = fluid.layers.fc( y, 10)\n\n            place = fluid.CPUPlace()\n            exe = fluid.Executor(place)\n            exe.run( fluid.default_startup_program() )\n            prog = fluid.default_main_program()\n\n            fluid.save( prog, \"./temp\")\n            program_state = fluid.load_program_state( \"./temp\")\n\n    \"\"\"\n    model_prefix = model_path\n    if model_prefix.endswith(\".pdparams\"):\n        model_prefix = model_prefix[:-9]\n    elif model_prefix.endswith(\".pdopt\"):\n        model_prefix = model_prefix[:-6]\n    elif model_prefix.endswith(\".pdmodel\"):\n        model_prefix = model_prefix[:-8]\n\n    parameter_file_name = model_prefix + \".pdparams\"\n    if not os.path.exists(parameter_file_name):\n        # model file saved with fluid.save is not found, try to load model file saved with\n        # [save_vars, save_params, save_persistables]\n        _logger.debug(\n            \"{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]\".\n            format(parameter_file_name))\n\n        var_name_list = []\n        if var_list is None and os.path.isfile(model_path):\n            raise ValueError(\n                \"var_list can not be None when model_path is a single file\")\n\n        for root, dirs, files in os.walk(model_path, topdown=False):\n            for f in files:\n                file_path = os.path.join(root, f)\n                var_temp_name = os.path.relpath(file_path, model_path)\n                var_temp_name = var_temp_name.replace(\"\\\\\", \"/\")\n                var_name_list.append(var_temp_name)\n\n        with _load_program_scope():\n            load_prog = Program()\n            load_block = load_prog.global_block()\n\n            def clone_var_to_block(block, var):\n                if not isinstance(var, Variable):\n                    raise TypeError(\"value in var_list must be a Variable\")\n                return block.create_var(\n                    name=var.name,\n                    shape=var.shape,\n                    dtype=var.dtype,\n                    type=var.type,\n                    lod_level=var.lod_level\n                    if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR else\n                    None,\n                    persistable=True)\n\n            loaded_var_list = []\n\n            if var_list is not None:\n                for var in var_list:\n                    loaded_var_list.append(clone_var_to_block(load_block, var))\n            else:\n                for var_name in var_name_list:\n                    loaded_var_list.append(\n                        load_block.create_var(\n                            name=var_name, persistable=True))\n\n            place = paddle.fluid.CPUPlace()\n            exe = paddle.fluid.Executor(place)\n\n            try:\n                if os.path.isfile(model_path):\n                    dir_name, file_name = os.path.split(model_path)\n                else:\n                    dir_name = model_path\n                    file_name = None\n                load_vars(\n                    executor=exe,\n                    dirname=dir_name,\n                    vars=loaded_var_list,\n                    filename=file_name)\n            except:\n                raise RuntimeError(\n                    \"Failed to load model file, please make sure model file is saved with the \"\n                    \"following APIs: save_params, save_persistables, save_vars\")\n            res_dict = {}\n            for var in loaded_var_list:\n                res_dict[var.name] = np.asarray(paddle.fluid.global_scope(\n                ).find_var(var.name).get_tensor())\n\n            return res_dict\n\n    assert os.path.exists(parameter_file_name), \\\n        \"Parameter file [{}] does not exist\".format(parameter_file_name)\n\n    with open(parameter_file_name, 'rb') as f:\n        para_dict = pickle.load(f) if six.PY2 else pickle.load(\n            f, encoding='latin1')\n\n    opt_file_name = model_prefix + \".pdopt\"\n    if os.path.exists(opt_file_name):\n        with open(opt_file_name, 'rb') as f:\n            opti_dict = pickle.load(f) if six.PY2 else pickle.load(\n                f, encoding='latin1')\n\n        para_dict.update(opti_dict)\n\n    return para_dict\n\n\n@dygraph_not_support\ndef set_program_state(program, state_dict):\n    \"\"\"\n    :api_attr: Static Graph\n\n    Set program parameters from state_dict\n\n    An exception is thrown if the shape or dtype 
of the parameters does not match.\n\n    NOTICE: This function MUST be called after running the startup program\n\n    Args:\n        program(Program): The program to be set\n        state_dict(dict): the dict that stores Parameter and optimizer information\n    Returns:\n        None\n\n    Examples:\n        .. code-block:: python\n\n            import paddle.fluid as fluid\n            x = fluid.data( name=\"x\", shape=[10, 10], dtype='float32')\n            y = fluid.layers.fc( x, 10)\n            z = fluid.layers.fc( y, 10)\n\n            place = fluid.CPUPlace()\n            exe = fluid.Executor(place)\n            exe.run( fluid.default_startup_program() )\n            prog = fluid.default_main_program()\n\n            fluid.save( prog, \"./temp\")\n            program_state = fluid.load_program_state( \"./temp\")\n\n            fluid.set_program_state( prog, program_state)\n\n    \"\"\"\n    parameter_list = list(filter(is_persistable, program.list_vars()))\n\n    used_para_list = {}\n    for para in parameter_list:\n        var_temp = paddle.fluid.global_scope().find_var(para.name)\n        assert var_temp is not None, \\\n            \"Variable [ {} ] not found, please make sure the startup program has been run\".format(para.name)\n        if para.name in state_dict:\n            # set value from state dict\n            orig_para_np = np.array(var_temp.get_tensor())\n            new_para_np = state_dict[para.name]\n            assert orig_para_np.shape == new_para_np.shape, \\\n                \"Parameter's shape does not match, the Program requires a parameter with the shape of ({}), \" \\\n                \"while the loaded parameter (namely [ {} ]) has a shape of ({}).\" \\\n                .format(orig_para_np.shape, para.name, new_para_np.shape)\n            assert orig_para_np.dtype == new_para_np.dtype, \\\n                \"Parameter's data type does not match, the Program requires a parameter with a dtype of ({}), \" \\\n                \"while the loaded parameter (namely [ {} ]) has a dtype of ({}).\" \\\n                .format(orig_para_np.dtype, para.name, new_para_np.dtype)\n\n            ten = var_temp.get_tensor()\n            ten_place = ten._place()\n\n            assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \\\n                \"Place not supported: only CPUPlace and GPUPlace are supported, but got {}\".format(str(ten_place))\n            py_place = paddle.fluid.CPUPlace()\n            if ten_place.is_cuda_pinned_place():\n                place = paddle.fluid.CUDAPinnedPlace()\n            elif ten_place.is_gpu_place():\n                p = paddle.fluid.core.Place()\n                p.set_place(ten_place)\n                py_place = paddle.fluid.CUDAPlace(p.gpu_device_id())\n\n            ten.set(new_para_np, py_place)\n\n            used_para_list[para.name] = 1\n\n    unused_para_list = []\n    for k, v in state_dict.items():\n        if k not in used_para_list:\n            unused_para_list.append(k)\n    if len(unused_para_list) > 0:\n        warnings.warn(\n            \"The following variables were not set because they were not found in the program: {}\".\n            format(\" \".join(unused_para_list)))\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# when test, you should add hapi root path to the PYTHONPATH,\n# export PYTHONPATH=PATH_TO_HAPI:$PYTHONPATH\nimport unittest\nimport os\nimport tempfile\nimport cv2\nimport shutil\nimport numpy as np\n\nfrom paddle.incubate.hapi.datasets import DatasetFolder\nfrom paddle.incubate.hapi.vision.transforms import transforms\nimport paddle.incubate.hapi.vision.transforms.functional as F\n\n\nclass TestTransforms(unittest.TestCase):\n def setUp(self):\n self.data_dir = tempfile.mkdtemp()\n for i in range(2):\n sub_dir = os.path.join(self.data_dir, 'class_' + str(i))\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n for j in range(2):\n if j == 0:\n fake_img = (np.random.random(\n (280, 350, 3)) * 255).astype('uint8')\n else:\n fake_img = (np.random.random(\n (400, 300, 3)) * 255).astype('uint8')\n cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img)\n\n def tearDown(self):\n shutil.rmtree(self.data_dir)\n\n def do_transform(self, trans):\n dataset_folder = DatasetFolder(self.data_dir, transform=trans)\n\n for _ in dataset_folder:\n pass\n\n def test_trans_all(self):\n normalize = transforms.Normalize(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])\n trans = transforms.Compose([\n transforms.RandomResizedCrop(224), transforms.GaussianNoise(),\n transforms.ColorJitter(\n brightness=0.4, contrast=0.4, saturation=0.4,\n hue=0.4), transforms.RandomHorizontalFlip(),\n transforms.Permute(mode='CHW'), normalize\n ])\n\n self.do_transform(trans)\n\n def test_trans_resize(self):\n trans = transforms.Compose([\n transforms.Resize(300, [0, 1]),\n transforms.RandomResizedCrop((280, 280)),\n transforms.Resize(280, [0, 1]),\n transforms.Resize((256, 200)),\n transforms.Resize((180, 160)),\n transforms.CenterCrop(128),\n transforms.CenterCrop((128, 128)),\n ])\n self.do_transform(trans)\n\n def test_trans_centerCrop(self):\n trans = transforms.Compose([\n transforms.CenterCropResize(224),\n transforms.CenterCropResize(128, 160),\n ])\n self.do_transform(trans)\n\n def test_flip(self):\n trans = transforms.Compose([\n transforms.RandomHorizontalFlip(1.0),\n transforms.RandomHorizontalFlip(0.0),\n transforms.RandomVerticalFlip(0.0),\n transforms.RandomVerticalFlip(1.0),\n ])\n self.do_transform(trans)\n\n def test_color_jitter(self):\n trans = transforms.BatchCompose([\n transforms.BrightnessTransform(0.0),\n transforms.HueTransform(0.0),\n transforms.SaturationTransform(0.0),\n transforms.ContrastTransform(0.0),\n ])\n self.do_transform(trans)\n\n def test_rotate(self):\n trans = transforms.Compose([\n transforms.RandomRotate(90),\n transforms.RandomRotate([-10, 10]),\n transforms.RandomRotate(\n 45, expand=True),\n transforms.RandomRotate(\n 10, expand=True, center=(60, 80)),\n ])\n self.do_transform(trans)\n\n def test_pad(self):\n trans = transforms.Compose([transforms.Pad(2)])\n self.do_transform(trans)\n\n fake_img = np.random.rand(200, 150, 3).astype('float32')\n trans_pad = transforms.Pad(10)\n 
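# Pad(10) adds a 10-pixel border on every side, so (200, 150, 3) grows to (220, 170, 3):\n        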
fake_img_padded = trans_pad(fake_img)\n np.testing.assert_equal(fake_img_padded.shape, (220, 170, 3))\n trans_pad1 = transforms.Pad([1, 2])\n trans_pad2 = transforms.Pad([1, 2, 3, 4])\n img = trans_pad1(fake_img)\n img = trans_pad2(img)\n\n def test_erase(self):\n trans = transforms.Compose(\n [transforms.RandomErasing(), transforms.RandomErasing(value=0.0)])\n self.do_transform(trans)\n\n def test_random_crop(self):\n trans = transforms.Compose([\n transforms.RandomCrop(200),\n transforms.RandomCrop((140, 160)),\n ])\n self.do_transform(trans)\n\n trans_random_crop1 = transforms.RandomCrop(224)\n trans_random_crop2 = transforms.RandomCrop((140, 160))\n\n fake_img = np.random.rand(500, 400, 3).astype('float32')\n fake_img_crop1 = trans_random_crop1(fake_img)\n fake_img_crop2 = trans_random_crop2(fake_img_crop1)\n\n np.testing.assert_equal(fake_img_crop1.shape, (224, 224, 3))\n\n np.testing.assert_equal(fake_img_crop2.shape, (140, 160, 3))\n\n trans_random_crop_same = transforms.RandomCrop((140, 160))\n img = trans_random_crop_same(fake_img_crop2)\n\n trans_random_crop_bigger = transforms.RandomCrop((180, 200))\n img = trans_random_crop_bigger(img)\n\n trans_random_crop_pad = transforms.RandomCrop((224, 256), 2, True)\n img = trans_random_crop_pad(img)\n\n def test_grayscale(self):\n trans = transforms.Compose([transforms.Grayscale()])\n self.do_transform(trans)\n\n trans_gray = transforms.Grayscale()\n fake_img = np.random.rand(500, 400, 3).astype('float32')\n fake_img_gray = trans_gray(fake_img)\n\n np.testing.assert_equal(len(fake_img_gray.shape), 2)\n np.testing.assert_equal(fake_img_gray.shape[0], 500)\n np.testing.assert_equal(fake_img_gray.shape[1], 400)\n\n trans_gray3 = transforms.Grayscale(3)\n fake_img = np.random.rand(500, 400, 3).astype('float32')\n fake_img_gray = trans_gray3(fake_img)\n\n def test_exception(self):\n trans = transforms.Compose([transforms.Resize(-1)])\n\n trans_batch = transforms.BatchCompose([transforms.Resize(-1)])\n\n with self.assertRaises(Exception):\n self.do_transform(trans)\n\n with self.assertRaises(Exception):\n self.do_transform(trans_batch)\n\n with self.assertRaises(ValueError):\n transforms.ContrastTransform(-1.0)\n\n with self.assertRaises(ValueError):\n transforms.SaturationTransform(-1.0),\n\n with self.assertRaises(ValueError):\n transforms.HueTransform(-1.0)\n\n with self.assertRaises(ValueError):\n transforms.BrightnessTransform(-1.0)\n\n with self.assertRaises(ValueError):\n transforms.Pad([1.0, 2.0, 3.0])\n\n with self.assertRaises(TypeError):\n fake_img = np.random.rand(100, 120, 3).astype('float32')\n F.pad(fake_img, '1')\n\n with self.assertRaises(TypeError):\n fake_img = np.random.rand(100, 120, 3).astype('float32')\n F.pad(fake_img, 1, {})\n\n with self.assertRaises(TypeError):\n fake_img = np.random.rand(100, 120, 3).astype('float32')\n F.pad(fake_img, 1, padding_mode=-1)\n\n with self.assertRaises(ValueError):\n fake_img = np.random.rand(100, 120, 3).astype('float32')\n F.pad(fake_img, [1.0, 2.0, 3.0])\n\n with self.assertRaises(ValueError):\n transforms.RandomRotate(-2)\n\n with self.assertRaises(ValueError):\n transforms.RandomRotate([1, 2, 3])\n\n with self.assertRaises(ValueError):\n trans_gray = transforms.Grayscale(5)\n fake_img = np.random.rand(100, 120, 3).astype('float32')\n trans_gray(fake_img)\n\n def test_info(self):\n str(transforms.Compose([transforms.Resize((224, 224))]))\n str(transforms.BatchCompose([transforms.Resize((224, 224))]))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2018 
PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle.fluid.core as core\n\n\nclass TestFakeQuantizeOp(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_abs_max\"\n self.attrs = {'bit_length': 8}\n self.inputs = {'X': np.random.random((124, 240)).astype(\"float32\"), }\n scale = np.max(np.abs(self.inputs['X'])).astype(\"float32\")\n self.outputs = {\n 'Out': np.round(self.inputs['X'] / scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1)),\n 'OutScale': np.array(scale).astype(\"float32\"),\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeQuantizeOp1(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_abs_max\"\n self.attrs = {'bit_length': 8}\n self.inputs = {'X': np.zeros((10, 10)).astype(\"float32\"), }\n scale = np.max(np.abs(self.inputs['X'])).astype(\"float32\")\n inv_scale = 1.0 / (scale + 1e-6) if scale < 1e-30 else 1.0 / scale\n self.outputs = {\n 'Out': np.round(self.inputs['X'] * inv_scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1)),\n 'OutScale': np.array(scale).astype(\"float32\"),\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeQuantizeOp2(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_abs_max\"\n self.attrs = {'bit_length': 8}\n self.inputs = {'X': np.full((10, 10), 1e-40).astype(\"float32\"), }\n scale = np.max(np.abs(self.inputs['X'])).astype(\"float32\")\n inv_scale = 1.0 / (scale + 1e-6) if scale < 1e-30 else 1.0 / scale\n self.outputs = {\n 'Out': np.round(self.inputs['X'] * inv_scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1)),\n 'OutScale': np.array(scale).astype(\"float32\"),\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeChannelWiseQuantizeOp(OpTest):\n def setUp(self):\n self.op_type = \"fake_channel_wise_quantize_abs_max\"\n self.attrs = {'bit_length': 8}\n self.inputs = {\n 'X': np.random.random((4, 3, 64, 64)).astype(\"float32\"),\n }\n scales = []\n for i in range(self.inputs['X'].shape[0]):\n scales.append(np.max(np.abs(self.inputs['X'][i])).astype(\"float32\"))\n outputs = self.inputs['X'].copy()\n for i, scale in enumerate(scales):\n outputs[i] = np.round(outputs[i] / scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1))\n\n self.outputs = {\n 'Out': outputs,\n 'OutScale': np.array(scales).astype(\"float32\"),\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeQuantizeRangeAbsMaxOp(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_range_abs_max\"\n self.attrs = {\n 'bit_length': int(5),\n 'window_size': int(1),\n 'is_test': False\n }\n x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10\n x = x.astype(\"float32\")\n self.inputs = {\n 'X': x,\n 'Iter': np.zeros(1).astype(\"int64\"),\n 'InScale': np.zeros(1).astype(\"float32\")\n }\n scale = np.max(np.abs(self.inputs['X'])).astype(\"float32\")\n\n out_scales = 
np.zeros(self.attrs['window_size']).astype(\"float32\")\n out_scales[0] = scale\n self.outputs = {\n 'Out': np.round(self.inputs['X'] / scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1)),\n 'OutScale': scale,\n 'OutScales': out_scales,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestMovingAverageAbsMaxScaleOp(OpTest):\n def setUp(self):\n self.op_type = \"moving_average_abs_max_scale\"\n self.attrs = {'moving_rate': float(0.9), 'is_test': False}\n accum = np.zeros(1).astype(\"float32\")\n accum[0] = 1\n state = np.zeros(1).astype(\"float32\")\n state[0] = 1\n self.inputs = {\n 'X': np.random.random((8, 16, 7, 7)).astype(\"float32\"),\n 'InAccum': accum,\n 'InState': state,\n }\n\n out_accum = np.zeros(1).astype(\"float32\")\n out_state = np.zeros(1).astype(\"float32\")\n out_scale = np.zeros(1).astype(\"float32\")\n out_accum[0] = self.attrs['moving_rate'] * accum[0] + np.max(\n np.abs(self.inputs['X'])).astype(\"float32\")\n out_state[0] = self.attrs['moving_rate'] * state[0] + 1\n out_scale = out_accum / out_state\n self.outputs = {\n 'OutAccum': out_accum,\n 'OutState': out_state,\n 'OutScale': out_scale,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeQuantizeRangeAbsMaxOp2(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_range_abs_max\"\n self.attrs = {\n 'bit_length': int(8),\n 'window_size': int(1),\n 'is_test': True\n }\n x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10\n x = x.astype(\"float32\")\n scale = np.array([np.max(np.abs(x)).astype(\"float32\") - 1.0])\n out_scales = np.zeros(self.attrs['window_size']).astype(\"float32\")\n out_scales[0] = scale\n self.inputs = {\n 'X': x,\n 'Iter': np.zeros(1).astype(\"int64\"),\n 'InScale': scale.astype(\"float32\")\n }\n xs = np.clip(x, -scale, scale)\n qs = np.round(xs / scale * ((1 << (self.attrs['bit_length'] - 1)) - 1))\n self.outputs = {\n 'Out': qs,\n 'OutScale': scale.astype(\"float32\"),\n 'OutScales': out_scales,\n }\n\n def test_check_output(self):\n self.check_output(no_check_set=set(['OutScale', 'OutScales']))\n\n\nclass TestMovingOpBase(OpTest):\n def setUp(self):\n self.init_type()\n self.attrs = {\n 'bit_length': int(5),\n 'moving_rate': float(0.9),\n 'is_test': False\n }\n accum = np.zeros(1).astype(\"float32\")\n accum[0] = 1\n state = np.zeros(1).astype(\"float32\")\n state[0] = 1\n scale = np.zeros(1).astype(\"float32\")\n scale[0] = 0.001\n self.inputs = {\n 'X': np.random.random((8, 16, 7, 7)).astype(\"float32\"),\n 'InScale': scale,\n 'InAccum': accum,\n 'InState': state,\n }\n\n out_accum = np.zeros(1).astype(\"float32\")\n out_state = np.zeros(1).astype(\"float32\")\n out_scale = np.zeros(1).astype(\"float32\")\n out_accum[0] = self.attrs['moving_rate'] * accum[0] + np.max(\n np.abs(self.inputs['X'])).astype(\"float32\")\n out_state[0] = self.attrs['moving_rate'] * state[0] + 1\n out_scale = out_accum / out_state\n out_data = self.calc_output(out_scale)\n self.outputs = {\n 'Out': out_data,\n 'OutAccum': out_accum,\n 'OutState': out_state,\n 'OutScale': out_scale,\n }\n\n def init_type(self):\n self.op_type = \"fake_quantize_moving_average_abs_max\"\n\n def calc_output(self, out_scale):\n return np.round(self.inputs['X'] / out_scale * (\n (1 << (self.attrs['bit_length'] - 1)) - 1))\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestFakeQuantDequantMovingOp(TestMovingOpBase):\n def init_type(self):\n self.op_type = \"fake_quantize_dequantize_moving_average_abs_max\"\n\n def calc_output(self, out_scale):\n range_v = (1 << 
(self.attrs['bit_length'] - 1)) - 1\n return np.round(self.inputs['X'] / out_scale *\n range_v) * out_scale / range_v\n\n def test_check_grad(self):\n x = self.inputs[\"X\"]\n gradient = [np.ones(x.shape) / np.product(x.shape)]\n self.check_grad([\"X\"], \"Out\", user_defined_grads=gradient)\n\n\nclass TestFakeQuantDequantAbsOp(OpTest):\n def setUp(self):\n self.op_type = \"fake_quantize_dequantize_abs_max\"\n self.attrs = {'bit_length': 8}\n self.inputs = {'X': np.random.random((124, 240)).astype(\"float32\"), }\n scale = np.max(np.abs(self.inputs['X'])).astype(\"float32\")\n out_data = self.calc_output(scale)\n self.outputs = {\n 'Out': out_data,\n 'OutScale': np.array(scale).astype(\"float32\"),\n }\n\n def calc_output(self, scale):\n range_v = (1 << (self.attrs['bit_length'] - 1)) - 1\n return np.round(self.inputs['X'] / scale * range_v) * scale / range_v\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n x = self.inputs[\"X\"]\n gradient = [np.ones(x.shape) / np.product(x.shape)]\n self.check_grad([\"X\"], \"Out\", user_defined_grads=gradient)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
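All of the fake-quantize tests above reduce to the same abs-max arithmetic: take scale = max(|x|), map x onto the signed integer range implied by bit_length, and, for the quantize-dequantize variants, map back. A minimal standalone NumPy sketch of that round trip (the helper name fake_quant_dequant_abs_max is illustrative only, not part of the op API):

.. code-block:: python

    import numpy as np

    def fake_quant_dequant_abs_max(x, bit_length=8):
        # The scale is the largest absolute value in the tensor.
        scale = np.max(np.abs(x)).astype("float32")
        range_v = (1 << (bit_length - 1)) - 1  # 127 for 8 bits
        quantized = np.round(x / scale * range_v)    # 'Out' of fake_quantize_abs_max
        dequantized = quantized * scale / range_v    # round trip used by the *_dequantize ops
        return quantized, dequantized, scale

    x = (np.random.random((8, 8)).astype("float32") - 0.5) * 10
    q, dq, s = fake_quant_dequant_abs_max(x)
    # Rounding moves each value by at most half a quantization step.
    assert np.max(np.abs(dq - x)) <= 0.5 * s / 127 + 1e-5

The per-channel, range and moving-average variants only change how the scale is chosen; the quantize/dequantize mapping itself is identical.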
[ [ "numpy.dot", "numpy.array", "numpy.abs" ], [ "numpy.random.random", "numpy.random.randint" ], [ "numpy.array" ], [ "numpy.testing.assert_equal", "numpy.random.random", "numpy.random.rand" ], [ "numpy.product", "numpy.random.random", "numpy.abs", "numpy.clip", "numpy.ones", "numpy.round", "numpy.full", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cpelley/improver
[ "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149", "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149", "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149", "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149", "ebf77fe2adc85ed7aec74c26671872a2e4388ded", "ca028e3a1c842e3ff00b188c8ea6eaedd0a07149" ]
[ "improver/cli/nbhood_land_and_sea.py", "improver/nbhood/recursive_filter.py", "improver_tests/ensemble_copula_coupling/test_RebadgePercentilesAsRealizations.py", "improver/utilities/time_lagging.py", "improver_tests/ensemble_copula_coupling/test_utilities.py", "improver_tests/utilities/test_OccurrenceWithinVicinity.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Script to run neighbourhooding processing over areas of land and sea\nseparately before combining them to return unified fields. Topographic zones\nmay also be employed, with the sea area being treated as a distinct zone.\"\"\"\nfrom improver import cli\n\n\[email protected]\[email protected]_output\ndef process(\n cube: cli.inputcube,\n mask: cli.inputcube,\n weights: cli.inputcube = None,\n *,\n radii: cli.comma_separated_list,\n lead_times: cli.comma_separated_list = None,\n area_sum=False,\n):\n \"\"\" Module to process land and sea separately before combining them.\n\n Neighbourhood the input dataset over two distinct regions of land and sea.\n If performed as a single level neighbourhood, a land-sea mask should be\n provided. If instead topographic_zone neighbourhooding is being employed,\n the mask should be one of topographic zones. In the latter case a weights\n array is also needed to collapse the topographic_zone coordinate. These\n weights are created with the improver generate-topography-bands-weights\n CLI and should be made using a land-sea mask, which will then be employed\n within this code to draw the distinction between the two surface types.\n\n Args:\n cube (iris.cube.Cube):\n A cube to be processed.\n mask (iris.cube.Cube):\n A cube containing either a mask of topographic zones over land or\n a land-sea mask. If this is a land-sea mask, land points should be\n set to one and sea points set to zero.\n weights (iris.cube.Cube):\n A cube containing the weights which are used for collapsing the\n dimension gained through masking. These weights must have been\n created using a land-sea mask. 
(Optional).\n radii (list of float):\n The radius or a list of radii in metres of the neighbourhood to\n apply.\n If it is a list, it must be the same length as lead_times, which\n defines at which lead time to use which nbhood radius. The radius\n will be interpolated for intermediate lead times.\n lead_times (list of int):\n The lead times in hours that correspond to the radii to be used.\n If lead_times are set, radii must be a list the same length as\n lead_times. Lead times must be given as integer values.\n area_sum (bool):\n Return sum rather than fraction over the neighbourhood area.\n\n Returns:\n (tuple): tuple containing:\n **result** (iris.cube.Cube):\n A cube of the processed data.\n\n Raises:\n ValueError:\n If the topographic zone mask has the attribute\n topographic_zones_include_seapoints.\n IOError:\n if a weights cube isn't given and a topographic_zone mask is given.\n ValueError:\n If the weights cube has the attribute\n topographic_zones_include_seapoints.\n RuntimeError:\n If lead times are not None and has a different length to radii.\n TypeError:\n A weights cube has been provided but no topographic zone.\n\n \"\"\"\n import numpy as np\n\n from improver.nbhood.nbhood import NeighbourhoodProcessing\n from improver.nbhood.use_nbhood import ApplyNeighbourhoodProcessingWithAMask\n\n sum_or_fraction = \"sum\" if area_sum else \"fraction\"\n\n masking_coordinate = None\n if any(\n \"topographic_zone\" in coord.name() for coord in mask.coords(dim_coords=True)\n ):\n\n if mask.attributes[\"topographic_zones_include_seapoints\"] == \"True\":\n raise ValueError(\n \"The topographic zones mask cube must have been \"\n \"masked to exclude sea points, but \"\n \"topographic_zones_include_seapoints = True\"\n )\n\n if not weights:\n raise TypeError(\n \"A weights cube must be provided if using a mask \"\n \"of topographic zones to collapse the resulting \"\n \"vertical dimension.\"\n )\n\n if weights.attributes[\"topographic_zones_include_seapoints\"] == \"True\":\n raise ValueError(\n \"The weights cube must be masked to exclude sea \"\n \"points, but topographic_zones_include_seapoints \"\n \"= True\"\n )\n\n masking_coordinate = \"topographic_zone\"\n land_sea_mask = weights[0].copy(data=weights[0].data.mask)\n land_sea_mask.rename(\"land_binary_mask\")\n land_sea_mask.remove_coord(masking_coordinate)\n # Create land and sea masks in IMPROVER format (inverse of\n # numpy standard) 1 - include this region, 0 - exclude this region.\n land_only = land_sea_mask.copy(\n data=np.logical_not(land_sea_mask.data).astype(int)\n )\n sea_only = land_sea_mask.copy(data=land_sea_mask.data.astype(int))\n\n else:\n if weights is not None:\n raise TypeError(\"A weights cube has been provided but will not be \" \"used\")\n land_sea_mask = mask\n # In this case the land is set to 1 and the sea is set to 0 in the\n # input mask.\n sea_only = land_sea_mask.copy(\n data=np.logical_not(land_sea_mask.data).astype(int)\n )\n land_only = land_sea_mask.copy(data=land_sea_mask.data.astype(int))\n\n if lead_times is None:\n radius_or_radii = float(radii[0])\n else:\n if len(radii) != len(lead_times):\n raise RuntimeError(\n \"If leadtimes are supplied, it must be a list\"\n \" of equal length to a list of radii.\"\n )\n radius_or_radii = [float(x) for x in radii]\n lead_times = [int(x) for x in lead_times]\n\n # Section for neighbourhood processing land points.\n if land_only.data.max() > 0.0:\n if masking_coordinate is None:\n result_land = NeighbourhoodProcessing(\n \"square\",\n radius_or_radii,\n 
lead_times=lead_times,\n sum_or_fraction=sum_or_fraction,\n re_mask=True,\n )(cube, land_only)\n else:\n result_land = ApplyNeighbourhoodProcessingWithAMask(\n masking_coordinate,\n radius_or_radii,\n lead_times=lead_times,\n collapse_weights=weights,\n sum_or_fraction=sum_or_fraction,\n re_mask=False,\n )(cube, mask)\n result = result_land\n\n # Section for neighbourhood processing sea points.\n if sea_only.data.max() > 0.0:\n result_sea = NeighbourhoodProcessing(\n \"square\",\n radius_or_radii,\n lead_times=lead_times,\n sum_or_fraction=sum_or_fraction,\n re_mask=True,\n )(cube, sea_only)\n result = result_sea\n\n # Section for combining land and sea points following land and sea points\n # being neighbourhood processed individually.\n if sea_only.data.max() > 0.0 and land_only.data.max() > 0.0:\n # Recombine cubes to be a single output.\n combined_data = result_land.data.filled(0) + result_sea.data.filled(0)\n result = result_land.copy(data=combined_data)\n\n return result\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Module to apply a recursive filter to neighbourhooded data.\"\"\"\nimport warnings\nfrom typing import List, Optional, Tuple\n\nimport iris\nimport numpy as np\nfrom iris.cube import Cube, CubeList\nfrom numpy import ndarray\n\nfrom improver import PostProcessingPlugin\nfrom improver.generate_ancillaries.generate_orographic_smoothing_coefficients import (\n OrographicSmoothingCoefficients,\n)\nfrom improver.metadata.constants.time_types import TIME_COORDS\nfrom improver.utilities.cube_checker import check_cube_coordinates\nfrom improver.utilities.pad_spatial import pad_cube_with_halo, remove_halo_from_cube\n\n\nclass RecursiveFilter(PostProcessingPlugin):\n \"\"\"\n Apply a recursive filter to the input cube.\n \"\"\"\n\n def __init__(self, iterations: Optional[int] = None, edge_width: int = 15) -> None:\n \"\"\"\n Initialise the class.\n\n Args:\n iterations:\n The number of iterations of the recursive filter.\n edge_width:\n Half the width of the padding halo applied before\n recursive filtering.\n Raises:\n ValueError: If number of iterations is not None and is set such\n that iterations is less than 1.\n Warns:\n UserWarning:\n If iterations is higher than 2.\n \"\"\"\n if iterations is not None:\n if iterations < 1:\n raise ValueError(\n \"Invalid number of iterations: must be >= 1: {}\".format(iterations)\n )\n if iterations > 2:\n warnings.warn(\n \"More than two iterations degrades the conservation\"\n \"of probability assumption.\"\n )\n self.iterations = iterations\n self.edge_width = edge_width\n self.smoothing_coefficient_name_format = \"smoothing_coefficient_{}\"\n\n def __repr__(self) -> str:\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = \"<RecursiveFilter: iterations: {}, edge_width: {}\"\n return result.format(self.iterations, self.edge_width)\n\n @staticmethod\n def _recurse_forward(\n grid: ndarray, smoothing_coefficients: ndarray, axis: int\n ) -> ndarray:\n \"\"\"\n Method to run the recursive filter in the forward direction.\n\n In the forward direction:\n Recursive filtering is calculated as:\n\n .. 
math::\n B_i = ((1 - \\\\rm{smoothing\\\\_coefficient_{i-1}}) \\\\times A_i) +\n (\\\\rm{smoothing\\\\_coefficient_{i-1}} \\\\times B_{i-1})\n\n Progressing from gridpoint i-1 to i:\n :math:`B_i` = new value at gridpoint i\n\n :math:`A_i` = Old value at gridpoint i\n\n :math:`B_{i-1}` = New value at gridpoint i - 1\n\n Args:\n grid:\n 2D array containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients:\n Matching 2D array of smoothing_coefficient values that will be\n used when applying the recursive filter along the specified\n axis.\n axis:\n Index of the spatial axis (0 or 1) over which to recurse.\n\n Returns:\n 2D array containing the smoothed field after the recursive\n filter method has been applied to the input array in the\n forward direction along the specified axis.\n \"\"\"\n lim = grid.shape[axis]\n for i in range(1, lim):\n if axis == 0:\n grid[i, :] = (1.0 - smoothing_coefficients[i - 1, :]) * grid[\n i, :\n ] + smoothing_coefficients[i - 1, :] * grid[i - 1, :]\n if axis == 1:\n grid[:, i] = (1.0 - smoothing_coefficients[:, i - 1]) * grid[\n :, i\n ] + smoothing_coefficients[:, i - 1] * grid[:, i - 1]\n return grid\n\n @staticmethod\n def _recurse_backward(\n grid: ndarray, smoothing_coefficients: ndarray, axis: int\n ) -> ndarray:\n \"\"\"\n Method to run the recursive filter in the backwards direction.\n\n In the backwards direction:\n Recursive filtering is calculated as:\n\n .. math::\n B_i = ((1 - \\\\rm{smoothing\\\\_coefficient}) \\\\times A_i) +\n (\\\\rm{smoothing\\\\_coefficient} \\\\times B_{i+1})\n\n Progressing from gridpoint i+1 to i:\n :math:`B_i` = new value at gridpoint i\n\n :math:`A_i` = Old value at gridpoint i\n\n :math:`B_{i+1}` = New value at gridpoint i+1\n\n Args:\n grid:\n 2D array containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients:\n Matching 2D array of smoothing_coefficient values that will be\n used when applying the recursive filter along the specified\n axis.\n axis:\n Index of the spatial axis (0 or 1) over which to recurse.\n\n Returns:\n 2D array containing the smoothed field after the recursive\n filter method has been applied to the input array in the\n backwards direction along the specified axis.\n \"\"\"\n lim = grid.shape[axis]\n for i in range(lim - 2, -1, -1):\n if axis == 0:\n grid[i, :] = (1.0 - smoothing_coefficients[i, :]) * grid[\n i, :\n ] + smoothing_coefficients[i, :] * grid[i + 1, :]\n if axis == 1:\n grid[:, i] = (1.0 - smoothing_coefficients[:, i]) * grid[\n :, i\n ] + smoothing_coefficients[:, i] * grid[:, i + 1]\n return grid\n\n @staticmethod\n def _run_recursion(\n cube: Cube,\n smoothing_coefficients_x: Cube,\n smoothing_coefficients_y: Cube,\n iterations: int,\n ) -> Cube:\n \"\"\"\n Method to run the recursive filter.\n\n Args:\n cube:\n 2D cube containing the input data to which the recursive\n filter will be applied.\n smoothing_coefficients_x:\n 2D cube containing array of smoothing_coefficient values that\n will be used when applying the recursive filter along the\n x-axis.\n smoothing_coefficients_y:\n 2D cube containing array of smoothing_coefficient values that\n will be used when applying the recursive filter along the\n y-axis.\n iterations:\n The number of iterations of the recursive filter\n\n Returns:\n Cube containing the smoothed field after the recursive filter\n method has been applied to the input cube.\n \"\"\"\n (x_index,) = cube.coord_dims(cube.coord(axis=\"x\").name())\n (y_index,) = 
cube.coord_dims(cube.coord(axis=\"y\").name())\n output = cube.data\n\n for _ in range(iterations):\n output = RecursiveFilter._recurse_forward(\n output, smoothing_coefficients_x.data, x_index\n )\n output = RecursiveFilter._recurse_backward(\n output, smoothing_coefficients_x.data, x_index\n )\n output = RecursiveFilter._recurse_forward(\n output, smoothing_coefficients_y.data, y_index\n )\n output = RecursiveFilter._recurse_backward(\n output, smoothing_coefficients_y.data, y_index\n )\n cube.data = output\n return cube\n\n def _validate_coefficients(\n self, cube: Cube, smoothing_coefficients: CubeList\n ) -> List[Cube]:\n \"\"\"Validate the smoothing coefficients cubes.\n\n Args:\n cube:\n 2D cube containing the input data to which the recursive\n filter will be applied.\n\n smoothing_coefficients:\n A cubelist containing two cubes of smoothing_coefficient values,\n one corresponding to smoothing in the x-direction, and the other\n to smoothing in the y-direction.\n\n Returns:\n A list of smoothing coefficients cubes ordered: [x-coeffs, y-coeffs].\n\n Raises:\n ValueError: Smoothing coefficient cubes are not named correctly.\n ValueError: If any smoothing_coefficient cube value is over 0.5\n ValueError: The coordinate to be smoothed within the\n smoothing coefficient cube is not of the expected length.\n ValueError: The coordinate to be smoothed within the\n smoothing coefficient cube does not have the expected points.\n \"\"\"\n # Ensure cubes are in x, y order.\n smoothing_coefficients.sort(key=lambda cell: cell.name())\n axes = [\"x\", \"y\"]\n\n for axis, smoothing_coefficient in zip(axes, smoothing_coefficients):\n\n # Check the smoothing coefficient cube name is as expected\n expected_name = self.smoothing_coefficient_name_format.format(axis)\n if smoothing_coefficient.name() != expected_name:\n msg = (\n \"The smoothing coefficient cube name {} does not match the \"\n \"expected name {}\".format(\n smoothing_coefficient.name(), expected_name\n )\n )\n raise ValueError(msg)\n\n # Check the smoothing coefficients do not exceed an empirically determined\n # maximum value; larger values damage conservation significantly.\n if (smoothing_coefficient.data > 0.5).any():\n raise ValueError(\n \"All smoothing_coefficient values must be less than 0.5. 
\"\n \"A large smoothing_coefficient value leads to poor \"\n \"conservation of probabilities\"\n )\n\n for test_axis in axes:\n coefficient_crd = smoothing_coefficient.coord(axis=test_axis)\n if test_axis == axis:\n expected_points = (\n cube.coord(axis=test_axis).points[1:]\n + cube.coord(axis=test_axis).points[:-1]\n ) / 2\n else:\n expected_points = cube.coord(axis=test_axis).points\n\n if len(coefficient_crd.points) != len(\n expected_points\n ) or not np.allclose(coefficient_crd.points, expected_points):\n msg = (\n f\"The smoothing coefficients {test_axis} dimension does not \"\n \"have the expected length or values compared with the cube \"\n \"to which smoothing is being applied.\\n\\nSmoothing \"\n \"coefficient cubes must have coordinates that are:\\n\"\n \"- one element shorter along the dimension being smoothed \"\n f\"({axis}) than in the target cube, with points in that \"\n \"dimension equal to the mean of each pair of points along \"\n \"the dimension in the target cube\\n- equal to the points \"\n \"in the target cube along the dimension not being smoothed\"\n )\n raise ValueError(msg)\n\n return smoothing_coefficients\n\n def _pad_coefficients(self, coeff_x, coeff_y):\n \"\"\"Pad smoothing coefficients\"\"\"\n pad_x, pad_y = [\n pad_cube_with_halo(\n coeff, 2 * self.edge_width, 2 * self.edge_width, pad_method=\"symmetric\",\n )\n for coeff in [coeff_x, coeff_y]\n ]\n return pad_x, pad_y\n\n @staticmethod\n def _update_coefficients_from_mask(\n coeffs_x: Cube, coeffs_y: Cube, mask: Cube\n ) -> Tuple[Cube, Cube]:\n \"\"\"\n Zero all smoothing coefficients for data points that are masked\n\n Args:\n coeffs_x\n coeffs_y\n mask\n\n Returns:\n Updated smoothing coefficients\n \"\"\"\n plugin = OrographicSmoothingCoefficients(\n use_mask_boundary=False, invert_mask=False\n )\n plugin.zero_masked(coeffs_x, coeffs_y, mask)\n return coeffs_x, coeffs_y\n\n def process(self, cube: Cube, smoothing_coefficients: CubeList) -> Cube:\n \"\"\"\n Set up the smoothing_coefficient parameters and run the recursive\n filter. Smoothing coefficients can be generated using\n :class:`~.OrographicSmoothingCoefficients`\n and :func:`~improver.cli.generate_orographic_smoothing_coefficients`.\n The steps undertaken are:\n\n 1. Split the input cube into slices determined by the co-ordinates in\n the x and y directions.\n 2. Construct an array of filter parameters (smoothing_coefficients_x\n and smoothing_coefficients_y) for each cube slice that are used to\n weight the recursive filter in the x- and y-directions.\n 3. Pad each cube slice with a square-neighbourhood halo and apply\n the recursive filter for the required number of iterations.\n 4. Remove the halo from the cube slice and append the recursed cube\n slice to a 'recursed cube'.\n 5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.\n 6. Modify the 'new cube' so that its scalar dimension co-ordinates are\n consistent with those in the original input cube.\n 7. Return the 'new cube' which now contains the recursively filtered\n values for the original input cube.\n\n The smoothing_coefficient determines how much \"value\" of a cell\n undergoing filtering is comprised of the current value at that cell and\n how much comes from the adjacent cell preceding it in the direction in\n which filtering is being applied. 
A larger smoothing_coefficient\n results in a more significant proportion of a cell's new value coming\n from its neighbouring cell.\n\n Args:\n cube:\n Cube containing the input data to which the recursive filter\n will be applied.\n smoothing_coefficients:\n A cubelist containing two cubes of smoothing_coefficient values,\n one corresponding to smoothing in the x-direction, and the other\n to smoothing in the y-direction.\n\n Returns:\n Cube containing the smoothed field after the recursive filter\n method has been applied.\n\n Raises:\n ValueError:\n If the cube contains masked data from multiple cycles or times\n \"\"\"\n cube_format = next(cube.slices([cube.coord(axis=\"y\"), cube.coord(axis=\"x\")]))\n coeffs_x, coeffs_y = self._validate_coefficients(\n cube_format, smoothing_coefficients\n )\n\n mask_cube = None\n if np.ma.is_masked(cube.data):\n # Assumes mask is the same for each x-y slice. This may not be\n # true if there are several time slices in the cube - so throw\n # an error if this is so.\n for coord in TIME_COORDS:\n if cube.coords(coord) and len(cube.coord(coord).points) > 1:\n raise ValueError(\n \"Dealing with masks from multiple time points is unsupported\"\n )\n\n mask_cube = cube_format.copy(data=cube_format.data.mask)\n coeffs_x, coeffs_y = self._update_coefficients_from_mask(\n coeffs_x, coeffs_y, mask_cube,\n )\n\n padded_coefficients_x, padded_coefficients_y = self._pad_coefficients(\n coeffs_x, coeffs_y\n )\n\n recursed_cube = iris.cube.CubeList()\n for output in cube.slices([cube.coord(axis=\"y\"), cube.coord(axis=\"x\")]):\n\n padded_cube = pad_cube_with_halo(\n output, 2 * self.edge_width, 2 * self.edge_width, pad_method=\"symmetric\"\n )\n\n new_cube = self._run_recursion(\n padded_cube,\n padded_coefficients_x,\n padded_coefficients_y,\n self.iterations,\n )\n new_cube = remove_halo_from_cube(\n new_cube, 2 * self.edge_width, 2 * self.edge_width\n )\n\n if mask_cube is not None:\n new_cube.data = np.ma.MaskedArray(new_cube.data, mask=mask_cube.data)\n\n recursed_cube.append(new_cube)\n\n new_cube = recursed_cube.merge_cube()\n new_cube = check_cube_coordinates(cube, new_cube)\n\n return new_cube\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nUnit tests for the\n`ensemble_copula_coupling.RebadgePercentilesAsRealizations` class.\n\n\"\"\"\nimport unittest\n\nimport numpy as np\nfrom iris.coords import AuxCoord, DimCoord\nfrom iris.cube import Cube\nfrom iris.exceptions import InvalidCubeError\nfrom iris.tests import IrisTest\n\nfrom improver.ensemble_copula_coupling.ensemble_copula_coupling import (\n RebadgePercentilesAsRealizations as Plugin,\n)\nfrom improver.synthetic_data.set_up_test_cubes import set_up_percentile_cube\n\nfrom .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS\n\n\nclass Test_process(IrisTest):\n\n \"\"\"Test the process method of the\n RebadgePercentilesAsRealizations plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Set up temperature percentile cube for testing\"\"\"\n self.cube = set_up_percentile_cube(\n np.sort(ECC_TEMPERATURE_REALIZATIONS, axis=0),\n np.array([10, 50, 90], dtype=np.float32),\n )\n\n def test_basic(self):\n \"\"\"Test that a cube is produced with a realization dimension\"\"\"\n result = Plugin().process(self.cube)\n self.assertIsInstance(result, Cube)\n self.assertIsInstance(result.coord(\"realization\"), DimCoord)\n self.assertEqual(result.coord(\"realization\").units, \"1\")\n\n def test_specify_realization_numbers(self):\n \"\"\"Use the ensemble_realization_numbers optional argument to specify\n particular values for the ensemble realization numbers.\"\"\"\n ensemble_realization_numbers = [12, 13, 14]\n result = Plugin().process(self.cube, ensemble_realization_numbers)\n self.assertArrayEqual(\n result.coord(\"realization\").points, ensemble_realization_numbers\n )\n\n def test_number_of_realizations(self):\n \"\"\"Check the values for the realization coordinate generated without\n specifying the ensemble_realization_numbers argument.\"\"\"\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(\n result.coord(\"realization\").points, np.array([0, 1, 2])\n )\n\n def test_raises_exception_if_realization_already_exists(self):\n \"\"\"Check that we raise an exception if a realization coordinate already\n exists.\"\"\"\n self.cube.add_aux_coord(AuxCoord(0, \"realization\"))\n msg = r\"Cannot rebadge percentile coordinate to realization.*\"\n with self.assertRaisesRegex(InvalidCubeError, msg):\n Plugin().process(self.cube)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the 
distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Provide support utilities for time lagging ensembles\"\"\"\n\nfrom typing import List, Union\n\nimport numpy as np\nfrom iris.cube import Cube, CubeList\n\nfrom improver import BasePlugin\nfrom improver.metadata.forecast_times import rebadge_forecasts_as_latest_cycle\nfrom improver.utilities.cube_manipulation import MergeCubes\n\n\nclass GenerateTimeLaggedEnsemble(BasePlugin):\n \"\"\"Combine realizations from different forecast cycles into one cube\"\"\"\n\n def process(self, cubelist: Union[List[Cube], CubeList]) -> Cube:\n \"\"\"\n Take an input cubelist containing forecasts from different cycles and\n merges them into a single cube.\n\n The steps taken are:\n 1. Update forecast reference time and period to match the latest\n contributing cycle.\n 2. Check for duplicate realization numbers. If a duplicate is\n found, renumber all of the realizations uniquely.\n 3. 
Concatenate into one cube along the realization axis.\n\n Args:\n cubelist:\n List of input forecasts\n\n Returns:\n Concatenated forecasts\n \"\"\"\n cubelist = rebadge_forecasts_as_latest_cycle(cubelist)\n\n # Take all the realizations from all the input cube and\n # put in one array\n all_realizations = [cube.coord(\"realization\").points for cube in cubelist]\n all_realizations = np.concatenate(all_realizations)\n # Find unique realizations\n unique_realizations = np.unique(all_realizations)\n\n # If we have fewer unique realizations than total realizations we have\n # duplicate realizations so we rebadge all realizations in the cubelist\n if len(unique_realizations) < len(all_realizations):\n first_realization = 0\n for cube in cubelist:\n n_realization = len(cube.coord(\"realization\").points)\n cube.coord(\"realization\").points = np.arange(\n first_realization, first_realization + n_realization, dtype=np.int32\n )\n first_realization = first_realization + n_realization\n\n # slice over realization to deal with cases where direct concatenation\n # would result in a non-monotonic coordinate\n lagged_ensemble = MergeCubes()(cubelist, slice_over_realization=True)\n\n return lagged_ensemble\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nUnit tests for the\n`ensemble_copula_coupling.EnsembleCopulaCouplingUtilities` class.\n\"\"\"\nimport importlib\nimport unittest\nimport unittest.mock as mock\nfrom datetime import datetime\nfrom unittest.case import skipIf\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom cf_units import Unit\nfrom iris.coords import DimCoord\nfrom iris.cube import Cube, CubeList\nfrom iris.exceptions import CoordinateNotFoundError\nfrom iris.tests import IrisTest\n\nfrom improver.ensemble_copula_coupling.utilities import (\n choose_set_of_percentiles,\n concatenate_2d_array_with_2d_array_endpoints,\n create_cube_with_percentiles,\n get_bounds_of_distribution,\n insert_lower_and_upper_endpoint_to_1d_array,\n interpolate_multiple_rows_same_x,\n interpolate_multiple_rows_same_y,\n restore_non_percentile_dimensions,\n slow_interp_same_x,\n slow_interp_same_y,\n)\nfrom improver.synthetic_data.set_up_test_cubes import (\n set_up_percentile_cube,\n set_up_variable_cube,\n)\n\nfrom .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS, set_up_spot_test_cube\n\n\nclass Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest):\n\n \"\"\"Test the concatenate_2d_array_with_2d_array_endpoints.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that result is a numpy array with the expected contents.\"\"\"\n expected = np.array([[0, 20, 50, 80, 100]])\n input_array = np.array([[20, 50, 80]])\n result = concatenate_2d_array_with_2d_array_endpoints(input_array, 0, 100)\n self.assertIsInstance(result, np.ndarray)\n self.assertArrayAlmostEqual(result, expected)\n\n def test_1d_input(self):\n \"\"\"Test 1D input results in the expected error\"\"\"\n input_array = np.array([-40, 200, 1000])\n msg = \"Expected 2D input\"\n with self.assertRaisesRegex(ValueError, msg):\n concatenate_2d_array_with_2d_array_endpoints(input_array, -100, 10000)\n\n def test_3d_input(self):\n \"\"\"Test 3D input results in expected error\"\"\"\n input_array = np.array([[[-40, 200, 1000]]])\n msg = \"Expected 2D input\"\n with self.assertRaisesRegex(ValueError, msg):\n concatenate_2d_array_with_2d_array_endpoints(input_array, -100, 10000)\n\n\nclass Test_choose_set_of_percentiles(IrisTest):\n\n \"\"\"Test the choose_set_of_percentiles plugin.\"\"\"\n\n def test_basic(self):\n \"\"\"\n Test that the plugin returns a list with the expected number of\n percentiles.\n \"\"\"\n no_of_percentiles = 3\n result = choose_set_of_percentiles(no_of_percentiles)\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), no_of_percentiles)\n\n def test_data(self):\n \"\"\"\n Test that the plugin returns a list with the expected data values\n for the percentiles.\n \"\"\"\n data = np.array([25, 50, 75])\n no_of_percentiles = 3\n result = choose_set_of_percentiles(no_of_percentiles)\n self.assertArrayAlmostEqual(result, data)\n\n def test_random(self):\n \"\"\"\n Test that the plugin returns a list with the expected number of\n percentiles, if the random sampling option is selected.\n \"\"\"\n no_of_percentiles = 
3\n result = choose_set_of_percentiles(no_of_percentiles, sampling=\"random\")\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), no_of_percentiles)\n\n def test_unknown_sampling_option(self):\n \"\"\"\n Test that the plugin returns the expected error message,\n if an unknown sampling option is selected.\n \"\"\"\n no_of_percentiles = 3\n msg = \"Unrecognised sampling option\"\n with self.assertRaisesRegex(ValueError, msg):\n choose_set_of_percentiles(no_of_percentiles, sampling=\"unknown\")\n\n\nclass Test_create_cube_with_percentiles(IrisTest):\n\n \"\"\"Test the _create_cube_with_percentiles plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Set up temperature cube.\"\"\"\n self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS[0])\n self.cube_data = ECC_TEMPERATURE_REALIZATIONS\n\n def test_basic(self):\n \"\"\"Test that the plugin returns an Iris.cube.Cube with suitable units.\"\"\"\n cube_data = self.cube_data + 2\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(percentiles, self.cube, cube_data)\n self.assertIsInstance(result, Cube)\n self.assertEqual(result.units, self.cube.units)\n\n def test_changed_cube_units(self):\n \"\"\"Test that the plugin returns a cube with chosen units.\"\"\"\n cube_data = self.cube_data + 2\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(\n percentiles, self.cube, cube_data, cube_unit=\"1\"\n )\n self.assertEqual(result.units, Unit(\"1\"))\n\n def test_many_percentiles(self):\n \"\"\"Test that the plugin returns an Iris.cube.Cube with many percentiles.\n \"\"\"\n percentiles = np.linspace(0, 100, 100)\n cube_data = np.zeros(\n [\n len(percentiles),\n len(self.cube.coord(\"latitude\").points),\n len(self.cube.coord(\"longitude\").points),\n ]\n )\n result = create_cube_with_percentiles(percentiles, self.cube, cube_data)\n self.assertEqual(cube_data.shape, result.data.shape)\n\n def test_incompatible_percentiles(self):\n \"\"\"\n Test that the plugin fails if the percentile values requested\n are not numbers.\n \"\"\"\n percentiles = [\"cat\", \"dog\", \"elephant\"]\n cube_data = np.zeros(\n [\n len(percentiles),\n len(self.cube.coord(\"latitude\").points),\n len(self.cube.coord(\"longitude\").points),\n ]\n )\n msg = \"could not convert string to float\"\n with self.assertRaisesRegex(ValueError, msg):\n create_cube_with_percentiles(percentiles, self.cube, cube_data)\n\n def test_percentile_points(self):\n \"\"\"\n Test that the plugin returns an Iris.cube.Cube\n with a percentile coordinate with the desired points.\n \"\"\"\n cube_data = self.cube_data + 2\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(percentiles, self.cube, cube_data)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayAlmostEqual(result.coord(\"percentile\").points, percentiles)\n\n def test_spot_forecasts_percentile_points(self):\n \"\"\"\n Test that the plugin returns a Cube with a percentile dimension\n coordinate and that the percentile dimension has the expected points\n for an input spot forecast.\n \"\"\"\n cube = set_up_spot_test_cube()\n spot_data = cube.data.copy() + 2\n spot_cube = next(cube.slices_over(\"realization\"))\n spot_cube.remove_coord(\"realization\")\n\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(percentiles, spot_cube, spot_data)\n self.assertIsInstance(result, Cube)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayAlmostEqual(result.coord(\"percentile\").points, percentiles)\n\n def 
test_percentile_length_too_short(self):\n \"\"\"\n Test that the plugin raises the default ValueError, if the number\n of percentiles is fewer than the length of the zeroth dimension of the\n required cube data.\n \"\"\"\n cube_data = self.cube_data + 2\n percentiles = [10, 50]\n msg = \"Require data with shape\"\n with self.assertRaisesRegex(ValueError, msg):\n create_cube_with_percentiles(percentiles, self.cube, cube_data)\n\n def test_percentile_length_too_long(self):\n \"\"\"\n Test that the plugin raises the default ValueError, if the number\n of percentiles exceeds the length of the zeroth dimension of the\n required data.\n \"\"\"\n cube_data = self.cube_data[0, :, :] + 2\n percentiles = [10, 50, 90]\n msg = \"Require data with shape\"\n with self.assertRaisesRegex(ValueError, msg):\n create_cube_with_percentiles(percentiles, self.cube, cube_data)\n\n def test_metadata_copy(self):\n \"\"\"\n Test that the metadata dictionaries within the input cube, are\n also present on the output cube.\n \"\"\"\n self.cube.attributes = {\"source\": \"ukv\"}\n cube_data = self.cube_data + 2\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(percentiles, self.cube, cube_data)\n self.assertDictEqual(self.cube.metadata._asdict(), result.metadata._asdict())\n\n def test_coordinate_copy(self):\n \"\"\"\n Test that the coordinates within the input cube, are\n also present on the output cube.\n \"\"\"\n cube_data = self.cube_data + 2\n percentiles = [10, 50, 90]\n result = create_cube_with_percentiles(percentiles, self.cube, cube_data)\n for coord in self.cube.coords():\n if coord not in result.coords():\n msg = \"Coordinate: {} not found in cube {}\".format(coord, result)\n raise CoordinateNotFoundError(msg)\n\n\nclass Test_get_bounds_of_distribution(IrisTest):\n\n \"\"\"Test the get_bounds_of_distribution plugin.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the result is a numpy array.\"\"\"\n cube_name = \"air_temperature\"\n cube_units = Unit(\"degreesC\")\n result = get_bounds_of_distribution(cube_name, cube_units)\n self.assertIsInstance(result, np.ndarray)\n\n def test_check_data(self):\n \"\"\"\n Test that the expected results are returned for the bounds_pairing.\n \"\"\"\n cube_name = \"air_temperature\"\n cube_units = Unit(\"degreesC\")\n bounds_pairing = (-100, 60)\n result = get_bounds_of_distribution(cube_name, cube_units)\n self.assertArrayAlmostEqual(result, bounds_pairing)\n\n def test_check_unit_conversion(self):\n \"\"\"\n Test that the expected results are returned for the bounds_pairing,\n if the units of the bounds_pairings need to be converted to match\n the units of the forecast.\n \"\"\"\n cube_name = \"air_temperature\"\n cube_units = Unit(\"fahrenheit\")\n bounds_pairing = (-148, 140) # In fahrenheit\n result = get_bounds_of_distribution(cube_name, cube_units)\n self.assertArrayAlmostEqual(result, bounds_pairing)\n\n def test_check_exception_is_raised(self):\n \"\"\"\n Test that the expected results are returned for the bounds_pairing.\n \"\"\"\n cube_name = \"nonsense\"\n cube_units = Unit(\"degreesC\")\n msg = \"The bounds_pairing_key\"\n with self.assertRaisesRegex(KeyError, msg):\n get_bounds_of_distribution(cube_name, cube_units)\n\n\nclass Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest):\n\n \"\"\"Test the insert_lower_and_upper_endpoint_to_1d_array.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the result is a numpy array with the expected contents.\"\"\"\n expected = [0, 20, 50, 80, 100]\n percentiles = np.array([20, 50, 80])\n result 
= insert_lower_and_upper_endpoint_to_1d_array(percentiles, 0, 100)\n self.assertIsInstance(result, np.ndarray)\n self.assertArrayAlmostEqual(result, expected)\n\n def test_2d_example(self):\n \"\"\"Test 2D input results in expected error\"\"\"\n percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]])\n msg = \"Expected 1D input\"\n with self.assertRaisesRegex(ValueError, msg):\n insert_lower_and_upper_endpoint_to_1d_array(percentiles, -100, 10000)\n\n\nclass Test_restore_non_percentile_dimensions(IrisTest):\n\n \"\"\"Test the restore_non_percentile_dimensions.\"\"\"\n\n def setUp(self):\n \"\"\"Set up template cube and temperature data.\"\"\"\n self.cube = set_up_variable_cube(282 * np.ones((3, 3), dtype=np.float32))\n # function is designed to reshape an input data array with dimensions of\n # \"percentiles x points\" - generate suitable input data\n self.expected_data = np.sort(ECC_TEMPERATURE_REALIZATIONS, axis=0)\n points_data = [self.expected_data[i].flatten() for i in range(3)]\n self.input_data = np.array(points_data)\n\n def test_multiple_percentiles(self):\n \"\"\"\n Test the result is an array with the expected shape and contents.\n \"\"\"\n reshaped_array = restore_non_percentile_dimensions(\n self.input_data, self.cube, 3\n )\n self.assertIsInstance(reshaped_array, np.ndarray)\n self.assertArrayAlmostEqual(reshaped_array, self.expected_data)\n\n def test_single_percentile(self):\n \"\"\"\n Test the array size and contents if the percentile coordinate is scalar.\n \"\"\"\n expected = np.array(\n [[226.15, 237.4, 248.65], [259.9, 271.15, 282.4], [293.65, 304.9, 316.15]],\n dtype=np.float32,\n )\n reshaped_array = restore_non_percentile_dimensions(\n self.input_data[0], self.cube, 1\n )\n self.assertArrayAlmostEqual(reshaped_array, expected)\n\n def test_multiple_timesteps(self):\n \"\"\"\n Test that the data has been reshaped correctly when there are multiple timesteps.\n The array contents are also checked. 
The output cube has only a single percentile,\n which is therefore demoted to a scalar coordinate.\n \"\"\"\n expected = np.array(\n [\n [[4.0, 4.71428571], [5.42857143, 6.14285714]],\n [[6.85714286, 7.57142857], [8.28571429, 9.0]],\n ]\n )\n\n cubelist = CubeList([])\n for i, hour in enumerate([7, 8]):\n cubelist.append(\n set_up_percentile_cube(\n np.array([expected[i, :, :]], dtype=np.float32),\n np.array([50], dtype=np.float32),\n units=\"degC\",\n time=datetime(2015, 11, 23, hour),\n frt=datetime(2015, 11, 23, 6),\n )\n )\n percentile_cube = cubelist.merge_cube()\n\n reshaped_array = restore_non_percentile_dimensions(\n percentile_cube.data.flatten(),\n next(percentile_cube.slices_over(\"percentile\")),\n 1,\n )\n self.assertArrayAlmostEqual(reshaped_array, expected)\n\n\nnumba_installed = True\ntry:\n importlib.util.find_spec(\"numba\")\n from improver.ensemble_copula_coupling.numba_utilities import (\n fast_interp_same_x,\n fast_interp_same_y,\n )\nexcept ImportError:\n numba_installed = False\n\n\nclass Test_interpolate_multiple_rows_same_y(IrisTest):\n\n \"\"\"Test interpolate_multiple_rows_same_y\"\"\"\n\n def setUp(self):\n \"\"\"Set up arrays.\"\"\"\n np.random.seed(0)\n self.x = np.arange(0, 1, 0.01)\n self.xp = np.sort(np.random.random_sample((100, 100)), axis=1)\n self.fp = np.arange(0, 100, 1).astype(float)\n\n def test_slow(self):\n \"\"\"Test slow interp against known result.\"\"\"\n xp = np.array([[0, 1, 2, 3, 4], [-4, -3, -2, -1, 0]], dtype=np.float32)\n fp = np.array([0, 2, 4, 6, 8], dtype=np.float32)\n x = np.array([-1, 0.5, 2], dtype=np.float32)\n expected = np.array([[0, 1, 4], [6, 8, 8]], dtype=np.float32)\n result = slow_interp_same_y(x, xp, fp)\n np.testing.assert_allclose(result, expected)\n\n @patch.dict(\"sys.modules\", numba=None)\n @patch(\"improver.ensemble_copula_coupling.utilities.slow_interp_same_y\")\n def test_slow_interp_same_y_called(self, interp_imp):\n \"\"\"Test that slow_interp_same_y is called if numba is not installed.\"\"\"\n interpolate_multiple_rows_same_y(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n interp_imp.assert_called_once_with(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n\n @skipIf(not (numba_installed), \"numba not installed\")\n @patch(\"improver.ensemble_copula_coupling.numba_utilities.fast_interp_same_y\")\n def test_fast_interp_same_y_called(self, interp_imp):\n \"\"\"Test that fast_interp_same_y is called if numba is installed.\"\"\"\n interpolate_multiple_rows_same_y(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n interp_imp.assert_called_once_with(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_fast(self):\n \"\"\"Test fast interp against known result.\"\"\"\n xp = np.array([[0, 1, 2, 3, 4], [-4, -3, -2, -1, 0]], dtype=np.float32)\n fp = np.array([0, 2, 4, 6, 8], dtype=np.float32)\n x = np.array([-1, 0.5, 2], dtype=np.float32)\n expected = np.array([[0, 1, 4], [6, 8, 8]], dtype=np.float32)\n result = fast_interp_same_y(x, xp, fp)\n np.testing.assert_allclose(result, expected)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_fast(self):\n \"\"\"Test that slow and fast versions give same result.\"\"\"\n result_slow = slow_interp_same_y(self.x, self.xp, self.fp)\n result_fast = fast_interp_same_y(self.x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def 
test_slow_vs_fast_unordered(self):\n \"\"\"Test that slow and fast versions give same result\n when x is not sorted.\"\"\"\n shuffled_x = self.x.copy()\n np.random.shuffle(shuffled_x)\n result_slow = slow_interp_same_y(shuffled_x, self.xp, self.fp)\n result_fast = fast_interp_same_y(shuffled_x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_fast_repeated(self):\n \"\"\"Test that slow and fast versions give same result when\n rows of xp contain repeats.\"\"\"\n xp_repeat = self.xp.copy()\n xp_repeat[:, 51] = xp_repeat[:, 50]\n result_slow = slow_interp_same_y(self.x, xp_repeat, self.fp)\n result_fast = fast_interp_same_y(self.x, xp_repeat, self.fp)\n np.testing.assert_allclose(result_slow, result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_multi(self):\n \"\"\"Test that slow interp gives same result as\n interpolate_multiple_rows_same_y.\"\"\"\n result_slow = slow_interp_same_y(self.x, self.xp, self.fp)\n result_multiple = interpolate_multiple_rows_same_y(self.x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_multiple)\n\n\nclass TestInterpolateMultipleRowsSameX(IrisTest):\n\n \"\"\"Test interpolate_multiple_rows\"\"\"\n\n def setUp(self):\n \"\"\"Set up arrays.\"\"\"\n np.random.seed(0)\n self.x = np.arange(0, 1, 0.01)\n self.xp = np.sort(np.random.random_sample(100))\n self.fp = np.random.random((100, 100))\n\n def test_slow(self):\n \"\"\"Test slow interp against known result.\"\"\"\n xp = np.array([0, 1, 2, 3, 4], dtype=np.float32)\n fp = np.array([[0, 0.5, 1, 1.5, 2], [0, 2, 4, 6, 8]], dtype=np.float32)\n x = np.array([-1, 0.5, 2], dtype=np.float32)\n expected = np.array([[0, 0.25, 1], [0, 1, 4]], dtype=np.float32)\n result = slow_interp_same_x(x, xp, fp)\n np.testing.assert_allclose(result, expected)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_fast(self):\n \"\"\"Test fast interp against known result.\"\"\"\n xp = np.array([0, 1, 2, 3, 4], dtype=np.float32)\n fp = np.array([[0, 0.5, 1, 1.5, 2], [0, 2, 4, 6, 8]], dtype=np.float32)\n x = np.array([-1, 0.5, 2], dtype=np.float32)\n expected = np.array([[0, 0.25, 1], [0, 1, 4]], dtype=np.float32)\n result = fast_interp_same_x(x, xp, fp)\n np.testing.assert_allclose(result, expected)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_fast(self):\n \"\"\"Test that slow and fast versions give same result.\"\"\"\n result_slow = slow_interp_same_x(self.x, self.xp, self.fp)\n result_fast = fast_interp_same_x(self.x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_fast_unordered(self):\n \"\"\"Test that slow and fast versions give same result\n when x is not sorted.\"\"\"\n shuffled_x = self.x.copy()\n np.random.shuffle(shuffled_x)\n result_slow = slow_interp_same_x(shuffled_x, self.xp, self.fp)\n result_fast = fast_interp_same_x(shuffled_x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_fast_repeated(self):\n \"\"\"Test that slow and fast versions give same result when xp\n contains repeats.\"\"\"\n repeat_xp = self.xp.copy()\n repeat_xp[51] = repeat_xp[50]\n result_slow = slow_interp_same_x(self.x, repeat_xp, self.fp)\n result_fast = fast_interp_same_x(self.x, repeat_xp, self.fp)\n np.testing.assert_allclose(result_slow, 
result_fast)\n\n @skipIf(not (numba_installed), \"numba not installed\")\n def test_slow_vs_multi(self):\n \"\"\"Test that slow interp gives same result as\n interpolate_multiple_rows_same_x.\"\"\"\n result_slow = slow_interp_same_x(self.x, self.xp, self.fp)\n result_multiple = interpolate_multiple_rows_same_x(self.x, self.xp, self.fp)\n np.testing.assert_allclose(result_slow, result_multiple)\n\n @patch.dict(\"sys.modules\", numba=None)\n @patch(\"improver.ensemble_copula_coupling.utilities.slow_interp_same_x\")\n def test_slow_interp_same_x_called(self, interp_imp):\n \"\"\"Test that slow_interp_same_x is called if numba is not installed.\"\"\"\n interpolate_multiple_rows_same_x(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n interp_imp.assert_called_once_with(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n\n @skipIf(not (numba_installed), \"numba not installed\")\n @patch(\"improver.ensemble_copula_coupling.numba_utilities.fast_interp_same_x\")\n def test_fast_interp_same_x_called(self, interp_imp):\n \"\"\"Test that fast_interp_same_x is called if numba is installed.\"\"\"\n interpolate_multiple_rows_same_x(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n interp_imp.assert_called_once_with(\n mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for the utilities.OccurrenceWithinVicinity plugin.\"\"\"\n\nimport datetime\nfrom typing import Tuple\n\nimport numpy as np\nimport pytest\nfrom iris.cube import Cube\n\nfrom improver.synthetic_data.set_up_test_cubes import (\n add_coordinate,\n set_up_variable_cube,\n)\nfrom improver.utilities.spatial import OccurrenceWithinVicinity\n\n\ndef land_mask_cube_generator(shape: Tuple[int, int] = (5, 5)) -> Cube:\n \"\"\"Creates a land-mask cube for use in these tests\"\"\"\n mask = np.zeros(shape, dtype=np.int8)\n mask[:, 3:] = 1\n return set_up_variable_cube(\n mask,\n name=\"land_binary_mask\",\n units=\"1\",\n spatial_grid=\"equalarea\",\n grid_spacing=2000.0,\n domain_corner=(0.0, 0.0),\n )\n\n\[email protected](name=\"all_land_cube\")\ndef land_mask_cube_44_fixture() -> Cube:\n cube = land_mask_cube_generator((4, 4))\n cube.data = np.zeros_like(cube.data)\n return cube\n\n\[email protected](name=\"land_mask_cube\")\ndef land_mask_cube_55_fixture() -> Cube:\n cube = land_mask_cube_generator()\n return cube\n\n\[email protected](name=\"cube\")\ndef cube_fixture() -> Cube:\n \"\"\"Sets up a cube for testing\"\"\"\n return set_up_variable_cube(\n np.zeros((5, 5), dtype=np.float32),\n spatial_grid=\"equalarea\",\n grid_spacing=2000.0,\n domain_corner=(0.0, 0.0),\n )\n\n\nDISTANCE = 2000\n\n\ndef test_repr():\n \"\"\"Test that the __repr__ returns the expected string.\"\"\"\n result = str(OccurrenceWithinVicinity(10000))\n msg = \"<OccurrenceWithinVicinity: distance: 10000>\"\n assert result == msg\n\n\ndef test_basic(cube):\n \"\"\"Test for binary events to determine where there is an occurrence\n within the vicinity.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n )\n cube.data[0, 1] = 1.0\n cube.data[2, 3] = 1.0\n result = OccurrenceWithinVicinity(DISTANCE).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert np.allclose(result.data, expected)\n\n\ndef test_fuzzy(cube):\n \"\"\"Test for non-binary events to determine where there is an occurrence\n within the vicinity.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.5, 0.5],\n [0.0, 0.0, 0.5, 0.5, 0.5],\n [0.0, 0.0, 0.5, 0.5, 0.5],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n )\n cube.data[0, 1] = 1.0\n cube.data[2, 3] = 0.5\n result = OccurrenceWithinVicinity(DISTANCE).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert np.allclose(result.data, expected)\n\n\ndef test_different_distance(cube):\n \"\"\"Test for binary events to determine where there is an occurrence\n within the vicinity for an alternative distance.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 1.0, 1.0],\n ]\n )\n cube.data[0, 1] = 1.0\n cube.data[2, 3] = 1.0\n distance = 4000.0\n result = 
OccurrenceWithinVicinity(distance).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert np.allclose(result.data, expected)\n\n\ndef test_masked_data(cube):\n \"\"\"Test masked values are ignored in OccurrenceWithinVicinity.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 0.0, 10.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n )\n cube.data[0, 1] = 1.0\n cube.data[2, 3] = 1.0\n cube.data[0, 4] = 10.0\n mask = np.zeros((5, 5))\n mask[0, 4] = 1\n cube.data = np.ma.array(cube.data, mask=mask)\n result = OccurrenceWithinVicinity(DISTANCE).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert isinstance(result.data, np.ma.core.MaskedArray)\n assert np.allclose(result.data.data, expected)\n assert np.allclose(result.data.mask, mask)\n\n\ndef test_with_land_mask(cube, land_mask_cube):\n \"\"\"Test that a land mask is used correctly.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 10.0, 10.0],\n [1.0, 1.0, 1.0, 10.0, 10.0],\n [0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n )\n cube.data[0, 1] = 1.0 # would not cross mask\n cube.data[2, 3] = 1.0 # would cross mask\n cube.data[0, 4] = 10.0 # would not cross mask\n result = OccurrenceWithinVicinity(\n DISTANCE, land_mask_cube=land_mask_cube\n ).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert not isinstance(result.data, np.ma.core.MaskedArray) # `~isinstance(...)` was always truthy, so the old assert could never fail\n assert np.allclose(result.data, expected)\n\n\ndef test_with_land_mask_and_mask(cube, land_mask_cube):\n \"\"\"Test that a land mask is used correctly when cube also has a mask.\"\"\"\n expected = np.array(\n [\n [1.0, 1.0, 1.0, 0.0, 10.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n )\n cube.data[0, 1] = 1.0 # would not cross mask\n cube.data[2, 3] = 1.0 # would cross mask\n cube.data[0, 4] = 10.0 # is masked in input\n mask = np.zeros((5, 5))\n mask[0, 4] = 1\n cube.data = np.ma.array(cube.data, mask=mask)\n result = OccurrenceWithinVicinity(\n DISTANCE, land_mask_cube=land_mask_cube\n ).maximum_within_vicinity(cube)\n assert isinstance(result, Cube)\n assert isinstance(result.data, np.ma.core.MaskedArray)\n assert np.allclose(result.data.data, expected)\n assert np.allclose(result.data.mask, mask)\n\n\ndef test_with_invalid_land_mask_name(land_mask_cube):\n \"\"\"Test that a mis-named land mask is rejected correctly.\"\"\"\n bad_mask_cube = land_mask_cube.copy()\n bad_mask_cube.rename(\"kittens\")\n with pytest.raises(\n ValueError,\n match=\"Expected land_mask_cube to be called land_binary_mask, not kittens\",\n ):\n OccurrenceWithinVicinity(DISTANCE, land_mask_cube=bad_mask_cube)\n\n\ndef test_with_invalid_land_mask_coords(cube, land_mask_cube):\n \"\"\"Test that a spatially mis-matched land mask is rejected correctly.\"\"\"\n bad_mask_cube = land_mask_cube.copy()\n bad_points = np.array(bad_mask_cube.coord(axis=\"x\").points)\n bad_points[0] += 1\n bad_mask_cube.coord(axis=\"x\").points = bad_points\n with pytest.raises(\n ValueError,\n match=\"Supplied cube do not have the same spatial coordinates and land mask\",\n ):\n OccurrenceWithinVicinity(DISTANCE, land_mask_cube=bad_mask_cube)(cube)\n\n\[email protected](name=\"cube_with_realizations\")\ndef cube_with_realizations_fixture() -> Cube:\n return set_up_variable_cube(\n np.zeros((2, 4, 4), dtype=np.float32),\n \"lwe_precipitation_rate\",\n \"m s-1\",\n \"equalarea\",\n
grid_spacing=2000.0,\n domain_corner=(0.0, 0.0),\n )\n\n\nTIMESTEPS = [\n datetime.datetime(2017, 11, 9, 12),\n datetime.datetime(2017, 11, 9, 15),\n]\n\n\[email protected](\"land_fixture\", [None, \"all_land_cube\"])\ndef test_with_multiple_realizations_and_times(\n request, cube_with_realizations, land_fixture\n):\n \"\"\"Test for multiple realizations and times, so that multiple\n iterations will be required within the process method.\"\"\"\n cube = cube_with_realizations\n land = request.getfixturevalue(land_fixture) if land_fixture else None\n expected = np.array(\n [\n [\n [\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n ],\n [\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n ],\n [\n [\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n ],\n ]\n )\n cube = add_coordinate(\n cube, TIMESTEPS, \"time\", order=[1, 0, 2, 3], is_datetime=True,\n )\n cube.data[0, 0, 2, 1] = 1.0\n cube.data[1, 1, 1, 3] = 1.0\n orig_shape = cube.data.copy().shape\n result = OccurrenceWithinVicinity(DISTANCE, land)(cube)\n assert isinstance(result, Cube)\n assert result.data.shape == orig_shape\n assert np.allclose(result.data, expected)\n\n\[email protected](\"land_fixture\", [None, \"all_land_cube\"])\ndef test_with_multiple_realizations(request, cube_with_realizations, land_fixture):\n \"\"\"Test for multiple realizations, so that multiple\n iterations will be required within the process method.\"\"\"\n cube = cube_with_realizations\n land = request.getfixturevalue(land_fixture) if land_fixture else None\n expected = np.array(\n [\n [\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n ]\n )\n cube.data[0, 2, 1] = 1.0\n cube.data[1, 1, 3] = 1.0\n result = OccurrenceWithinVicinity(DISTANCE, land)(cube)\n assert isinstance(result, Cube)\n assert np.allclose(result.data, expected)\n\n\[email protected](\"land_fixture\", [None, \"all_land_cube\"])\ndef test_with_multiple_times(request, cube_with_realizations, land_fixture):\n \"\"\"Test for multiple times, so that multiple\n iterations will be required within the process method.\"\"\"\n cube = cube_with_realizations\n land = request.getfixturevalue(land_fixture) if land_fixture else None\n expected = np.array(\n [\n [\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n ]\n )\n cube = cube[0]\n cube = add_coordinate(cube, TIMESTEPS, \"time\", is_datetime=True,)\n cube.data[0, 2, 1] = 1.0\n cube.data[1, 1, 3] = 1.0\n orig_shape = cube.data.shape\n result = OccurrenceWithinVicinity(DISTANCE, land)(cube)\n assert isinstance(result, Cube)\n assert result.data.shape == orig_shape\n assert np.allclose(result.data, expected)\n\n\[email protected](\"land_fixture\", [None, \"all_land_cube\"])\ndef test_no_realization_or_time(request, cube_with_realizations, land_fixture):\n \"\"\"Test for no realizations and no times, so that the iterations\n will not require slicing cubes within the process method.\"\"\"\n cube = cube_with_realizations\n land = request.getfixturevalue(land_fixture) if land_fixture else 
None\n expected = np.array(\n [\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0, 0.0],\n ]\n )\n cube = cube[0]\n cube.data[2, 1] = 1.0\n orig_shape = cube.data.shape\n result = OccurrenceWithinVicinity(DISTANCE, land)(cube)\n assert isinstance(result, Cube)\n assert result.data.shape == orig_shape\n assert np.allclose(result.data, expected)\n" ]
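The vicinity tests in the file above pin down a simple semantics for the unmasked case: each occurrence field is replaced by its sliding maximum over the vicinity radius. As a cross-check only — the real IMPROVER plugin also handles input masks and land/sea constraints, which this does not — the `test_basic` expectation can be reproduced with a plain SciPy maximum filter. This is a hypothetical standalone sketch, not code from the repository:

import numpy as np
from scipy.ndimage import maximum_filter

field = np.zeros((5, 5))
field[0, 1] = 1.0
field[2, 3] = 1.0
# grid spacing and vicinity distance are both 2000 m, so the vicinity
# reaches one cell in each direction: a 3x3 sliding maximum
vicinity = maximum_filter(field, size=3)
# `vicinity` now equals the `expected` array in test_basic above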
[ [ "numpy.logical_not" ], [ "numpy.ma.MaskedArray", "numpy.ma.is_masked", "numpy.allclose" ], [ "numpy.array", "numpy.sort" ], [ "numpy.concatenate", "numpy.arange", "numpy.unique" ], [ "numpy.random.random", "numpy.random.seed", "numpy.linspace", "numpy.arange", "numpy.random.random_sample", "numpy.sort", "numpy.random.shuffle", "numpy.ones", "numpy.testing.assert_allclose", "numpy.array" ], [ "numpy.allclose", "numpy.zeros_like", "numpy.ma.array", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SimengSun/revisit-nplm
[ "bbe1cdaecf1d7d104d27b1035a591ebbd3b5141e" ]
[ "data/text.py" ]
[ "\"\"\"\n\tImplement torch iterable dataset\n\t\t- build vocab ordered by freq for \n\"\"\"\nfrom tqdm import tqdm\nimport torch\nimport torch.utils.data\nfrom torch.utils.data.dataloader import DataLoader\nimport os\nimport sys\nimport pickle5 as pickle #import pickle\nimport math\nfrom collections import defaultdict\n\nSPLITS = ['train', 'valid', 'test']\nEOS = '<eos>'\nPAD = '<pad>'\n\nclass Dataset(torch.utils.data.IterableDataset):\n\n\tdef __init__(self, data_dir, batch_size, split):\n\n\t\tself.data_dir = data_dir\n\t\tif not self.data_exist():\n\t\t\tself.build_vocab()\n\t\t\tfor s in SPLITS:\n\t\t\t\tself.binarize(s)\n\n\t\tself.load_vocab()\n\t\tself.data = self.load_data(split, batch_size) # bsz x (len(data)/bsz)\n\t\tself.start = 0\n\t\tself.end = self.data.size(1)\n\t\tself.split = split\n\n\tdef __iter__(self):\n\t\tworker_info = torch.utils.data.get_worker_info()\n\t\tif worker_info is None: # single-process data loading, return the full iterator\n\t\t\titer_start = self.start\n\t\t\titer_end = self.end\n\t\telse: \t\t\t\t\t# in a worker process split workload\n\t\t\tper_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))\n\t\t\tworker_id = worker_info.id\n\t\t\titer_start = self.start + worker_id * per_worker\n\t\t\titer_end = min(iter_start + per_worker, self.end)\n\t\treturn iter(self.data.transpose(1,0)[iter_start:iter_end])\n\n\t@property\n\tdef eos_idx(self):\n\t\treturn self.tok2id[EOS]\n\t\n\t@property\n\tdef padding_idx(self):\n\t\treturn self.tok2id[PAD]\n\n\t@property\n\tdef size(self):\n\t\treturn len(self.id2tok)\n\t\n\n\tdef build_vocab(self, min_freq=0, max_freq=sys.maxsize):\n\t\t\"\"\"\n\t\tbuild vocab + add eos\n\t\tencode sentence\n\t\t\"\"\"\n\t\twith open(os.path.join(self.data_dir, 'train.txt'), 'r') as fn:\n\t\t\tdata = fn.readlines()\n\n\t\tif 'lambada' in self.data_dir:\n\t\t\twith open(os.path.join(self.data_dir, 'test.txt'), 'r') as fn:\n\t\t\t\tdata.extend(fn.readlines())\n\n\t\t\twith open(os.path.join(self.data_dir, 'valid.txt'), 'r') as fn:\n\t\t\t\tdata.extend(fn.readlines())\n\n\t\tprint('building vocab ...')\n\t\tself.vocab = defaultdict(int)\n\t\tself.tok2id = {}\n\t\tself.id2tok = []\n\n\t\tfor line in tqdm(data):\n\t\t\tline = line.strip().split()\n\t\t\tfor tok in line:\n\t\t\t\tself.vocab[tok] += 1\n\t\t\n\t\tself.vocab = {a : self.vocab[a] for a in self.vocab if self.vocab[a] >= min_freq and self.vocab[a] <= max_freq}\n\t\t# sort vocab in case of using adaptive softmax\n\t\tself.vocab = list(sorted(self.vocab.items(), key=lambda a: a[1], reverse=True))\n\t\tprint(self.vocab[:10])\n\n\t\tif 'lambada' in self.data_dir:\n\t\t\tself.vocab = self.vocab[:60000]\n\t\t\tself.vocab.append(('<unk>', 0))\n\n\t\tself.id2tok = ['<pad>'] + ['<eos>'] + [a[0] for a in self.vocab] \n\t\tself.tok2id = {a : i for i, a in enumerate(self.id2tok)}\n\t\tself.vocab_size = len(self.id2tok)\n\n\t\tprint('end building vocab ...')\n\t\tprint('vocab size', len(self.tok2id))\n\t\twith open(os.path.join(self.data_dir, 'vocab.pkl'), 'wb') as fn: \n\t\t\tpickle.dump({'id2tok': self.id2tok, 'tok2id': self.tok2id, 'vocab_size':self.vocab_size}, fn)\n\n\tdef encode_line(self, line):\n\n\t\tif 'lambada' not in self.data_dir:\n\t\t\treturn torch.tensor([self.tok2id[tok] for tok in line+['<eos>']])\n\t\telse:\n\t\t\treturn torch.tensor([self.tok2id[tok] if tok in self.tok2id else self.tok2id['<unk>'] for tok in line])\n\n\tdef decode_tokids(self, tensor):\n\t\ttokens = []\n\t\tfor tokid in 
tensor:\n\t\t\ttokens.append(self.id2tok[tokid])\n\t\ttokens = [t if t != '<eos>' else '\\n' for t in tokens]\n\t\treturn ' '.join(tokens)\n\n\tdef binarize(self, split):\n\t\t\"\"\"binarize data to torch.tensor shape (doc_len, )\"\"\"\n\t\twith open(os.path.join(self.data_dir, f\"{split}.txt\"), \"r\") as fn:\n\t\t\tdata = [line.strip().split() for line in fn.readlines()]\n\n\t\tprint('binarizing data ...')\n\t\tdoc = []\n\t\tfor line in tqdm(data):\n\t\t\tif line != '':\n\t\t\t\tdoc.append(self.encode_line(line))\n\n\t\tdoc = torch.cat(doc)\n\n\t\tprint('end binarizing data ...')\n\t\tprint('doc shape', doc.shape)\n\t\tprint([self.id2tok[i] for i in doc[:100]])\n\t\twith open(os.path.join(self.data_dir, f\"{split}.bin\"), \"wb\") as fout:\n\t\t\tpickle.dump({\"data\": doc}, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n\tdef load_vocab(self):\n\t\t\n\t\twith open(os.path.join(self.data_dir, 'vocab.pkl'), 'rb') as fn: \n\t\t\tdata = pickle.load(fn)\n\t\tprint('loading vocab...')\n\t\tself.id2tok = data['id2tok']\n\t\tself.tok2id = data['tok2id']\n\t\tself.vocab_size = data['vocab_size']\n\t\t# self.id2freq = data['id2freq']\n\t\tprint(f'vocab size {self.vocab_size}')\n\n\tdef data_exist(self):\n\t\treturn all([os.path.exists(os.path.join(self.data_dir, f\"{fn}.bin\")) \\\n\t\t\tfor fn in ['train', 'valid', 'test'] ] + [os.path.exists(os.path.join(self.data_dir, \"vocab.pkl\"))])\n\n\tdef load_data(self, split, bsz):\n\n\t\twith open(os.path.join(self.data_dir, f\"{split}.bin\"), \"rb\") as fin:\n\t\t\tdata = pickle.load(fin)['data']\n\n\t\tnstep = data.size(0) // bsz\n\t\treturn data[ : nstep * bsz].view(bsz, -1)\n\n" ]
[ [ "torch.tensor", "torch.utils.data.get_worker_info", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VarunNangalia/ga-learner-dsmp-repo
[ "c7c1485e6745ba62e666cce7e2accf6eee30ed17" ]
[ "-Publish-Superhero-Statistics/code.py" ]
[ "# --------------\n#Header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n#path of the data file- path\r\ndata = pd.read_csv(path)\r\n#Code starts here \r\ndata['Gender'].replace('-','Agender',inplace=True)\r\ngender_count = data['Gender'].value_counts()\r\ngender_count.plot(kind='bar', title =\"Gender\",figsize=(15,10),legend=True, fontsize=12)\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nalignment=data['Alignment'].value_counts()\r\nalignment.plot.pie()\r\nplt.title('Character Alignment')\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\n#strength and combat\r\nsc_df = data[['Strength','Combat']].copy()\r\nsc_covariance= round((sc_df['Strength'].cov(sc_df['Combat'])),2)\r\nsc_strength = round((sc_df['Strength'].std()),2)\r\nsc_combat = round((sc_df['Combat'].std()),2)\r\nsc_pearson = round((sc_covariance/(sc_combat*sc_strength)),2)\r\n#intelligence and combat\r\nic_df = round((data[['Intelligence','Combat']].copy()),2)\r\nic_covariance = round((ic_df['Intelligence'].cov(ic_df['Combat'])),2)\r\nic_intelligence = round((ic_df['Intelligence'].std()),2)\r\nic_combat = round((ic_df['Combat'].std()),2)\r\nic_pearson = round((ic_covariance/(ic_combat*ic_intelligence)),2)\r\n\n\n\n# --------------\n#Code starts here\r\ntotal_high = np.quantile(data['Total'], .99)\r\n#print(total_high)\r\nsuper_best = data[data['Total']>total_high]\r\n\r\nsuper_best_names = super_best['Name'].tolist()\r\nprint(super_best_names)\n\n\n# --------------\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfig,(ax_1,ax_2,ax_3) = plt.subplots(3,1)\r\nax_1.plot(data['Intelligence'])\r\nax_1.set_title('Intelligence')\r\nax_1.set_xlabel('Intelligence')\r\nax_1.legend()\r\n\r\nax_2.plot(data['Speed'])\r\nax_2.set_title('Speed')\r\nax_2.set_xlabel('Speed')\r\nax_2.legend()\r\n\r\nax_3.plot(data['Power'])\r\nax_3.set_title('Power')\r\nax_3.set_xlabel('Power')\r\nax_3.legend()\r\nplt.tight_layout()\r\nplt.show()\n\n\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.quantile", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
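The `sc_pearson` and `ic_pearson` values in code.py above assemble Pearson correlations by hand as covariance over the product of standard deviations; `Series.corr` returns the same quantity directly (Pearson is its default method). A quick check on a made-up frame:

import pandas as pd

df = pd.DataFrame({"Strength": [10, 20, 30, 40], "Combat": [12, 18, 33, 41]})
manual = df["Strength"].cov(df["Combat"]) / (df["Strength"].std() * df["Combat"].std())
direct = df["Strength"].corr(df["Combat"])  # Pearson by default
assert abs(manual - direct) < 1e-12
print(round(direct, 2))  # 0.99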
ParikhKadam/cycloid
[ "c5e64e8379f801417a38755eb6b2fde881dabd8c", "c5e64e8379f801417a38755eb6b2fde881dabd8c" ]
[ "design/trackplan/trackplan.py", "tools/ceilslam/gui.py" ]
[ "# racetrack route planner\n# based on apex cone locations and track widths, get a \nimport numpy as np\n\n\nmaxv = 10\nlaterala = 8\nmaxk = 1.5\nbw_v = np.pi*2*0.7\nbw_w = np.pi*2*1.5\n\n# track is a set of points and radii (positive or negative if track goes CCW/CW\n# around each point)\n# so first we determine the nearest point\n\n# T is [5, NUM_PTS]; [(x, y, r), i]\n# TODO: precompute handy normals\n\n\ndef gettargetv(k):\n kmin = laterala / (maxv**2)\n targetv = maxv\n if np.abs(k) > kmin:\n targetv = np.sqrt(laterala / np.abs(k))\n return targetv\n\n\n# get nearest point on track, its direction normal and curvature\ndef gettrack(xy, T):\n Tn = np.hstack([T[:, 1:], T[:, :1]])\n Nn = Tn[:2] - T[:2]\n L = np.linalg.norm(Nn, axis=0)\n S = (Tn[2] - T[2]) / L\n C = np.sqrt(1 - S**2)\n Nn /= L\n Nn = np.vstack([-Nn[0]*S - Nn[1]*C, Nn[0]*C - Nn[1]*S])\n\n # we need to rotate Nn / Np based on the radii of the current/next/previous points\n # ...unless they have the same radius, in which case this doesn't work.\n # ln = np.linalg.norm(Tn[:2] - T[:2], axis=0) * T[2] / (Tn[2] - T[2])\n # print 'ln', ln\n # crap. for now let's just ignore the problem\n\n Pn = (Tn[:2] + Nn*Tn[2]).T\n P = (T[:2] + Nn*T[2]).T\n\n tnum = np.sum((Pn - P)*(xy - P), axis=1)\n tden = np.sum((Pn - P)**2, axis=1)\n t = np.clip(tnum / tden, 0, 1)\n # closest point on each edge in the polygon to xy\n pxy = (P.T*(1-t) + Pn.T*t).T\n dists = np.sqrt(np.sum((pxy-xy)**2, axis=1))\n i = np.argmin(dists)\n if t[i] == 0 or t[i] == 1:\n if t[i] == 1:\n i = (i+1) % T.shape[1]\n # closest point is one of the circles\n dp = xy - T[:2, i].T\n dp /= np.linalg.norm(dp)\n p = T[:2, i].T + dp * np.abs(T[2, i])\n n = np.array([dp[1], -dp[0]]) * np.sign(T[2, i])\n return p, n, 1.0/T[2, i], gettargetv(1.0/T[2, i])\n else:\n # closest point is on the linear sections\n n = Pn[i] - P[i]\n n /= np.linalg.norm(n)\n finalv = gettargetv(1.0/Tn[2, i])\n # need to compute deceleration\n tt = t[i]**2\n return pxy[i], n, 0, maxv*(1-tt) + tt*finalv\n\n return None\n\n\ndef step(X, u, targetv, dt):\n # X = [x y theta v w]\n # velocity control\n if targetv > X[3]:\n ebw = np.exp(-bw_v * dt)\n else:\n ebw = np.exp(-bw_v * 2 * dt)\n vnew = (1 - ebw) * targetv + ebw * X[3]\n v = (X[3] + vnew) * 0.5\n\n # yaw rate control\n targetw = v * u\n ebw = np.exp(-bw_w * dt)\n wnew = (1 - ebw) * targetw + ebw * X[4]\n thetanew = X[2] + wnew * dt\n theta = X[2] + wnew * dt * 0.5\n\n X[0] += np.cos(theta)*v*dt\n X[1] += np.sin(theta)*v*dt\n X[2] = thetanew\n X[3] = vnew\n X[4] = wnew\n\n return X\n\n\ndef drive(X, dt):\n p, n, k, v = gettrack(X[:2], T)\n nx = np.array([n[1], -n[0]])\n ye = np.dot(X[:2] - p, nx)\n C, S = np.cos(X[2]), np.sin(X[2])\n R = np.array([[C, S], [-S, C]])\n Rn = np.dot(R, n)\n # not sure if psie is backwards or not\n psie = np.arctan2(Rn[1], Rn[0])\n # print n, C, S, ye, psie, k\n Cp = np.cos(psie)\n Sp = np.sin(psie)\n # print psie, Cp, Sp\n Cpy = Cp / (1 - k * ye)\n Kpy = 1.0\n Kvy = 5.0\n ds = X[3]*Cpy*dt\n return -Cpy*(ye*Cpy*(-Kpy*Cp) + Sp*(k*Sp - Kvy*Cp) + k), v, ds\n\n\ndef trackexport(T):\n ''' compute various positions and normals for export '''\n output = np.zeros((9, T.shape[1]))\n\n output[:3] = T[:3] # first three dimensions of output are unchanged\n\n Tn = np.hstack([T[:, 1:], T[:, :1]])\n Nn = Tn[:2] - T[:2]\n L = np.linalg.norm(Nn, axis=0)\n S = (Tn[2] - T[2]) / L\n C = np.sqrt(1 - S**2)\n Nn /= L\n Nn = np.vstack([-Nn[0]*S - Nn[1]*C, Nn[0]*C - Nn[1]*S])\n Nn /= np.linalg.norm(Nn, axis=0)\n\n # we need to rotate Nn / Np based on 
the radii of the current/next/previous points\n # ...unless they have the same radius, in which case this doesn't work.\n # ln = np.linalg.norm(Tn[:2] - T[:2], axis=0) * T[2] / (Tn[2] - T[2])\n # print 'ln', ln\n # crap. for now let's just ignore the problem\n\n Pn = (Tn[:2] + Nn*Tn[2])\n P = (T[:2] + Nn*T[2])\n output[3:5] = P\n output[5:7] = Pn\n output[7:9] = Nn\n\n print(output.shape[1])\n for i in range(output.shape[1]):\n print(' '.join(map(str, output[:, i])))\n\n return output\n\n\nif __name__ == '__main__':\n from matplotlib import pyplot as plt\n T = np.array([\n [0, 0, 1],\n [9, -1, 2],\n [10, -4, 1],\n [5, -3, -1],\n [0, -5, 1],\n ], np.float32).T\n\n T = np.array([\n [208, -181, 147],\n [488, -170, 110],\n [304, -306, -118],\n [126, -198, 88],\n ], np.float32).T*1.2\n T[1] -= 20\n T[0] -= 408\n T[1] += 102\n T *= 0.02\n\n print(trackexport(T))\n\n if False:\n plt.plot(T[0], T[1], 'o')\n t = np.linspace(0, 2*np.pi, 100)\n for x in range(T.shape[1]):\n plt.plot(np.cos(t)*T[2, x] + T[0, x], np.sin(t)*T[2, x] + T[1, x])\n plt.axis('equal')\n\n xy = np.array([7.0, -3.0])\n pp, n, k, _ = gettrack(xy, T)\n plt.plot(xy[0], xy[1], 'o')\n\n plt.plot([pp[0], pp[0]+n[0]], [pp[1], pp[1] + n[1]], '-x')\n\n plt.show()\n\n if False:\n X = np.zeros(5)\n v = np.zeros(100)\n w = np.zeros(100)\n for i in range(100):\n X = step(X, 2, 5, 1.0/30)\n v[i] = X[3]\n w[i] = X[4]\n plt.plot(v)\n plt.plot(w)\n plt.plot(w/v)\n plt.show()\n\n if True:\n totalS = 0\n X = np.array([1, 1, 0.8, 0, 0], np.float32)\n Nsteps = 222*5\n xy = np.zeros((2, Nsteps))\n dt = 1.0 / 30\n y = 0\n for i in range(Nsteps):\n u, v, ds = drive(X, dt)\n u = np.clip(u, -maxk, maxk)\n X = step(X, u, v, dt)\n xy[:, i] = X[:2]\n totalS += ds\n\n print('distance around track', totalS)\n plt.plot(T[0], T[1], 'o')\n t = np.linspace(0, 2*np.pi, 100)\n for x in range(T.shape[1]):\n plt.plot(np.cos(t)*T[2, x] + T[0, x], np.sin(t)*T[2, x] + T[1, x])\n plt.axis('equal')\n\n plt.plot(xy[0], xy[1])\n plt.plot(xy[0, -1], xy[1, -1], 'x')\n plt.show()\n", "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport cv2\nimport numpy as np\nimport time\nimport glfw\nimport OpenGL.GL as gl\nimport imgui\nfrom imgui.integrations.glfw import GlfwRenderer\n\nimport ceiltrack\nimport recordreader\n\n# starting position for localization\n# negative x because we also mirror the track about X\nHOME = [ceiltrack.X_GRID*-2.5, ceiltrack.Y_GRID*0.5]\n\n\ndef load_texture(im):\n # gl.glEnable(gl.GL_TEXTURE_2D)\n texid = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, texid)\n gl.glTexParameteri(gl.GL_TEXTURE_2D,\n gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\n gl.glTexParameteri(gl.GL_TEXTURE_2D,\n gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA,\n im.shape[1], im.shape[0], 0,\n gl.GL_BGR, gl.GL_UNSIGNED_BYTE, im)\n return texid\n\n\ndef unload_texture(texid):\n gl.glDeleteTextures([texid])\n\n\ndef impl_glfw_init():\n width, height = 1280, 720\n window_name = \"cycloid replay viewer\"\n\n if not glfw.init():\n print(\"Could not initialize OpenGL context\")\n exit(1)\n\n # OS X supports only forward-compatible core profiles from 3.2\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n\n glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)\n\n # Create a windowed mode window and its OpenGL context\n window = glfw.create_window(\n int(width), int(height), window_name, None, None\n )\n 
glfw.make_context_current(window)\n\n if not window:\n glfw.terminate()\n print(\"Could not initialize Window\")\n exit(1)\n\n return window\n\n\nclass SLAMGUI:\n def __init__(self, fname):\n self.unloadlist = []\n self.f = open(fname, \"rb\")\n print(\"scanning \", fname, \"...\")\n self.scanner = recordreader.RecordScanner(self.f)\n self.frametexid = None\n self.playing = False\n self.ts = []\n self.camdata = ceiltrack.ceillut()\n self.f.seek(0, 0)\n self.ceilheight = ceiltrack.CEIL_HEIGHT\n\n # do a full tracking here on load\n B = np.float32([HOME[0], HOME[1], 0])\n self.track = []\n match_time = 0\n opt_time = 0\n first = True\n floordata = []\n floormask = None\n for frdata in recordreader.RecordIterator(self.f):\n if 'yuv420' not in frdata:\n continue\n self.ts.append(frdata['tstamp'])\n yuv420 = frdata['yuv420']\n gray = yuv420[:480]\n bgr = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)\n t0 = time.time()\n xy = ceiltrack.match(gray, *self.camdata)\n tm = time.time()\n if first:\n first = False\n for i in range(6):\n cost, dB = ceiltrack.cost(xy, *B)\n B += dB\n #B_straight, cost_straight = B, cost\n #B = np.float32([HOME[0], HOME[1], np.pi/2])\n #for i in range(6):\n # cost, dB = ceiltrack.cost(xy, *B)\n # B += dB\n #if cost_straight < cost:\n # B = B_straight\n # we need an example frame to initialize the floor lookup table\n # to filter out the visible body posts\n self.floorlut = ceiltrack.floorlut(gray)\n floormask = self.floorlut[0]\n else:\n for i in range(2):\n c, dB = ceiltrack.cost(xy, *B)\n B += dB\n topt = time.time()\n match_time += tm - t0\n opt_time += topt - tm\n self.track.append(B.copy())\n floordata.append(bgr[floormask])\n self.ts = np.array(self.ts)\n self.track = np.array(self.track)\n self.origtrack = self.track.copy()\n self.track[:, 0] = -self.track[:, 0]\n self.track[:, 2] = -self.track[:, 2]\n # mirror the floor-pixel lookup table x coordinates also\n self.floorlut[1][0] = -self.floorlut[1][0]\n self.floordata = np.array(floordata)\n\n self.loadframe(0)\n print(\"done,\", match_time, \"secs match_time\", opt_time, \"sec opt_time\")\n floorimg = ceiltrack.render_floor(\n self.track, self.floordata, self.floorlut[1])\n if True:\n xgm = ceiltrack.X_GRID * ceiltrack.CEIL_HEIGHT\n ygm = ceiltrack.Y_GRID * ceiltrack.CEIL_HEIGHT\n Z = 50 # pixels per meter\n for x in range(0, 1+int(1000 / (xgm*Z))):\n for y in range(0, 1+int(500 / (ygm*Z))):\n cv2.circle(floorimg, (int(x*xgm*Z), int(y*ygm*Z)), int(0.25*Z), (255, 255, 0))\n cv2.imwrite(\"map.png\", floorimg)\n self.floortex = load_texture(floorimg)\n print(\"home location:\", HOME)\n\n def loadframe(self, i):\n if self.frametexid is not None:\n self.unloadlist.append(self.frametexid)\n self.i = i\n self.frame = self.scanner.frame(i)\n if 'yuv420' not in self.frame:\n return\n yuv420 = self.frame['yuv420']\n # optional: front view and annotated ceiling view?\n im = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)\n\n xg = ceiltrack.X_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT\n yg = ceiltrack.Y_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT\n gray = yuv420[:480]\n xy = ceiltrack.match(gray, *self.camdata)\n B = self.origtrack[self.i]\n for i in range(6):\n cost, dB = ceiltrack.costxyg(xg, yg, xy, *B)\n B += dB\n\n for gp in ceiltrack.mkgrid(xg, yg, 31, *-B)[0]:\n cv2.circle(im, (int(gp[0]), int(gp[1])), 3, (255, 0, 0), 1)\n\n self.frametexid = load_texture(im)\n\n def nextframe(self):\n if self.i < self.scanner.num_frames() - 1:\n self.loadframe(self.i+1)\n\n def render_timeline(self):\n imgui.begin(\"timeline\")\n 
tstamp = self.frame['tstamp']\n if imgui.button(\"<\"):\n self.playing = False\n if self.i > 0:\n self.loadframe(self.i - 1)\n imgui.same_line()\n if self.playing:\n if (self.i == len(self.ts)-1) or imgui.button(\"stop\"):\n self.playing = False\n elif time.time() >= self.ts[self.i+1] - self.t0:\n self.nextframe()\n elif imgui.button(\"play\"):\n self.playing = True\n self.t0 = tstamp - time.time()\n imgui.same_line()\n if imgui.button(\">\"):\n self.playing = False\n self.nextframe()\n tsfrac = tstamp - int(tstamp)\n tstring = time.strftime(\"%H:%M:%S.\", time.localtime(\n tstamp)) + \"%02d\" % (tsfrac*100)\n imgui.same_line()\n imgui.text(tstring)\n\n w = imgui.get_window_width()\n imgui.image(self.frametexid, w, 480*w/640)\n\n changed, i = imgui.slider_int(\n \"frame\", self.i, 0, self.scanner.num_frames()-1)\n if changed:\n self.playing = False\n self.loadframe(i)\n imgui.end()\n\n def render_map(self):\n imgui.begin(\"map\")\n imgui.slider_float(\"x (m)\", self.track[self.i, 0] * ceiltrack.CEIL_HEIGHT, -80, 80)\n imgui.slider_float(\"y (m)\", self.track[self.i, 1] * ceiltrack.CEIL_HEIGHT, -80, 80)\n imgui.slider_float(\"theta\", self.track[self.i, 2] % (np.pi*2), -7, 7)\n imgui.slider_float(\"x (grid)\", self.track[self.i, 0] / ceiltrack.X_GRID, -10, 10)\n imgui.slider_float(\"y (grid)\", self.track[self.i, 1] / ceiltrack.X_GRID, -10, 10)\n\n changed, self.ceilheight = imgui.slider_float(\"ceiling height (m)\", self.ceilheight, 2, 4)\n if changed:\n self.loadframe(self.i)\n\n dl = imgui.get_window_draw_list()\n pos = imgui.get_cursor_screen_pos()\n siz = imgui.get_content_region_available()\n if siz[1] == 0:\n siz = [400, 300]\n # just use a fixed size\n w = siz[0]\n imgui.image_button(self.floortex, w, w/2, frame_padding=0)\n # imgui.image_button(self.floortex, siz[0], siz[0])\n origin = [pos[0], pos[1]]\n scale = 50 * ceiltrack.CEIL_HEIGHT * w/1000\n trackcolor = imgui.get_color_u32_rgba(0.3, 0.5, 0.3, 1)\n for i in range(1, self.i):\n dl.add_line(\n origin[0] + scale * self.track[i-1, 0],\n origin[1] + scale * self.track[i-1, 1],\n origin[0] + scale * self.track[i, 0],\n origin[1] + scale * self.track[i, 1],\n trackcolor, 1.5)\n\n carcolor = imgui.get_color_u32_rgba(0, 1, 0.6, 1)\n B = self.track[self.i]\n dl.add_line(\n origin[0] + scale * B[0],\n origin[1] + scale * B[1],\n origin[0] + scale * (B[0] + np.cos(B[2])),\n origin[1] + scale * (B[1] - np.sin(B[2])),\n carcolor, 1.5)\n\n imgui.end()\n\n def render(self):\n for t in self.unloadlist:\n unload_texture(t)\n self.unloadlist = []\n self.render_timeline()\n self.render_map()\n\n\ndef main(recfile):\n imgui.create_context()\n window = impl_glfw_init()\n impl = GlfwRenderer(window)\n slamgui = SLAMGUI(recfile)\n\n while not glfw.window_should_close(window):\n glfw.poll_events()\n impl.process_inputs()\n imgui.new_frame()\n\n if imgui.begin_main_menu_bar():\n if imgui.begin_menu(\"File\", True):\n clicked_quit, _ = imgui.menu_item(\n \"Quit\", 'Cmd+Q', False, True)\n if clicked_quit:\n exit(0)\n imgui.end_menu()\n imgui.end_main_menu_bar()\n\n slamgui.render()\n gl.glClearColor(0, 0, 0, 1)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n\n imgui.render()\n impl.render(imgui.get_draw_data())\n glfw.swap_buffers(window)\n\n impl.shutdown()\n glfw.terminate()\n\n\nif __name__ == \"__main__\":\n import sys\n\n if len(sys.argv) < 2:\n print(\"usage:\", sys.argv[0], \"[cycloid-x.rec]\")\n exit(1)\n\n main(sys.argv[1])\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.linspace", "numpy.arctan2", "matplotlib.pyplot.plot", "numpy.argmin", "numpy.exp", "numpy.hstack", "numpy.clip", "numpy.sin", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.show", "numpy.array", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.cos", "numpy.sign", "numpy.vstack" ], [ "numpy.array", "numpy.cos", "numpy.float32", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
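In trackplan.py above, `gettargetv` caps speed so that the lateral acceleration v**2 * k never exceeds `laterala`; the same rule as a standalone sketch, reusing the file's own constants (maxv = 10, laterala = 8):

import numpy as np

maxv, laterala = 10.0, 8.0

def gettargetv(k):
    kmin = laterala / maxv**2  # curvature below 0.08 never limits speed
    return np.sqrt(laterala / abs(k)) if abs(k) > kmin else maxv

print(gettargetv(0.05))  # 10.0 -> gentle curve, straight-line cap applies
print(gettargetv(0.5))   # 4.0  -> sqrt(8 / 0.5), lateral-acceleration limited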
GaoxiangLuo/flame
[ "16bd1715a545421d45ea0fc32544e448389de49c", "16bd1715a545421d45ea0fc32544e448389de49c" ]
[ "lib/python/flame/examples/mnist/aggregator/pytorch/main.py", "lib/python/flame/optimizer/fedopt.py" ]
[ "# Copyright 2022 Cisco Systems, Inc. and its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"MNIST horizontal FL aggregator for PyTorch.\n\nThe example below is implemented based on the following example from pytorch:\nhttps://github.com/pytorch/examples/blob/master/mnist/main.py.\n\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom flame.config import Config\nfrom flame.dataset import Dataset\nfrom flame.mode.horizontal.top_aggregator import TopAggregator\nfrom torchvision import datasets, transforms\n\nlogger = logging.getLogger(__name__)\n\n\nclass Net(nn.Module):\n \"\"\"Net class.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize.\"\"\"\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout(0.25)\n self.dropout2 = nn.Dropout(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\nclass PyTorchMnistAggregator(TopAggregator):\n \"\"\"PyTorch Mnist Aggregator.\"\"\"\n\n def __init__(self, config: Config) -> None:\n \"\"\"Initialize a class instance.\"\"\"\n self.config = config\n self.model = None\n self.dataset: Dataset = None\n\n self.device = None\n self.test_loader = None\n\n def initialize(self):\n \"\"\"Initialize role.\"\"\"\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.model = Net().to(self.device)\n\n def load_data(self) -> None:\n \"\"\"Load a test dataset.\"\"\"\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])\n\n dataset = datasets.MNIST('./data',\n train=False,\n download=True,\n transform=transform)\n\n self.test_loader = torch.utils.data.DataLoader(dataset)\n\n # store data into dataset for analysis (e.g., bias)\n self.dataset = Dataset(dataloader=self.test_loader)\n\n def train(self) -> None:\n \"\"\"Train a model.\"\"\"\n # Implement this if testing is needed in aggregator\n pass\n\n def evaluate(self) -> None:\n \"\"\"Evaluate (test) a model.\"\"\"\n self.model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.test_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n test_loss += F.nll_loss(\n output, target,\n reduction='sum').item() # sum up batch loss\n pred = output.argmax(\n dim=1,\n keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n total = len(self.test_loader.dataset)\n test_loss /= total\n test_accuray = correct / total\n\n logger.info(f\"Test loss: {test_loss}\")\n 
logger.info(f\"Test accuracy: {correct}/{total} ({test_accuray})\")\n\n # update metrics after each evaluation so that the metrics can be\n # logged in a model registry.\n self.update_metrics({\n 'test-loss': test_loss,\n 'test-accuracy': test_accuray\n })\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('config', nargs='?', default=\"./config.json\")\n\n args = parser.parse_args()\n\n config = Config(args.config)\n\n a = PyTorchMnistAggregator(config)\n a.compose()\n a.run()\n", "# Copyright 2022 Cisco Systems, Inc. and its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"FedOPT optimizer\"\"\"\n\"\"\"https://arxiv.org/abs/2003.00295\"\"\"\nfrom abc import abstractmethod\nimport logging\n\nfrom diskcache import Cache\n\nfrom .fedavg import FedAvg\nfrom ..common.util import (MLFramework, get_ml_framework_in_use,\n valid_frameworks)\n\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(__name__)\n\nclass FedOPT(FedAvg):\n \"\"\"FedOPT class.\"\"\"\n\n def __init__(self, beta_1, beta_2, eta, tau):\n \"\"\"Initialize FedOPT instance.\"\"\"\n super().__init__()\n self.current_weights = None\n self.d_t = None\n self.m_t = None\n self.v_t = None\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.eta = eta\n self.tau = tau\n\n ml_framework_in_use = get_ml_framework_in_use()\n if ml_framework_in_use == MLFramework.PYTORCH:\n self.adapt_fn = self._adapt_pytorch\n elif ml_framework_in_use == MLFramework.TENSORFLOW:\n self.adapt_fn = self._adapt_tesnorflow\n else:\n raise NotImplementedError(\n \"supported ml framework not found; \"\n f\"supported frameworks are: {valid_frameworks}\")\n\n def do(self, cache: Cache, total: int):\n \"\"\"Do aggregates models of trainers.\n\n Return: aggregated model\n \"\"\"\n logger.debug(\"calling fedopt\")\n\n self.agg_weights = super().do(cache, total)\n if self.agg_weights is None:\n return self.current_weights\n\n if self.current_weights is None:\n self.current_weights = self.agg_weights\n else:\n self.adapt_fn(self.agg_weights, self.current_weights)\n\n return self.current_weights\n\n @abstractmethod\n def _delta_v_pytorch(self):\n return\n\n @abstractmethod\n def _delta_v_tensorflow(self):\n return\n\n def _adapt_pytorch(self, average, current):\n import torch\n logger.debug(\"calling _adapt_pytorch\")\n\n self.d_t = {k: average[k] - current[k] for k in average.keys()}\n\n if self.m_t is None:\n self.m_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()}\n self.m_t = {k: self.beta_1 * self.m_t[k] + (1 - self.beta_1) * self.d_t[k] for k in self.m_t.keys()}\n\n if self.v_t is None:\n self.v_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()}\n self._delta_v_pytorch()\n\n self.current_weights = OrderedDict({k: self.current_weights[k] + self.eta * self.m_t[k] / (torch.sqrt(self.v_t[k]) + self.tau) for k in self.current_weights.keys()})\n\n def _adapt_tesnorflow(self, average, current):\n 
logger.debug(\"calling _adapt_tensorflow\")\n # TODO: Implement Tensorflow Version\n raise NotImplementedError(\"Tensorflow implementation not yet implemented\")\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.nn.functional.relu", "torch.no_grad", "torch.cuda.is_available", "torch.flatten", "torch.nn.functional.max_pool2d" ], [ "torch.sqrt", "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
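`_adapt_pytorch` in fedopt.py above applies an Adam-style server update to the delta between the aggregated and current global weights, while `_delta_v_pytorch` is left abstract. A scalar sketch of one such step, assuming the FedAdam rule v_t = beta_2 * v + (1 - beta_2) * delta**2 from the cited paper (arXiv:2003.00295) for the second moment:

import torch

beta_1, beta_2, eta, tau = 0.9, 0.99, 1e-2, 1e-3
current, average = torch.tensor(0.0), torch.tensor(1.0)
m_t = torch.tensor(0.0)
v_t = torch.tensor(0.0)

d_t = average - current                     # pseudo-gradient seen by the server
m_t = beta_1 * m_t + (1 - beta_1) * d_t     # first moment, as in _adapt_pytorch
v_t = beta_2 * v_t + (1 - beta_2) * d_t**2  # second moment (FedAdam-style assumption)
current = current + eta * m_t / (torch.sqrt(v_t) + tau)
print(current)  # tensor(0.0099)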
Ravie403/chainerrl-visualizer
[ "302bcd574d435ab68652b084764d4bb777300494" ]
[ "examples/a3c_breakout/main.py" ]
[ "import chainer\nimport numpy as np\nfrom chainerrl.agents import a3c\nfrom chainerrl import links\nfrom chainerrl import misc\nfrom chainerrl.optimizers import rmsprop_async\nfrom chainerrl import policy\nfrom chainerrl import v_function\n\nfrom chainerrl.wrappers import atari_wrappers\n\nfrom chainerrl_visualizer import launch_visualizer\n\n\nclass A3CFF(chainer.ChainList, a3c.A3CModel):\n\n def __init__(self, n_actions):\n self.head = links.NIPSDQNHead()\n self.pi = policy.FCSoftmaxPolicy(\n self.head.n_output_channels, n_actions)\n self.v = v_function.FCVFunction(self.head.n_output_channels)\n super().__init__(self.head, self.pi, self.v)\n\n def pi_and_v(self, state):\n out = self.head(state)\n return self.pi(out), self.v(out)\n\n\ndef phi(x):\n # Feature extractor\n return np.asarray(x, dtype=np.float32) / 255\n\n\ndef make_env():\n env = atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(env_name),\n episode_life=False,\n clip_rewards=False)\n env.seed(seed)\n return env\n\n\nseed = 0\nenv_name = 'BreakoutNoFrameskip-v4'\n\nmisc.set_random_seed(seed)\n\nenv = make_env()\nn_actions = env.action_space.n\n\nmodel = A3CFF(n_actions)\nopt = rmsprop_async.RMSpropAsync(lr=7e-4, eps=1e-1, alpha=0.99)\nopt.setup(model)\nopt.add_hook(chainer.optimizer.GradientClipping(40))\n\nagent = a3c.A3C(model, opt, t_max=5, gamma=0.99,\n beta=1e-2, phi=phi)\n\nagent.load('parameters')\n\nACTION_MEANINGS = {\n 0: 'NOOP',\n 1: 'FIRE',\n 2: 'RIGHT',\n 3: 'LEFT',\n}\n\nlaunch_visualizer(agent, env, ACTION_MEANINGS, raw_image_input=True)\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
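`phi` in main.py above is the only transform between the Atari frames and the network: it rescales raw uint8 pixels to float32 values in [0, 1]. The same transform on a dummy observation (the stacked-frame shape here is invented for illustration):

import numpy as np

def phi(x):
    # Feature extractor, copied from the record above
    return np.asarray(x, dtype=np.float32) / 255

frame = np.full((4, 84, 84), 255, dtype=np.uint8)  # fake stacked Atari frames
obs = phi(frame)
print(obs.dtype, obs.max())  # float32 1.0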
mbed92/rl-physnet
[ "62b6e8a84a6704a50855434933a147f507f94263" ]
[ "nn/train/log.py" ]
[ "import os\n\nimport tensorflow as tf\nfrom tqdm import tqdm\n\n\nclass ExperimentHandler:\n\n def __init__(self, working_path, out_name, max_to_keep=3, **objects_to_save) -> None:\n super().__init__()\n\n # prepare log writers\n train_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'train')\n val_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'val')\n\n self.train_writer = tf.summary.create_file_writer(train_log_path)\n self.val_writer = tf.summary.create_file_writer(val_log_path)\n\n # prepare checkpoints\n self.last_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'last')\n self.best_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'best')\n\n self.checkpoint_last, self.checkpoint_manager_last = _prepare_checkpoint_manager(\n self.last_path, max_to_keep,\n **objects_to_save\n )\n\n self.checkpoint_best, self.checkpoint_manager_best = _prepare_checkpoint_manager(\n self.best_path, max_to_keep,\n **objects_to_save\n )\n\n def log_training(self):\n self.train_writer.set_as_default()\n\n def log_validation(self):\n self.val_writer.set_as_default()\n\n def flush(self):\n self.train_writer.flush()\n self.val_writer.flush()\n\n def save_last(self):\n self.checkpoint_manager_last.save()\n\n def save_best(self):\n self.checkpoint_manager_best.save()\n\n def restore_best(self):\n self.checkpoint_best.restore(self.checkpoint_manager_best.latest_checkpoint)\n\n def restore(self, path):\n self.checkpoint_last.restore(tf.train.latest_checkpoint(path)).assert_consumed()\n\n\ndef restore_from_checkpoint(path, **kwargs):\n checkpoint = tf.train.Checkpoint(**kwargs)\n return checkpoint.restore(path)\n\n\ndef restore_from_checkpoint_latest(path, **kwargs):\n return restore_from_checkpoint(tf.train.latest_checkpoint(path), **kwargs)\n\n\ndef as_progressbar(label, epoch, total):\n bar = '%s epoch %d | {l_bar}{bar} | Elapsed: {elapsed} | Remaining: {remaining} | Inverted Rate: {rate_inv_fmt}' \\\n % (label, epoch)\n return tqdm(ncols=120, bar_format=bar, total=total)\n\n\ndef _prepare_checkpoint_manager(path, max_to_keep, **kwargs):\n checkpoint = tf.train.Checkpoint(**kwargs)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint=checkpoint,\n directory=path,\n max_to_keep=max_to_keep\n )\n return checkpoint, checkpoint_manager\n\n\ndef _get_or_create_dir(*paths):\n join_path = os.path.join(*paths)\n os.makedirs(join_path, exist_ok=True)\n return join_path\n" ]
[ [ "tensorflow.train.latest_checkpoint", "tensorflow.train.Checkpoint", "tensorflow.train.CheckpointManager", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
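`ExperimentHandler` in log.py above is a thin wrapper around `tf.train.Checkpoint` and `tf.train.CheckpointManager`; the save/restore round trip it automates looks like this on its own (the path and toy variable are made up):

import tensorflow as tf

step = tf.Variable(0, dtype=tf.int64)
ckpt = tf.train.Checkpoint(step=step)
manager = tf.train.CheckpointManager(ckpt, directory="/tmp/demo_ckpts", max_to_keep=3)

step.assign(7)
manager.save()                           # writes /tmp/demo_ckpts/ckpt-1
step.assign(0)                           # clobber the value ...
ckpt.restore(manager.latest_checkpoint)  # ... and recover it from disk
print(step.numpy())                      # 7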
pirovc/grimer
[ "169f8d3009004d6d2f4ca4d3e7dfec819078cb34", "169f8d3009004d6d2f4ca4d3e7dfec819078cb34" ]
[ "scripts/ehomd_download.py", "scripts/bacdive_download.py" ]
[ "#!/usr/bin/env python3\nimport pandas as pd\nimport sys\nimport urllib.request\nimport re\n\n\ndef get_taxid(url):\n try:\n sys.stderr.write(url+\"\\n\")\n assembly_stats = url + \"/\" + url.split(\"/\")[-1] + \"_assembly_stats.txt\"\n filedata = urllib.request.urlopen(assembly_stats).read().decode()\n x = re.search(\"# Taxid:[\\s0-9]*\\\\r\\\\n\", filedata)\n if x:\n return re.findall(\"\\d+\", x.group())[0]\n else:\n return None\n except:\n return None\n\n# Can be Oral, Nasal or both (\"Nasal,Oral\")\nhabitats = [\"Oral\", \"Nasal\"]\ndata = \"http://www.ehomd.org/ftp/genomes/PROKKA/current/SEQID_info.csv\"\n\ndf = pd.read_table(data, sep=\",\", usecols=[\"Habitat\", \"Sequence_Source\"])\ndf = df[df[\"Habitat\"].isin(habitats + [\"Nasal,Oral\"])].drop_duplicates()\ndf[\"taxid\"] = df[\"Sequence_Source\"].map(get_taxid)\n\nprint('\"Human Oral Microbiome Database (eHOMD)\":')\nfor h in habitats:\n print(' \"' + h + '\":')\n parsed_ids = set(df.taxid[df.Habitat.str.contains(h)])\n print(' url: \"http://www.ehomd.org/?name=HOMD\"')\n print(\" ids: [\" + \", \".join(parsed_ids) + \"]\")\n\nsys.stderr.write(\"Could not retrieve taxid for: \" + \"\\n\".join(df[df.taxid.isna()][\"Sequence_Source\"].to_list()) + \"\\n\")\n", "#!/usr/bin/env python3\nimport pandas as pd\nfrom multitax import NcbiTx\nimport sys\n\n## TODO\n## filter infection?\n## find names?\n\ndata = {(\"Host_Human-HostBodySite_Limb\", \"Limbs\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=39&filters%5B1%5D%5Bcat3%5D=&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodySite_Organ_Ear\", \"Ear\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=40&filters%5B1%5D%5Bcat3%5D=209&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodySite_Organ_Eye\", \"Eye\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=40&filters%5B1%5D%5Bcat3%5D=210&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodySite_Organ_Nose\", \"Nose\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=40&filters%5B1%5D%5Bcat3%5D=217&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodySite_Organ_SkinNailHair\", \"Skin/Nail/Hair\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=40&filters%5B1%5D%5Bcat3%5D=219&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodySite_Organ_OralCavityAndAirways\", \"Oral\"): \"https://bacdive.dsmz.de/isolation-sources?filters%5B0%5D%5Bcat1%5D=4&filters%5B0%5D%5Bcat2%5D=29&filters%5B0%5D%5Bcat3%5D=&filters%5B0%5D%5Bcolor%5D=4&filters%5B1%5D%5Bcat1%5D=5&filters%5B1%5D%5Bcat2%5D=41&filters%5B1%5D%5Bcat3%5D=&filters%5B1%5D%5Bcolor%5D=5&csv=1\",\n (\"Host_Human-HostBodyProduct_OralCavityAndAirways_Saliva\", \"Saliva\"): 
\"https://bacdive.dsmz.de/isolation-sources?filters%5B1%5D%5Bcat1%5D=4&filters%5B1%5D%5Bcat2%5D=29&filters%5B1%5D%5Bcat3%5D=&filters%5B1%5D%5Bcolor%5D=4&filters%5B2%5D%5Bcat1%5D=6&filters%5B2%5D%5Bcat2%5D=47&filters%5B2%5D%5Bcat3%5D=276&filters%5B2%5D%5Bcolor%5D=6&csv=1\"}\n\ntax = NcbiTx()\n\nprint('\"Human-related bacterial isolates from BacDive:\"')\n\nfor (search, name), url in data.items():\n print(' \"' + name + '\":')\n print(' url: \"https://bacdive.dsmz.de/search?search=taxid:{}\"')\n parsed_ids = set()\n df = pd.read_table(url, sep=\",\", index_col=0).dropna(subset=[\"Species\"])\n for species in df.Species.unique():\n taxids = tax.search_name(species, exact=True) # rank=\"species\"\n if not taxids:\n sys.stderr.write(\"Species name not found: \" + species + \"\\n\")\n elif len(taxids) > 1:\n sys.stderr.write(\"Species with ambiguous name: \" + species + \"\\n\")\n else:\n parsed_ids.add(taxids[0])\n print(\" ids: [\" + \", \".join(parsed_ids) + \"]\")\n" ]
[ [ "pandas.read_table" ], [ "pandas.read_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
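`get_taxid` in ehomd_download.py above pulls the taxid out of an NCBI *_assembly_stats.txt header with two regexes; here is the same extraction run on an inlined sample header (the header text is fabricated for illustration):

import re

filedata = "# Assembly name: ASM1234\r\n# Taxid:          562\r\n# BioProject: X\r\n"
x = re.search(r"# Taxid:[\s0-9]*\r\n", filedata)
taxid = re.findall(r"\d+", x.group())[0] if x else None
print(taxid)  # 562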
Hiroshiba/hifi-gan
[ "17601a07573309ee305c58bf87a041f267b1c0c8" ]
[ "hifi_gan/meldataset.py" ]
[ "import math\nimport os\nimport random\nimport torch\nimport torch.utils.data\nimport numpy as np\nfrom librosa.core import load\nfrom librosa.util import normalize\nfrom librosa.filters import mel as librosa_mel_fn\n\nMAX_WAV_VALUE = 32768.0\n\n\ndef load_wav(full_path, sampling_rate=None):\n if os.path.splitext(full_path)[1] != '.npy':\n data, sampling_rate = load(full_path, sr=sampling_rate)\n else:\n a = np.load(full_path, allow_pickle=True).item()\n assert sampling_rate == a['rate']\n data = a['array']\n return data, sampling_rate\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n return np.exp(x) / C\n\n\ndef dynamic_range_compression_torch(x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression_torch(x, C=1):\n return torch.exp(x) / C\n\n\ndef spectral_normalize_torch(magnitudes):\n output = dynamic_range_compression_torch(magnitudes)\n return output\n\n\ndef spectral_de_normalize_torch(magnitudes):\n output = dynamic_range_decompression_torch(magnitudes)\n return output\n\n\nmel_basis = {}\nhann_window = {}\n\n\ndef mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True)\n\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec\n\n\ndef get_dataset_filelist(a):\n ext = '.wav' if not a.input_wavs_npy else '.npy'\n with open(a.input_training_file, 'r', encoding='utf-8') as fi:\n training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)\n for x in fi.read().split('\\n') if len(x) > 0]\n\n with open(a.input_validation_file, 'r', encoding='utf-8') as fi:\n validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)\n for x in fi.read().split('\\n') if len(x) > 0]\n return training_files, validation_files\n\n\nclass MelDataset(torch.utils.data.Dataset):\n def __init__(self, training_files, segment_size, n_fft, num_mels,\n hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,\n device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):\n self.audio_files = training_files\n random.seed(1234)\n if shuffle:\n random.shuffle(self.audio_files)\n self.segment_size = segment_size\n self.sampling_rate = sampling_rate\n self.split = split\n self.n_fft = n_fft\n self.num_mels = num_mels\n self.hop_size = hop_size\n self.win_size = win_size\n self.fmin = fmin\n self.fmax = fmax\n self.fmax_loss = fmax_loss\n self.cached_wav = None\n self.n_cache_reuse = n_cache_reuse\n self._cache_ref_count = 0\n self.device = device\n self.fine_tuning = fine_tuning\n self.base_mels_path = 
base_mels_path\n\n def __getitem__(self, index):\n filename = self.audio_files[index]\n if self._cache_ref_count == 0:\n audio, sampling_rate = load_wav(filename, self.sampling_rate)\n if not self.fine_tuning:\n audio = normalize(audio) * 0.95\n self.cached_wav = audio\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n self._cache_ref_count = self.n_cache_reuse\n else:\n audio = self.cached_wav\n self._cache_ref_count -= 1\n\n audio = torch.FloatTensor(audio)\n audio = audio.unsqueeze(0)\n\n if not self.fine_tuning:\n if self.split:\n if audio.size(1) >= self.segment_size:\n max_audio_start = audio.size(1) - self.segment_size\n audio_start = random.randint(0, max_audio_start)\n audio = audio[:, audio_start:audio_start+self.segment_size]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,\n center=False)\n else:\n mel = np.load(\n os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))\n mel = torch.from_numpy(mel)\n\n if len(mel.shape) < 3:\n mel = mel.unsqueeze(0)\n\n if self.split:\n frames_per_seg = math.ceil(self.segment_size / self.hop_size)\n\n if audio.size(1) >= self.segment_size:\n mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)\n mel = mel[:, :, mel_start:mel_start + frames_per_seg]\n audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]\n else:\n mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,\n center=False)\n\n return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())\n\n def __len__(self):\n return len(self.audio_files)\n" ]
[ [ "torch.max", "numpy.clip", "torch.min", "torch.from_numpy", "torch.exp", "torch.FloatTensor", "numpy.load", "torch.clamp", "numpy.exp", "torch.hann_window" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
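`dynamic_range_compression_torch` and its decompression counterpart in meldataset.py above are inverses of each other up to the clamp floor; a quick round trip on a toy magnitude tensor:

import torch

def compress(x, C=1, clip_val=1e-5):
    return torch.log(torch.clamp(x, min=clip_val) * C)

def decompress(x, C=1):
    return torch.exp(x) / C

mag = torch.tensor([1e-7, 0.5, 2.0])
print(decompress(compress(mag)))
# tensor([1.0000e-05, 5.0000e-01, 2.0000e+00]) -- the tiny value was clamped up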
pbevan1/Skin-Deep-Unlearning
[ "b8802db8bd61bbf3fdeb10c9899a4117ae38e89c" ]
[ "misc_code/marking_detection.py" ]
[ "import cv2\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport shutil\nfrom zipfile import ZipFile\n\n# Detecting Gentian Violet Markers\n\n# defining numpy arrays for HSV threshold values to look for in images\nlower_violet = np.array([125, 100, 60], dtype=np.uint8)\nupper_violet = np.array([145, 255, 255], dtype=np.uint8)\n\nfolder = '/Data/train'\n# Looping through images to identify those with gentian violet pixels\nfor im in os.listdir(folder):\n    src = f'/Data/train/{im}'\n    img = cv2.imread(src)  # Reading image\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # Converting image to HSV for more effective thresholding\n    array = cv2.inRange(img, lower_violet, upper_violet)  # Creating array of pixels that fit within the threshold\n    if 255 in array:  # Checking if the array contains any values of 255 (within the HSV values above)\n        shutil.copy(src, '/Data/Train_Marked_OpenCV2')  # Copying to new directory for inspection\n\n# Manually weed out anomalies by looking through 'marked' images\n\n# Making list of the remaining images with gentian violet markers\nmarked_list = []\nfor i in os.listdir('/Data/Train_Marked_OpenCV2'):\n    marked_list.append(str(i)[:12])\ntrain = pd.read_csv(r'/Data/train.csv')  # Opening metadata/labels\ntrain['marked'] = 0  # Creating 'marked' column and setting the default to 0 (False)\ntrain.loc[train.image_name.isin(marked_list), 'marked'] = 1  # Setting images identified as marked to 1 (True)\n\n# Manually labeled scale data\n\n# Making list of the images with scales\nscale_list = []\nscale_images_path = '/content/drive/MyDrive/MSc Project/Data/train_512/train_scale'\nfor i in os.listdir(scale_images_path):\n    scale_list.append(str(i)[:12])\ntrain['scale'] = 0  # Creating 'scale' column and setting the default to 0 (False)\ntrain.loc[train.image_name.isin(scale_list), 'scale'] = 1  # Setting images identified as having a scale to 1 (True)\ntrain.to_csv('/content/drive/MyDrive/MSc Project/Data/train.csv', index=False)  # Saving the metadata/labels file with new columns\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
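The detector in marking_detection.py above flags an image as marked as soon as any pixel falls inside the violet HSV band; the same `cv2.inRange` check on a synthetic 2x2 image (the BGR pixel value is invented for illustration):

import cv2
import numpy as np

lower_violet = np.array([125, 100, 60], dtype=np.uint8)
upper_violet = np.array([145, 255, 255], dtype=np.uint8)

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[0, 0] = (211, 0, 148)                            # a violet-ish BGR pixel
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)           # its hue (~141) is in-band
mask = cv2.inRange(hsv, lower_violet, upper_violet)  # 255 where in-band
print(255 in mask)  # True -> this image would be copied for inspection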
ncilfone/fairscale
[ "b434b7354898febf718f23c7ff21368a6e0bbe1a", "b434b7354898febf718f23c7ff21368a6e0bbe1a" ]
[ "fairscale/optim/adascale.py", "tests/nn/checkpoint/test_checkpoint_activations_norm.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright 2020 Petuum, Inc. All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of Petuum, Inc. nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport functools\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Type\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass AdaScale(Optimizer):\n \"\"\"\n Implements the AdaScale_ algorithm for scaling the learning rate for\n distributed and large batch size training. Can be used in combination with\n ``torch.nn.parallel.DistributedDataParallel`` and ``torch.optim.SGD``.\n\n .. _AdaScale: https://proceedings.icml.cc/static/paper_files/icml/2020/4682-Supplemental.pdf\n\n This class subclasses `Optimizer` so that `torch.optim.lr_scheduler` can\n work with it. In other words, AdaScale is intended to be a complete wrapper of an\n torch Optimizer.\n\n Note that, AdaScale does *not* help increase per-GPU batch size.\n\n There are several ways to integrate AdaScale with your training loop.\n We show two examples below.\n\n Example 1: using PyTorch's `lr_scheduler` classes.\n\n .. code-block:: python\n\n optim = AdaScale(SGD(model.parameters(), lr=0.001))\n model = DistributedDataParallel(model)\n scheduler = LambdaLR(optim, lr_lambda=...)\n\n last_epoch = 0\n done = False\n step = 0\n while not done:\n for batch in dataset:\n optim.zero_grad()\n logits = model()\n loss = criterion(logits, ...)\n loss.backward()\n step += optim.gain()\n optim.step()\n epoch = step // len(dataset)\n if epoch > last_epoch:\n scheduler.step()\n last_epoch = epoch\n if epoch >= MAX_EPOCHS:\n done = True\n\n Example 2: using a custom `update_lr()` function that update the learning\n rate based on the current step count per epoch.\n\n .. 
code-block:: python\n\n optim = AdaScale(SGD(model.parameters(), lr=0.001))\n model = DistributedDataParallel(model)\n\n step = 0\n while step < max_steps:\n for batch in ...:\n optim.zero_grad()\n logits = model()\n loss = criterion()\n loss.backward()\n step += optim.gain()\n optim.step()\n update_lr(step)\n\n Args:\n optimizer (torch.optim.Optimizer):\n Optimizer to apply AdaScale to.\n world_size (int):\n Number of world_size for distributed training.\n If None, defaults to ``dist.get_world_size()``.\n scale (float):\n Scaling factor of the batch size from scale equals 1, e.g. using a 10x\n larger batch size (summed across all ranks with gradient accumulation)\n means a scale of 10.\n If None, defaults to ``world_size * num_gradients_to_accumulate``.\n smoothing (float):\n Smoothing factor for moving average.\n If None, it defaults to ``max(1 - (world_size * num_gradients_to_accumulate)/1000, 0)``.\n Note, for very high scale training, higher smoothing value might be needed,\n esp at the begining of the training. Therefore, if your scale is close to or larger\n than 1000, try experimenting with smoothing value > 0 if the final accuracy is poor.\n num_gradients_to_accumulate (int):\n Number of passes that we accumulate gradients locally\n between each optimizer step. This can be changed during\n training as long as the train loop changes gradient accumulation\n accordingly.\n Default to 1, which does not accumulate gradients.\n debias_ewma (bool):\n (experimental) Use debias exponential moving average\n for smoothing and mu and sigma variables. False will\n use the method in the paper's Appendix B.3.\n Default: True, which is what have been validated so far.\n \"\"\"\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n world_size: Optional[int] = None,\n scale: Optional[float] = None,\n smoothing: float = None,\n num_gradients_to_accumulate: int = 1,\n debias_ewma: bool = True,\n ):\n self._optimizer = optimizer\n self._local_grad_sqr: Optional[torch.Tensor] = None\n self._world_size: int = (\n world_size if world_size is not None else dist.get_world_size() if dist.is_initialized() else 1\n )\n self._num_backward_calls = 0\n self._last_final_backward_call = 0\n self._num_grads_to_accum = num_gradients_to_accumulate\n self._debias_ewma = debias_ewma\n\n # Proxy the param_groups so that `torch.optim.lr_scheduler` can work.\n self.param_groups = self._optimizer.param_groups\n\n self.set_num_gradients_to_accumulate(num_gradients_to_accumulate, update_smoothing=True)\n\n # The previous function call sets smoothing to its default value.\n # Override that here if smoothing was passed as an argument.\n if smoothing is not None:\n self._smoothing = smoothing\n\n if self._world_size * self._num_grads_to_accum <= 1:\n # gain will be NaN since we will be dividing by zero in paper's B.3 where (S-1) == 0.\n raise RuntimeError(\"AdaScale does not support a single worker without grad accumulation.\")\n\n # Per-param-group sqr & var states (sigma^2 & mu^2 in the paper).\n self._optimizer.state.setdefault(\n \"adascale\",\n {\n \"grad_sqr_avg\": np.ones(len(optimizer.param_groups)),\n \"grad_var_avg\": np.zeros(len(optimizer.param_groups)),\n },\n )\n\n self._scale = 1.0 # Assign to inform mypy about the typing of this variable.\n self.set_scale(self._world_size * self._num_grads_to_accum if scale is None else scale)\n\n self._hook_handles: List[Any] = []\n self._hook()\n\n def _hook(self) -> None:\n \"\"\" Internal function to register the gradient hooks.\n\n Note, don't assume every 
parameter will generate a gradient (i.e. triggering the hook)\n in every backward pass, which is the reason that we have ``find_unused_params`` flag\n in the DDP class in ``torch.nn.parallel``.\n \"\"\"\n assert self._hook_handles == [], \"Must run unhook first\"\n for idx, param_group in enumerate(self._optimizer.param_groups):\n for param in param_group[\"params\"]:\n h = param.register_hook(functools.partial(self._backward_hook, idx))\n self._hook_handles.append(h)\n\n def __del__(self) -> None:\n \"\"\" Unhook in case caller forgets to call unhook.\n\n This however may not \"work\" since there would be circular reference\n between the hook objects and this objects. In that case, neither will\n get GC'ed. Calling unhook explicitly if you really want to delete\n AdaScale from memory.\n \"\"\"\n self.unhook()\n\n def unhook(self) -> None:\n \"\"\" Unregister hook handles.\n\n This is public because caller may need to call this to ensure all GPU\n memory are released. Otherwise, the hook may prevent parameters from being\n released from the GPU memory pool.\n\n Internally, we use this to support ``add_param_group()`` API.\n \"\"\"\n for h in self._hook_handles:\n h.remove()\n self._hook_handles = []\n\n @property\n def _state(self) -> Dict[str, np.ndarray]:\n \"\"\"\n Return the states of AdaScale.\n \"\"\"\n return self._optimizer.state[\"adascale\"]\n\n @property\n def scale(self) -> float:\n \"\"\"\n The scaling factor of the current batch size, relative to the baseline\n batch size, which could be a DDP training. For example, if the\n baseline batch size is 32 on 2 GPUs, but using a scaled-up batch size\n of 80 on 4 GPUs, then then the scaling factor is 80 * 4 / 32 / 2 = 5.\n\n This is exposed API mainly for logging purpose. Note, this is different\n from ``self.gain()``.\n\n Returns:\n (float):\n The current scaling factor.\n \"\"\"\n return self._scale\n\n @property\n def smoothing(self) -> float:\n \"\"\"\n The smoothing constant used in exponentially-weighted moving average\n tracking the gradient norm mean and variance within AdaScale.\n\n This is exposed API since the value is computed and caller may\n want to obtain this value and log it.\n\n Returns:\n (float):\n The current smoothing value.\n \"\"\"\n return self._smoothing\n\n def set_scale(self, scale: float, update_estimate: bool = True) -> None:\n \"\"\"\n Set the scaling factor of the current batch size. It is up to the\n application to invoke this function to make sure that AdaScale's\n scaling factor matches the actual batch size used during training.\n\n Args:\n scale (float):\n New scaling factor to be applied to AdaScale.\n update_estimate (bool):\n Whether to update the scale-depenent estimate of gradient\n variance; this is highly recommended. 
(default: True)\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't change scale in backward phase\"\n assert scale >= 1, \"Scale must be at least 1\"\n if update_estimate and hasattr(self, \"_scale\"):\n assert self._scale >= 1, \"bug: old scale isn't valid\"\n # Rescale grad_var_avg to account for the change in scale\n if self._debias_ewma and \"grad_var_avg_biased\" in self._state:\n self._state[\"grad_var_avg_biased\"] *= self._scale / scale\n elif \"grad_var_avg_total\" in self._state: # _debias_ewma==False\n self._state[\"grad_var_avg_total\"] *= self._scale / scale\n self._state[\"grad_var_avg\"] *= self._scale / scale\n self._scale = scale\n\n def _grad_sqr_avg(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the squared l2-norm of the true gradient\n (sigma squared in the AdaScale paper).\n\n Args:\n pg_idx (Optional[int]):\n Optional index for a parameter group.\n\n Returns:\n (float):\n Estimate of squared l2-norm.\n \"\"\"\n if pg_idx is not None:\n return self._state[\"grad_sqr_avg\"][pg_idx]\n else:\n return float(np.sum(self._state[\"grad_sqr_avg\"]))\n\n def _grad_var_avg(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the trace of the covariance of the true gradient\n (mu squared in the AdaScale paper).\n\n Args:\n pg_idx (Optional[int]):\n Optional index for a parameter group.\n\n Returns:\n (float):\n Estimate of trace of the covariance.\n \"\"\"\n if pg_idx is not None:\n return self._state[\"grad_var_avg\"][pg_idx]\n else:\n return float(np.sum(self._state[\"grad_var_avg\"]))\n\n def gain(self, pg_idx: Optional[int] = None) -> float:\n \"\"\"\n Current estimate of the AdaScale gain ratio (r_t in the paper).\n\n Args:\n pg_idx (int):\n Optional index of a parameter group.\n Default None: returns \"averaged\" gain for all groups.\n\n Returns:\n (float):\n Estimate of gain ratio.\n \"\"\"\n var = self._grad_var_avg(pg_idx)\n sqr = self._grad_sqr_avg(pg_idx)\n gain = (var + sqr) / (var / self.scale + sqr)\n return gain\n\n def _update_avg(self, name: str, value: np.ndarray, factor: float) -> None:\n if self._debias_ewma:\n # This function computes and stores the moving average of a vector\n # using a smoothing factor.\n biased = self._state.get(name + \"_biased\", np.zeros(value.shape[0]))\n unbias = self._state.get(name + \"_unbias\", np.zeros(value.shape[0]))\n biased = factor * biased + (1.0 - factor) * value\n unbias = factor * unbias + (1.0 - factor)\n self._state[name + \"_biased\"] = biased\n self._state[name + \"_unbias\"] = unbias\n self._state[name] = biased / unbias\n else:\n # Moving average procedure described in Appendix B.3\n # For iterations t < 1 / (1 - smoothing) define grad_var_avg\n # and grad_sqr_avg as mean of the past samples. After that\n # start using running average.\n #\n # Note: we only keep a single _count for all parameter groups.\n # Ideally, it should be a vector and in case a PG is added\n # after some iterations are done. But, then the if condition\n # below will need to be a np.where. 
I leave this corner\n # case to a future exercise.\n count = self._state.get(name + \"_count\", np.zeros(1))\n count[0] += 1\n self._state[name + \"_count\"] = count\n if count < 1 / (1 - self._smoothing):\n total = self._state.get(name + \"_total\", None)\n if total is None:\n total = value\n else:\n total += value\n self._state[name + \"_total\"] = total\n self._state[name] = total / count\n else:\n self._state[name] = factor * self._state[name] + (1.0 - factor) * value\n\n def _backward_hook(self, pg_idx: int, grad: torch.Tensor) -> None:\n # This method should be invoked once for each parameter during the\n # backward pass, before gradients are synchronized between world_size.\n\n # Store the local gradient square sums in a vector.\n # This vector is also used for error checking. Whenever it is not None,\n # it means that we are in backward pass.\n if self._local_grad_sqr is None:\n self._local_grad_sqr = torch.zeros(\n len(self._optimizer.param_groups), device=grad.device, requires_grad=False,\n )\n self._local_grad_sqr[pg_idx] += grad.pow(2).sum()\n\n # Now, ensure we queue a callback at the end of the callback queue.\n # This will fire after all gradient callbacks are done (esp. those\n # queued by DDP.\n self._final_callback_queued = False\n Variable._execution_engine.queue_callback(self._queue_callback)\n\n def _queue_callback(self) -> None:\n # This method should be invoked after the entire backward pass. We want\n # to make sure self._final_callback is invoked once, only after all\n # gradients have been synchronized between each worker. However, the\n # synchronization code in DistributedDataParallel is also done in a\n # callback, which might not yet be executed. Therefore, we enqueue\n # self._final_callback from this method, which should ensure it is\n # invoked after the gradient synchronization callback.\n if self._final_callback_queued:\n return\n self._final_callback_queued = True\n Variable._execution_engine.queue_callback(self._final_callback)\n\n def _final_callback(self) -> None:\n # This method should be invoked once for each backward pass, after\n # gradients have been synchronized between each worker, unless we\n # are in gradient accumulation mode, where grads are not all_reduced\n # between the GPUs.\n self._final_callback_queued = False\n assert isinstance(self._local_grad_sqr, torch.Tensor)\n\n # Keep track of number of backward calls for gradient accumulation.\n # TODO (min): this may not work with activation checkpointing when\n # multiple backward calls happen in a big backward.\n self._num_backward_calls += 1\n\n # TODO (min, mike): We need to have a way to check that training loop & DDP\n # is doing the right thing where the gradient is reduced\n # in this backward pass.\n # Longer term, we may compute the gain and then inform\n # the training loop when it is a good time to step().\n assert (\n self._num_backward_calls - self._last_final_backward_call\n ) <= self._num_grads_to_accum, (\n f\"bug: {self._num_backward_calls} - {self._last_final_backward_call} should <= {self._num_grads_to_accum}\"\n )\n if (self._num_backward_calls - self._last_final_backward_call) % self._num_grads_to_accum != 0:\n assert self._local_grad_sqr is not None, \"We should still be in backward phase\"\n return\n\n # Since self._local_grad_sqr is FP32, sum shouldn't overflow.\n # This vector has length of # of param_groups, so it is small, but we\n # use async to hide the all_reduce latency, esp when # of nodes is large.\n work = None\n if self._world_size > 1:\n work = 
dist.all_reduce(self._local_grad_sqr, async_op=True) # SUM\n\n # Compute the sums of squares for reduced gradients.\n # Divide by _num_grads_to_accum since the gradients are accumulated.\n total_grad_sqr = np.array(\n [sum(param.grad.pow(2).sum().item() for param in group[\"params\"]) for group in self._optimizer.param_groups]\n )\n # Divide by (_num_grads_to_accum ** 2) to account for gradient\n # accumulation.\n if self._num_grads_to_accum > 1:\n # np array doesn't support /=.\n total_grad_sqr = total_grad_sqr / (self._num_grads_to_accum ** 2)\n\n # Wait for all_reduce to be done and move it to cpu & np.\n if work:\n work.wait()\n local_grad_sqr = self._local_grad_sqr.cpu().numpy()\n\n # See appendix B.3 of the paper.\n # Modified to handle cases where scale != world_size\n #\n # local_grad_sqr is \\sum_{i=1}^{c N} \\norm{g_t_i}^2\n # where N is world size and c is num_grads_to_accum\n # total_grad_sqr is \\norm{\\bar{g}_t}^2\n S = self._scale\n cN = self._world_size * self._num_grads_to_accum\n grad_var = local_grad_sqr * (S / cN) / (cN - 1) - total_grad_sqr * S / (cN - 1)\n grad_sqr = total_grad_sqr - grad_var / S\n grad_var = np.maximum(grad_var, 1e-6)\n grad_sqr = np.maximum(grad_sqr, 0.0)\n self._update_avg(\"grad_sqr_avg\", grad_sqr, self.smoothing)\n self._update_avg(\"grad_var_avg\", grad_var, self.smoothing)\n self._last_final_backward_call = self._num_backward_calls\n # Indicating backward is done.\n self._local_grad_sqr = None\n\n def step(self, *args: Any, **kwargs: Any) -> Optional[float]:\n \"\"\"\n Run one optimizer step using Adascale. Essentially just invokes\n ``optimizer.step(*args, **kwargs)`` with a scaled learning rate.\n\n .. note::\n\n It is possible that this function becames a performance\n bottleneck if you have frequent updates. 
To avoid that,\n making bigger steps and reducing update frequency is generally\n better for performance.\n\n Args:\n args (Any):\n Positional arguments passed to ``optimizer.step``.\n kwargs (Any):\n Keyword arguments passed to ``optimizer.step``.\n\n Returns:\n (Tensor):\n The loss tensor if a closure if used to re-evaluate the model.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't step without finishing backward phase\"\n # Set original LR and set new LR.\n original_lr = []\n for idx, param_group in enumerate(self._optimizer.param_groups):\n original_lr.append(param_group[\"lr\"])\n param_group[\"lr\"] = self.gain(pg_idx=idx) * param_group[\"lr\"]\n\n # Step it.\n res = self._optimizer.step(*args, **kwargs)\n\n # Restore the original LR.\n for lr, param_group in zip(original_lr, self._optimizer.param_groups):\n param_group[\"lr\"] = lr\n\n return res\n\n def add_param_group(self, pg: Dict) -> None:\n \"\"\" Support adding parameter groups\n\n We need to re-size some of the state and re-register the backward hooks.\n \"\"\"\n assert self._local_grad_sqr is None, \"Can't add parameter group during backward\"\n self._optimizer.add_param_group(pg)\n # Update the hooks.\n self.unhook()\n self._hook()\n # Extend the states.\n for name in self._state.keys():\n assert name.startswith(\"grad_sqr_avg\") or name.startswith(\"grad_var_avg\"), name\n if name.endswith(\"_count\"):\n # This is the \"_count\" variable, should be a 1D int.\n assert self._state[name].shape == (1,), self._state[name].shape\n continue\n # must be a np array, extend it with the right value and check the shape.\n val = 1 if name == \"grad_sqr_avg\" else 0\n self._state[name] = np.append(self._state[name], val)\n assert self._state[name].shape == (len(self._optimizer.param_groups),)\n\n def zero_grad(self) -> None:\n \"\"\"Proxy function to optimizer, because some training loops need this.\"\"\"\n assert self._local_grad_sqr is None, \"Don't zero_grad in backward\"\n return self._optimizer.zero_grad()\n\n def state_dict(self) -> Dict:\n \"\"\" Proxy function to optimizer, checkpointing needs this.\n\n .. note::\n\n Do NOT checkpoint in the middle of gradient accumulation since\n associated AdaScale internal states are not saved in the checkpoint.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't checkpoint in backward\"\n return self._optimizer.state_dict()\n\n def load_state_dict(self, data: Dict) -> None:\n \"\"\" Proxy function to optimizer, checkpointing needs this.\n\n .. note::\n\n Do NOT checkpoint in the middle of gradient accumulation since\n associated AdaScale internal states are not saved in the checkpoint.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't load checkpoint in backward\"\n return self._optimizer.load_state_dict(data)\n\n def set_num_gradients_to_accumulate(self, num_gradients_to_accumulate: int, update_smoothing: bool = True,) -> None:\n \"\"\"Set the number of gradients to accumulate to a new value.\n\n This is experimental. This could be called while training so that\n we can gradually increasing the steps between updates. Almost always,\n `set_scale` needs to be called to update the scale as well.\n\n TODO (min): need a way of determine how much to increase the step size?\n\n TODO (min): have both `set_scale` and `set_num_gradients_to_accumulate`\n is hard to use and easy to make mistake. I think it is better\n to specific a specify a `base_scale`. 
But more discussion is\n needed here.\n\n Args:\n num_gradients_to_accumulate (int):\n Number of gradients to accumulate (calls to backward) between\n each optimizer step\n update_smoothing (bool):\n Whether to update smoothing factor or not. Default: True.\n \"\"\"\n assert self._local_grad_sqr is None, \"Don't change num_grad_to_accum in backward\"\n assert num_gradients_to_accumulate >= 1, f\"Invalid value {num_gradients_to_accumulate}\"\n self._num_grads_to_accum = num_gradients_to_accumulate\n if update_smoothing:\n # Set smoothing based on effective world_size rather than scale here,\n # since world_size determines the number of samples being averaged over\n # at every update.\n #\n # When effective world size is large enough, smoothing is probably\n # not needed, so the smoothing factor is 0.\n self._smoothing = max(1 - self._world_size * self._num_grads_to_accum / 1000, 0)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"Forward missing attributes to wrapped optimizer.\"\"\"\n try:\n return super().__getattr__(name) # defer to Optimizer logic\n except AttributeError:\n return getattr(self._optimizer, name) # fallback to wrapped optim\n\n\nclass AdaScaleWrapper(AdaScale):\n \"\"\"\n A thin wrapper for AdaScale so that the constructor resembles a\n standard optimizer. This allows it to work with other Optimizer\n Wrappers, like `OSS`.\n\n .. warn::\n OSS(AdaScaleWrapper) (i.e. OSS wrapping AdaScale) resulting in each\n rank's AdaScale operates on different set of parameters. They\n will get different gain values and it is unclear how to adjust\n effective step size in that case. We have not validated effectiveness\n or benefit in this case.\n\n OTOH, AdaScale(OSS) (i.e. AdaScale wrapping OSS) is recommended\n and is numerically identical to AdaScale without OSS. Since\n AdaScale doesn't incur per-parameter state, the memory benefit\n of OSS is still the same.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n optim (class subtyping torch.optim.Optimizer):\n a optimizer class to be wrapped.\n additional_optim_args (argument dict):\n keyward arguments to the `optim` class above.\n\n The rest params are in-sync with the `AdaScale` class above.\n \"\"\"\n\n def __init__(\n self,\n params: _params_t,\n world_size: Optional[int] = None,\n scale: Optional[float] = None,\n smoothing: float = None,\n num_gradients_to_accumulate: int = 1,\n debias_ewma: bool = True,\n optim_cls: Type[Optimizer] = SGD,\n **additional_optim_args: Any,\n ):\n optim_obj = optim_cls(params, **additional_optim_args)\n super().__init__(optim_obj, world_size, scale, smoothing, num_gradients_to_accumulate, debias_ewma)\n", "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\n\"\"\" Test checkpoint_wrapper with normalization layers. 
\"\"\"\n\nimport pytest\nimport torch\nfrom torch.nn import BatchNorm2d, LayerNorm, Linear, Sequential\nfrom torch.optim import SGD\n\nfrom fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper\nfrom fairscale.utils.testing import objects_are_equal, torch_version\n\nNORM_TYPES = [LayerNorm, BatchNorm2d]\nMP_TYPES = [\"fp32\", \"fp16\", \"call_half\"]\n\n\ndef get_model(norm_type, checkpointed, mixed_precision):\n assert norm_type in NORM_TYPES, norm_type\n assert checkpointed in [True, False], checkpointed\n assert mixed_precision in MP_TYPES\n\n model = Sequential(Linear(3, 2), norm_type(2))\n\n if mixed_precision == \"fp16\":\n # Set param.data and buffers as fp16\n for p in model.parameters():\n p.data = p.data.half()\n for m in model:\n for n, b in m.named_buffers():\n setattr(m, n, b.half())\n elif mixed_precision == \"call_half\":\n model.half()\n\n if checkpointed:\n model = checkpoint_wrapper(model)\n\n return model\n\n\[email protected](\"device\", [\"cpu\", \"cuda\"])\[email protected](\"norm_type\", NORM_TYPES)\[email protected](\"mixed_precision\", MP_TYPES)\ndef test_norm(device, norm_type, mixed_precision):\n \"\"\"Test checkpoint_wrapper with different norm layers.\"\"\"\n if device == \"cuda\" and not torch.cuda.is_available():\n pytest.skip(\"Skip due to lack of GPU\")\n\n # Get input, ref, checkpoint models and make them equal.\n in_data = torch.rand(2, 2, 3, 3).to(device)\n m_ref = get_model(norm_type, False, mixed_precision).to(device)\n m_cpt = get_model(norm_type, True, mixed_precision).to(device)\n m_cpt.load_state_dict(m_ref.state_dict())\n\n if torch_version() >= (1, 6, 0):\n # This assert fails on 1.5.1.\n assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())\n\n if mixed_precision != \"fp32\":\n in_data = in_data.half()\n\n # Needed due to checkpointing.\n in_data.requires_grad = True\n for model in (m_ref, m_cpt):\n optim = SGD(model.parameters(), lr=0.1)\n if device == \"cpu\" and mixed_precision != \"fp32\":\n # Got: RuntimeError: \"batch_norm\"/\"layer_norm\" not implemented for 'Half'.\n with pytest.raises(RuntimeError):\n out = model(in_data)\n return\n else:\n # Everything else work.\n out = model(in_data)\n out.sum().backward()\n optim.step()\n\n if torch_version() >= (1, 6, 0):\n assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())\n" ]
[ [ "numpy.maximum", "torch.autograd.Variable._execution_engine.queue_callback", "torch.distributed.is_initialized", "numpy.append", "torch.distributed.get_world_size", "torch.distributed.all_reduce", "numpy.sum", "numpy.zeros" ], [ "torch.nn.Linear", "torch.rand", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lianghongzhuo/ycb-tools
[ "e5d99a23a2b8b345c571acf9c8ea10a648d7fb03" ]
[ "create_ycb_sdf.py" ]
[ "import os\nimport trimesh\nimport numpy as np\n\n\"\"\"\nCreates Gazebo compatible SDF files from downloaded YCB data.\n\nThis looks through all the YCB objects you have downloaded in a particular \nfolder, and creates Gazebo compatible SDF files from a set of templates.\n\nIf the object has google_16k meshes downloaded, it will use those; else, it\nwill use the tsdf meshes which are of lower quality. \n\nWe recommend ensuring that you've enabled `google_16k` as one of the file \ntypes to download in the `download_ycb_dataset.py` script.\n\nSebastian Castro 2020\n\"\"\"\n\n# Define downsample ratio for mesh. This makes Gazebo run much faster.\ndownsample_ratio = 0.33\n\n# Define folders\nycb_folder = os.path.join(\"models\", \"ycb\")\ntemplate_folder = os.path.join(\"templates\", \"ycb\")\n\nif __name__ == \"__main__\":\n\n print(\"Creating files to use YCB objects in Gazebo...\")\n\n # Get the list of all downloaded mesh folders\n folder_names = os.listdir(ycb_folder)\n\n # Get the template files to copy over\n config_template_file = os.path.join(template_folder, \"model.config\")\n model_template_file = os.path.join(template_folder, \"template.sdf\")\n material_template_file = os.path.join(template_folder, \"template.material\")\n with open(config_template_file, \"r\") as f:\n config_template_text = f.read()\n with open(model_template_file, \"r\") as f:\n model_template_text = f.read()\n with open(material_template_file, \"r\") as f:\n material_template_text = f.read()\n\n # Now loop through all the folders\n for folder in folder_names:\n if folder != \"template\":\n try:\n print(\"Creating Gazebo files for {} ...\".format(folder))\n\n # Extract model name and folder\n model_long = folder\n model_short = folder[4:]\n model_folder = os.path.join(ycb_folder, model_long)\n\n # Check if there are Google meshes; else use the TSDF folder\n if \"google_16k\" in os.listdir(model_folder):\n mesh_type = \"google_16k\"\n else:\n mesh_type = \"tsdf\"\n\n # Extract key data from the mesh\n if mesh_type == \"google_16k\":\n mesh_file = os.path.join(model_folder, \"google_16k\", \"textured.obj\")\n elif mesh_type == \"tsdf\":\n mesh_file = os.path.join(model_folder, \"tsdf\", \"textured.obj\")\n else:\n raise NotImplementedError\n mesh = trimesh.load(mesh_file)\n # Mass and moments of inertia\n mass_text = str(mesh.mass)\n tf = mesh.principal_inertia_transform\n inertia = trimesh.inertia.transform_inertia(tf, mesh.moment_inertia)\n # Center of mass\n com_vec = mesh.center_mass.tolist()\n eul = trimesh.transformations.euler_from_matrix(np.linalg.inv(tf), axes=\"sxyz\")\n com_vec.extend(list(eul))\n com_text = str(com_vec)\n com_text = com_text.replace(\"[\", \"\")\n com_text = com_text.replace(\"]\", \"\")\n com_text = com_text.replace(\",\", \"\")\n\n # Create a downsampled mesh file with a subset of vertices and faces\n if downsample_ratio < 1:\n mesh_pts = mesh.vertices.shape[0]\n num_pts = int(mesh_pts * downsample_ratio)\n (_, face_idx) = mesh.sample(num_pts, True)\n downsampled_mesh = mesh.submesh((face_idx,), append=True)\n with open(os.path.join(model_folder, \"downsampled.obj\"), \"w\") as f:\n downsampled_mesh.export(f, \"obj\")\n collision_mesh_text = model_long + \"/downsampled.obj\"\n else:\n collision_mesh_text = model_long + \"/\" + mesh_type + \"/textured.obj\"\n\n # Copy and modify the model configuration file template\n config_text = config_template_text.replace(\"$MODEL_SHORT\", model_short)\n with open(os.path.join(model_folder, \"model.config\"), \"w\") as f:\n 
f.write(config_text)\n\n # Copy and modify the model file template\n model_text = model_template_text.replace(\"$MODEL_SHORT\", model_short)\n model_text = model_text.replace(\"$MODEL_LONG\", model_long)\n model_text = model_text.replace(\"$MESH_TYPE\", mesh_type)\n model_text = model_text.replace(\"$COLLISION_MESH\", collision_mesh_text)\n model_text = model_text.replace(\"$MASS\", mass_text)\n model_text = model_text.replace(\"$COM_POSE\", com_text)\n model_text = model_text.replace(\"$IXX\", str(inertia[0][0]))\n model_text = model_text.replace(\"$IYY\", str(inertia[1][1]))\n model_text = model_text.replace(\"$IZZ\", str(inertia[2][2]))\n model_text = model_text.replace(\"$IXY\", str(inertia[0][1]))\n model_text = model_text.replace(\"$IXZ\", str(inertia[0][2]))\n model_text = model_text.replace(\"$IYZ\", str(inertia[1][2]))\n with open(os.path.join(model_folder, model_short + \".sdf\"), \"w\") as f:\n f.write(model_text)\n\n # Copy and modify the material file template\n if mesh_type == \"google_16k\":\n texture_file = \"texture_map.png\"\n elif mesh_type == \"tsdf\":\n texture_file = \"textured.png\"\n else:\n raise NotImplementedError\n material_text = material_template_text.replace(\"$MODEL_SHORT\", model_short)\n material_text = material_text.replace(\"$MODEL_LONG\", model_long)\n material_text = material_text.replace(\"$MESH_TYPE\", mesh_type)\n material_text = material_text.replace(\"$TEXTURE_FILE\", texture_file)\n with open(os.path.join(model_folder, model_short + \".material\"), \"w\") as f:\n f.write(material_text)\n except:\n print(\"Error processing {}. Textured mesh likely does not exist for this object.\".format(folder))\n\n print(\"Done.\")\n\n" ]
[ [ "numpy.linalg.inv" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
neilzhang-ucsb/ABRS
[ "03acc766f87e58870fe39d5403570c44be69f235" ]
[ "real_time_ABRS.py" ]
[ "#real_time_ABRS\r\n\r\n# Copyright (c) 2019 Primoz Ravbar UCSB\r\n# Licensed under BSD 2-Clause [see LICENSE for details]\r\n# Written by Primoz Ravbar\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport pickle\r\nimport msvcrt\r\n\r\n\r\nfrom scipy import misc #pip install pillow\r\nimport scipy\r\nfrom scipy import ndimage\r\n\r\nfrom PIL import Image\r\n\r\n\r\nfrom ABRS_modules import getting_frame_record\r\nfrom ABRS_modules import center_of_gravity\r\nfrom ABRS_modules import subtract_average\r\nfrom ABRS_modules import smooth_2d\r\nfrom ABRS_modules import smooth_1d\r\nfrom ABRS_modules import discrete_radon_transform\r\nfrom ABRS_modules import computeSpeedFromPosXY\r\nfrom ABRS_modules import create_3C_image\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\n\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\r\n\r\n\r\ncap = cv2.VideoCapture('Empty_Chrimson_dusted_1_2.avi');fb=4 #insert path to the raw movie; see README for the format\r\n\r\n\r\nnewSize = (400,400);\r\nstartFrame = 0;\r\nendFrame = 50012;\r\n\r\nkernelSize = 100\r\nsmoothingWindow = 89\r\n\r\nwindowSize = 10006 #size of window for training -- ignore in this version\r\n\r\nwinST = 16;\r\n\r\nhalfWindowSpeed = 15\r\n\r\nind = 0;\r\n\r\nprevFrame = np.zeros((400,400))\r\nfrRec = np.zeros((16+1,newSize[0]*newSize[1]))\r\n\r\ntrainImRec = np.zeros((80*80,1000))\r\ntrainLabelRec = np.zeros((1,1000))\r\n\r\npredictionsProbRec = np.zeros((10,endFrame))\r\n\r\netho = np.zeros((1,endFrame))\r\n\r\npathToABRSfolder = 'INSERT PATH TO ABRS MAIN FOLDER HERE'\r\n \r\n\r\nmodel = keras.models.load_model('modelConv2ABRS_3C')\r\nmodel.summary()\r\n\r\nfeatureCol = np.zeros((30,1));\r\nfeatureColAP = np.zeros((30,1));\r\nposCol = np.zeros((2,1));\r\nimCol = np.zeros((80*80,1));\r\nbehCol = np.zeros((1,1));\r\n\r\nfeatureMat = np.zeros((30,kernelSize))\r\nposMat = np.zeros((2,kernelSize))\r\nimMat = np.zeros((80*80,windowSize))\r\nbehMat = np.zeros((1,windowSize))\r\n\r\nim3Crec = np.zeros((1000,80,80,3))\r\n\r\nkernelInd = 0\r\ntrainInd = windowSize\r\nkeyInd = 0\r\nframeInd = 0\r\n\r\nwhile(cap.isOpened()): \r\n ret, frame = cap.read() #\r\n\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #\r\n\r\n rs = cv2.resize(gray,(newSize[0],newSize[1]));\r\n\r\n currentFrame = rs.astype(float)/1;\r\n diffFrame = currentFrame - prevFrame;\r\n prevFrame = currentFrame;\r\n\r\n diffFrameAbs = np.absolute(diffFrame)\r\n\r\n frameVect = currentFrame.reshape(1,newSize[0]*newSize[1]);\r\n frameVectFloat = frameVect.astype(float);\r\n\r\n frRecShort = np.delete(frRec, 0, 0);\r\n frRec = np.vstack((frRecShort,frameVectFloat));\r\n\r\n sumFrRec = np.sum(frRec,0);\r\n \r\n posDic, maxMovement, cfrVectRec, frameVectFloatRec = getting_frame_record(frRec, 0, winST,fb);\r\n \r\n im3CRaw = create_3C_image (cfrVectRec)\r\n \r\n if np.count_nonzero(im3CRaw[:,:,0])>6400: \r\n im3CRaw[:,:,0] = np.zeros((80,80))\r\n \r\n if np.count_nonzero(im3CRaw[:,:,1])>800: \r\n im3CRaw[:,:,1] = np.zeros((80,80))\r\n \r\n rgbArray = np.zeros((80,80,3), 'uint8')\r\n rgbArray[..., 0] = im3CRaw[:,:,0]\r\n 
rgbArray[..., 1] = im3CRaw[:,:,1]\r\n rgbArray[..., 2] = im3CRaw[:,:,2]\r\n im3C = Image.fromarray(rgbArray)\r\n\r\n X_rs = np.zeros((1,80,80,3))\r\n \r\n X_rs[0,:,:,:]=im3C\r\n\r\n storeFrameRec = 0\r\n if storeFrameRec == 1:\r\n im3Crec[frameInd,:,:,:]=im3C\r\n\r\n X = X_rs/256 # normalize\r\n\r\n\r\n predictionsProb = model.predict(X)\r\n\r\n predictionsProbRec[:,ind] = predictionsProb\r\n\r\n predictionLabel = np.zeros((1,np.shape(predictionsProb)[0]))\r\n predictionLabel[0,:] = np.argmax(predictionsProb,axis=1)\r\n \r\n\r\n beh = predictionLabel\r\n\r\n if maxMovement < 200: #this is to \r\n beh=7\r\n \r\n etho[0,ind]=beh\r\n \r\n print(beh)\r\n\r\n ###### this part is being developed for online training and for semi-automatic ethogram production \r\n \r\n trainKey = 'n'\r\n if keyInd == windowSize: \r\n trainKey = input('train?')\r\n \r\n\r\n if trainKey == 't':\r\n\r\n trainLabelRec[0,trainInd-windowSize:trainInd] = behMat\r\n trainImRec[:,trainInd-windowSize:trainInd] = imMat\r\n \r\n trainInd = trainInd +windowSize\r\n keyInd=0\r\n print(trainKey)\r\n\r\n if trainKey == 'f':\r\n beh = input('behavior?')\r\n trainLabelRec[0,trainInd-windowSize:trainInd] = beh\r\n trainImRec[:,trainInd-windowSize:trainInd] = imMat\r\n \r\n trainInd = trainInd +1\r\n keyInd=0\r\n print(trainKey) \r\n\r\n if trainKey != 't' and keyInd>windowSize:\r\n keyInd=0\r\n print(trainKey)\r\n\r\n keyInd = keyInd + 1\r\n\r\n frameInd = frameInd + 1\r\n\r\n ##################################################################\r\n\r\n \r\n cv2.imshow('im3CRaw',im3CRaw)\r\n cv2.imshow('frame',gray)\r\n\r\n\r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n if ind > endFrame-1:\r\n break\r\n\r\n ind=ind+1\r\n \r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.absolute", "numpy.delete", "numpy.argmax", "numpy.shape", "numpy.count_nonzero", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
DT021/GamestonkTerminal
[ "10d231ec2f86a19e69fdb65a2f4d37f33f723f6a" ]
[ "gamestonk_terminal/etf/etf_controller.py" ]
[ "\"\"\"ETF Controller\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nimport os\nfrom typing import List\nimport matplotlib.pyplot as plt\nfrom prompt_toolkit.completion import NestedCompleter\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.helper_funcs import get_flair\nfrom gamestonk_terminal.menu import session\nfrom gamestonk_terminal.etf.stockanalysis_model import (\n name_search,\n open_web,\n etf_overview,\n compare_etfs,\n etf_holdings,\n)\nfrom gamestonk_terminal.etf.screener_model import etf_screener\nfrom gamestonk_terminal.etf import wsj_view\n\n\nclass ETFController:\n CHOICES = [\n \"cls\",\n \"?\",\n \"help\",\n \"q\",\n \"quit\",\n \"web\",\n \"search\",\n \"overview\",\n \"compare\",\n \"holdings\",\n \"screener\",\n \"gainers\",\n \"decliners\",\n \"active\",\n ]\n\n def __init__(self):\n \"\"\"CONSTRUCTOR\"\"\"\n\n self.etf_parser = argparse.ArgumentParser(add_help=False, prog=\"etf\")\n self.etf_parser.add_argument(\"cmd\", choices=self.CHOICES)\n\n def print_help(self):\n \"\"\"Print help\"\"\"\n print(\n \"https://github.com/GamestonkTerminal/GamestonkTerminal/tree/main/gamestonk_terminal/etf\"\n )\n print(\"\\nETF:\")\n print(\" cls clear screen\")\n print(\" ?/help show this menu again\")\n print(\" q quit this menu, and shows back to main menu\")\n print(\" quit quit to abandon program\")\n print(\"\\nStockAnalysis.com\")\n print(\" web open StockAnalysis.com/etf\")\n print(\" search search ETFs matching name (i.e. BlackRock or Invesco)\")\n print(\" overview get overview of ETF symbol\")\n print(\" holdings get top holdings for ETF\")\n print(\" compare compare overview of multiple ETF\")\n print(\" screener screen etfs based on overview data\")\n print(\"\\n Wall St. Journal\")\n print(\" gainers show top gainers\")\n print(\" decliners show top decliners\")\n print(\" active show most active\")\n print(\"\")\n\n def switch(self, an_input: str):\n \"\"\"Process and dispatch input\n\n Returns\n -------\n True, False or None\n False - quit the menu\n True - quit the program\n None - continue in the menu\n \"\"\"\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.etf_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n os.system(\"cls||clear\")\n return None\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"Command not recognized!\"\n )(other_args)\n\n def call_help(self, _):\n \"\"\"Process Help command\"\"\"\n self.print_help()\n\n def call_q(self, _):\n \"\"\"Process Q command - quit the menu\"\"\"\n return False\n\n def call_quit(self, _):\n \"\"\"Process Quit command - quit the program\"\"\"\n return True\n\n def call_web(self, other_args: List[str]):\n \"\"\"Process web command\"\"\"\n open_web(other_args)\n\n def call_search(self, other_args: List[str]):\n \"\"\"Process search command\"\"\"\n name_search(other_args)\n\n def call_overview(self, other_args: List[str]):\n \"\"\"Process overview command\"\"\"\n etf_overview(other_args)\n\n def call_holdings(self, other_args: List[str]):\n \"\"\"Process holdings command\"\"\"\n etf_holdings(other_args)\n\n def call_compare(self, other_args):\n \"\"\"Process compare command\"\"\"\n compare_etfs(other_args)\n\n def call_screener(self, other_args):\n \"\"\"Process screener command\"\"\"\n etf_screener(other_args)\n\n def call_gainers(self, other_args):\n \"\"\"Process gainers 
command\"\"\"\n wsj_view.show_top_mover(\"gainers\", other_args)\n\n def call_decliners(self, other_args):\n \"\"\"Process decliners command\"\"\"\n wsj_view.show_top_mover(\"decliners\", other_args)\n\n def call_active(self, other_args):\n \"\"\"Process gainers command\"\"\"\n wsj_view.show_top_mover(\"active\", other_args)\n\n\ndef menu():\n etf_controller = ETFController()\n etf_controller.print_help()\n plt.close(\"all\")\n while True:\n # Get input command from user\n if session and gtff.USE_PROMPT_TOOLKIT:\n completer = NestedCompleter.from_nested_dict(\n {c: None for c in etf_controller.CHOICES}\n )\n an_input = session.prompt(\n f\"{get_flair()} (etf)> \",\n completer=completer,\n )\n else:\n an_input = input(f\"{get_flair()} (etf)> \")\n\n try:\n process_input = etf_controller.switch(an_input)\n\n if process_input is not None:\n return process_input\n\n except SystemExit:\n print(\"The command selected doesn't exist\\n\")\n continue\n" ]
[ [ "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ding-ma/applied-ml
[ "91f5ade1984e84fd252fbc76d72f0ee8bd5c96d0" ]
[ "mini-project-2/twenty_news_run.py" ]
[ "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport itertools\r\nfrom random import randrange\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom datetime import datetime\r\nfrom backports.zoneinfo import ZoneInfo\r\nfrom model.CrossValidation import CrossVal\r\nfrom model.Helpers import evaluate_acc, print_acc_err, DATASET_PATH, NAIVE_BAYES_REPEAT_DICT, LOGISITC_REPEAT_DICT\r\nfrom model.NaiveBayes import BernoulliBayes, MultiNomialBayes\r\nimport sys\r\nfrom statistics import mean\r\nimport logging\r\n\r\n\r\nMODEL = MultinomialNB\r\n\r\n# only needed for kCV\r\nVECTORIZER = TfidfVectorizer()\r\n\r\nexperiment_description = f\"\"\"\r\nOnly word tokenization, no other cleaning\r\nMultiNomialBayes(), CountVectorizer()\r\n\"\"\"\r\n\r\nlogging.basicConfig(\r\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\r\n level=logging.INFO,\r\n datefmt=\"%Y-%m-%d %H:%M:%S\",\r\n handlers=[\r\n logging.FileHandler(filename=\"logs/News-{}.log\".format(datetime.now().strftime(\"%Y-%m-%d_%H%M%S\"))),\r\n logging.StreamHandler(sys.stdout),\r\n ],\r\n)\r\n\r\nlogging.info(experiment_description)\r\n\r\ntwenty_news_df = pd.read_csv(DATASET_PATH.joinpath(\"twenty_news_row_array_token_lower.csv\"))\r\ntwenty_news_df = shuffle(twenty_news_df, random_state=1)\r\ntwenty_news_df[\"sentence\"] = twenty_news_df[\"sentence\"].apply(lambda x: \" \".join(eval(x)))\r\n\r\ntwenty_news_df_X = twenty_news_df[\"sentence\"]\r\ntwenty_news_df_y = twenty_news_df[\"target\"]\r\n\r\ntwenty_CV = CrossVal(twenty_news_df_X, twenty_news_df_y)\r\nres = twenty_CV.kfoldCV(MultiNomialBayes(), CountVectorizer())\r\nprint_acc_err(res)\r\n" ]
[ [ "sklearn.utils.shuffle", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.feature_extraction.text.CountVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pjordan/deep-reinforcement-learning
[ "4784635b67a30aadb5a7d93a0945781f55b6dccf" ]
[ "p3_collab-compet/view-actors.py" ]
[ "from model import Actor, Critic\nfrom config import Config\nimport torch\nfrom rlcc.act import NetworkActor, StackedActor\nfrom unityagents import UnityEnvironment\nimport numpy as np\nimport imageio\nimport os\nfrom itertools import cycle\n\nconfigs = []\nnames = []\n\n# \nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-01s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-001s\")\n\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-100p-0001s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-128bs-10p-01s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 400\nconfigs[-1].fcs1_units = 400\nconfigs[-1].fc2_units = 300\nnames.append(\"ddpg-400-300-256bs-100p-001s\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 100\nconfigs[-1].fcs1_units = 100\nconfigs[-1].fc2_units = 75\nnames.append(\"ddpg-100-75-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 100\nconfigs[-1].fcs1_units = 100\nconfigs[-1].fc2_units = 75\nnames.append(\"ddpg-100-75-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 50\nconfigs[-1].fcs1_units = 50\nconfigs[-1].fc2_units = 35\nnames.append(\"ddpg-50-35-128bs-100p-001s-3t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 50\nconfigs[-1].fcs1_units = 50\nconfigs[-1].fc2_units = 35\nnames.append(\"ddpg-50-35-128bs-100p-001s-4t\")\n\nconfigs.append(Config())\nconfigs[-1].fc1_units = 200\nconfigs[-1].fcs1_units = 200\nconfigs[-1].fc2_units = 150\nnames.append(\"ddpg-200-150-256bs-100p-001s-3t\")\n\n\nactors = []\nfor c,n in zip(configs, names):\n model_path = 'saved-models/{}/checkpoint_actor.pth'.format(n)\n actor_model = Actor(c)\n actor_model.load_state_dict(torch.load(model_path, map_location='cpu'))\n actor_model.to(c.device)\n base_actor = NetworkActor(actor_model, c.device)\n actor = StackedActor([base_actor, base_actor])\n actors.append(actor)\n\nenv = UnityEnvironment(file_name=\"Tennis.app\")\n\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\nactor_iter = cycle(actors)\n\nwhile True:\n env_info = env.reset(train_mode=False)[brain_name]\n states = env_info.vector_observations \n frames = []\n actor = next(actor_iter)\n while True:\n actions = actor.act(states) \n env_info = env.step(actions)[brain_name]\n # print(env_info.visual_observations)\n # frames.append(env_info.visual_observations[0])\n states = env_info.vector_observations \n dones = env_info.local_done \n if np.any(dones): \n break\n\n#imageio.mimsave(os.path.join('episode-gifs', 'ddpg-200-150-128bs-100p-001s-3t.gif'), frames, duration=.04)\n \n#env.close()\n" ]
[ [ "numpy.any", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hytsang/cs-ranking
[ "241626a6a100a27b96990b4f199087a6dc50dcc0", "241626a6a100a27b96990b4f199087a6dc50dcc0" ]
[ "csrank/dataset_reader/labelranking/survey_dataset_reader.py", "csrank/dataset_reader/objectranking/util.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.preprocessing import Imputer, StandardScaler\nfrom sklearn.utils import check_random_state\n\nfrom csrank.constants import LABEL_RANKING\nfrom csrank.util import ranking_ordering_conversion\nfrom ..dataset_reader import DatasetReader\n\n\nclass SurveyDatasetReader(DatasetReader):\n def __init__(self, random_state=None, **kwargs):\n super(SurveyDatasetReader, self).__init__(learning_problem=LABEL_RANKING, dataset_folder='survey_data',\n **kwargs)\n self.train_file = os.path.join(self.dirname, 'rawdata_all.dta')\n self.random_state = check_random_state(random_state)\n self.__load_dataset__()\n\n def __load_dataset__(self):\n df = pd.io.stata.read_stata(self.train_file)\n orderings = []\n features = []\n for row in df.itertuples():\n orderings.append(row[4:8])\n context_feature = [float(i) if i != '.' else np.NAN for i in row[13:33]]\n features.append(context_feature)\n X = np.array(features)\n X = Imputer().fit_transform(X)\n X = np.array([np.log(np.array(X[:, i]) + 1) for i in range(len(features[0]))])\n X = np.array(X.T)\n self.X = StandardScaler().fit_transform(X)\n orderings = np.array(orderings) - 1\n self.rankings = ranking_ordering_conversion(orderings)\n\n def get_train_test_dataset(self):\n cv_iter = ShuffleSplit(n_splits=1, test_size=0.3, random_state=self.random_state)\n (train_idx, test_idx) = list(cv_iter.split(self.X))[0]\n return self.X[train_idx], self.rankings[train_idx], self.X[test_idx], self.rankings[test_idx]\n\n def get_complete_dataset(self):\n return self.X, self.rankings\n", "import logging\nfrom itertools import combinations\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import f1_score\n\nfrom csrank.util import ranking_ordering_conversion\n\n__all__ = ['generate_complete_pairwise_dataset', 'complete_linear_regression_dataset',\n 'complete_linear_regression_dataset', \"weighted_cosine_similarity\", \"get_key_for_indices\"]\n\n\ndef generate_pairwise_instances(features):\n pairs = np.array(list(combinations(features, 2)))\n\n n_pairs = len(pairs)\n neg_indices = np.arange(0, n_pairs, 2)\n\n a, b = np.copy(pairs[neg_indices, 0]), np.copy(pairs[neg_indices, 1])\n pairs[neg_indices, 1] = a\n pairs[neg_indices, 0] = b\n\n X1 = pairs[:, 0]\n X2 = pairs[:, 1]\n Y_double = np.ones([n_pairs, 1]) * np.array([1, 0])\n Y_single = np.repeat(1, n_pairs)\n\n Y_double[neg_indices] = [0, 1]\n Y_single[neg_indices] = 0\n return X1, X2, Y_double, Y_single\n\n\ndef generate_complete_pairwise_dataset(X, rankings):\n try:\n n_instances, n_objects, n_features = X.shape\n rankings = rankings.astype(int)\n rankings -= np.min(rankings)\n orderings = ranking_ordering_conversion(rankings)\n X_sorted = [X[i, orderings[i], :] for i in range(n_instances)]\n except ValueError:\n # TODO Add the code to change the rankings to orderings and sort X according to that\n logger = logging.getLogger(\"generate_complete_pairwise_dataset\")\n logger.error(\"Value Error: {}, {} \".format(X[0], rankings[0]))\n X_sorted = X\n Y_double = []\n X1 = []\n X2 = []\n Y_single = []\n for features in X_sorted:\n x1, x2, y1, y2 = generate_pairwise_instances(features)\n X1.extend(x1)\n X2.extend(x2)\n Y_double.extend(y1)\n Y_single.extend(y2)\n X1 = np.array(X1)\n X2 = np.array(X2)\n Y_double = np.array(Y_double)\n Y_single = np.array(Y_single)\n X_train = X1 - X2\n return X_train, X1, X2, Y_double, Y_single\n\n\ndef complete_linear_regression_dataset(X, rankings):\n X1 = []\n 
Y_single = []\n for features, rank in zip(X, rankings):\n X1.extend(features)\n norm_ranks = rank / np.max(rank, axis=0)\n Y_single.extend(norm_ranks)\n X1 = np.array(X1)\n Y_single = np.array(Y_single)\n return X1, Y_single\n\n\ndef get_key_for_indices(idx1, idx2):\n return str(tuple(sorted([idx1, idx2])))\n\n\ndef weighted_cosine_similarity(weights, x, y):\n denominator = np.sqrt(np.sum(weights * x * x)) * np.sqrt(\n np.sum(weights * y * y))\n sim = np.sum(weights * x * y) / denominator\n return sim\n\n\ndef similarity_function_for_multilabel_instances(X_labels, Y_labels, X, Y):\n similarity = f1_score(X_labels, Y_labels, average='macro')\n similarity = np.dot(X, Y) / (np.linalg.norm(X) * np.linalg.norm(Y)) + similarity\n return similarity\n\n\ndef initialize_similarity_matrix(mypath):\n dataFrame = pd.read_csv(mypath)\n similarity_dictionary = dataFrame.set_index('col_major_index')['similarity'].to_dict()\n return similarity_dictionary\n\n\ndef sub_sampling(name, Xt, Yt, n_objects=5):\n logger = logging.getLogger(name=name)\n bucket_size = int(Xt.shape[1] / n_objects)\n # logger.info(\"#########################################################################\")\n # logger.info(\"X instances {} objects {} bucket_size {}\".format(Xt.shape[0], Xt.shape[1], bucket_size))\n X_train = []\n Y_train = []\n for i in range(bucket_size):\n X = np.copy(Xt)\n Y = np.copy(Yt)\n rs = np.random.RandomState(42 + i)\n idx = rs.randint(bucket_size, size=(len(X), n_objects))\n # TODO: subsampling multiple rankings\n idx += np.arange(start=0, stop=X.shape[1], step=bucket_size)[:n_objects]\n X = X[np.arange(X.shape[0])[:, None], idx]\n Y = Y[np.arange(X.shape[0])[:, None], idx]\n tmp_sort = Y.argsort(axis=-1)\n Y = np.empty_like(Y)\n Y[np.arange(len(X))[:, None], tmp_sort] = np.arange(n_objects)\n if len(X_train) == 0:\n X_train = X\n Y_train = Y\n else:\n Y_train = np.concatenate([Y_train, Y], axis=0)\n X_train = np.concatenate([X_train, X], axis=0)\n logger.info(\"Sampled instances {} objects {}\".format(X_train.shape[0], X_train.shape[1]))\n return X_train, Y_train\n" ]
[ [ "pandas.io.stata.read_stata", "sklearn.model_selection.ShuffleSplit", "sklearn.preprocessing.Imputer", "sklearn.preprocessing.StandardScaler", "numpy.array", "sklearn.utils.check_random_state" ], [ "numpy.dot", "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.empty_like", "numpy.linalg.norm", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.copy", "sklearn.metrics.f1_score", "numpy.repeat", "numpy.array", "numpy.sum", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jbwang1997/BboxToolkit
[ "a1e6b9dfcf1f533b630d656dc114ff62a5b37ba9" ]
[ "BboxToolkit/datasets/HRSCio.py" ]
[ "import os\nimport time\nimport os.path as osp\nimport xml.etree.ElementTree as ET\nimport numpy as np\n\nfrom PIL import Image\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom .misc import img_exts, get_classes, _ConstMapper\n\n\ndef load_hrsc(img_dir, ann_dir, classes=None, img_keys=None, obj_keys=None, nproc=10):\n assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'\n assert ann_dir is None or osp.isdir(ann_dir), f'The {ann_dir} is not an existing dir!'\n\n classes = get_classes('HRSC' if classes is None else classes)\n if (len(classes) == 1) and (classes[0] == 'ship'):\n cls2lbl = _ConstMapper(0)\n else:\n cls2lbl = dict()\n for i, cls in enumerate(classes):\n if len(cls) < 9:\n cls = '1' + '0' * (8 - len(cls)) + cls\n cls2lbl[cls] = i\n\n img_keys = dict() if img_keys is None else img_keys\n obj_keys = dict() if obj_keys is None else obj_keys\n\n contents = []\n print('Starting loading HRSC dataset information.')\n start_time = time.time()\n _load_func = partial(_load_hrsc_single,\n img_dir=img_dir,\n ann_dir=ann_dir,\n img_keys=img_keys,\n obj_keys=obj_keys,\n cls2lbl=cls2lbl)\n if nproc > 1:\n pool = Pool(nproc)\n contents = pool.map(_load_func, os.listdir(img_dir))\n pool.close()\n else:\n contents = list(map(_load_func, os.listdir(img_dir)))\n contents = [c for c in contents if c is not None]\n end_time = time.time()\n print(f'Finishing loading HRSC, get {len(contents)} images,',\n f'using {end_time-start_time:.3f}s.')\n return contents, ['ship']\n\n\ndef _load_hrsc_single(imgfile, img_dir, ann_dir, img_keys, obj_keys, cls2lbl):\n img_id, ext = osp.splitext(imgfile)\n if ext not in img_exts:\n return None\n\n xmlfile = None if ann_dir is None else osp.join(ann_dir, img_id+'.xml')\n content = _load_hrsc_xml(xmlfile, img_keys, obj_keys, cls2lbl)\n\n if not ('width' in content and 'height' in content):\n imgpath = osp.join(img_dir, imgfile)\n size = Image.open(imgpath).size\n content.update(dict(width=size[0], height=size[1]))\n content.update(dict(filename=imgfile, id=img_id))\n return content\n\n\ndef _load_hrsc_xml(xmlfile, img_keys, obj_keys, cls2lbl):\n hbboxes, bboxes, labels, diffs = list(), list(), list(), list()\n content = {k: None for k in img_keys}\n ann = {k: [] for k in obj_keys}\n if xmlfile is None:\n pass\n elif not osp.isfile(xmlfile):\n print(f\"Can't find {xmlfile}, treated as empty xmlfile\")\n else:\n tree = ET.parse(xmlfile)\n root = tree.getroot()\n\n content['width'] = int(root.find('Img_SizeWidth').text)\n content['height'] = int(root.find('Img_SizeHeight').text)\n for k, xml_k in img_keys.items():\n node = root.find(xml_k)\n value = None if node is None else node.text\n content[k] = value\n\n objects = root.find('HRSC_Objects')\n for obj in objects.findall('HRSC_Object'):\n cls = obj.find('Class_ID').text\n if cls not in cls2lbl:\n continue\n\n labels.append(cls2lbl[cls])\n hbboxes.append([\n float(obj.find('box_xmin').text),\n float(obj.find('box_ymin').text),\n float(obj.find('box_xmax').text),\n float(obj.find('box_ymax').text)\n ])\n bboxes.append([\n float(obj.find('mbox_cx').text),\n float(obj.find('mbox_cy').text),\n float(obj.find('mbox_w').text),\n float(obj.find('mbox_h').text),\n -float(obj.find('mbox_ang').text)\n ])\n diffs.append(\n int(obj.find('difficult').text))\n\n for k, xml_k in obj_keys.items():\n node = obj.find(xml_k)\n value = None if node is None else node.text\n ann[k].append(value)\n\n hbboxes = np.array(hbboxes, dtype=np.float32) if hbboxes \\\n else np.zeros((0, 4), dtype=np.float32)\n 
bboxes = np.array(bboxes, dtype=np.float32) if bboxes \\\n else np.zeros((0, 5), dtype=np.float32)\n labels = np.array(labels, dtype=np.int64) if diffs \\\n else np.zeros((0, ), dtype=np.int64)\n diffs = np.array(diffs, dtype=np.int64) if diffs \\\n else np.zeros((0, ), dtype=np.int64)\n\n ann['hbboxes'] = hbboxes\n ann['bboxes'] = bboxes\n ann['labels'] = labels\n ann['diffs'] = diffs\n content['ann'] = ann\n return content\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
glauberrleite/ventilator-microcontroller
[ "3b0b489a71e841bf152059585de1d54f4e95e4cc" ]
[ "test/collect_data.py" ]
[ "import serial\nimport numpy as np\nimport time\nimport signal\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef signal_handler(sig, frame):\n ser.close()\n \n df = pd.DataFrame({'time':l_time, 'state':l_state, 'fl_int':l_fl_int, 'pres_int':l_pres_int, 'pres_pac':l_pres_pac, 'pres_exp':l_pres_exp, 'fl_pac':l_fl_pac})\n df.to_csv('list.csv', index=False)\n print(\"XAU\")\n\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n#plt.ion()\n#fig=plt.figure()\n\nk = 0\nTs = 0.01\nl_time = list()\nl_state = list()\nl_fl_int = list()\nl_fl_pac = list()\nl_pres_exp = list()\nl_pres_pac = list()\nl_pres_int = list()\nk = 0\nser = serial.Serial('/dev/ttyACM0', 9600)\nser.close()\nser.open()\n#ser.write(\"START\".encode())\n\nstate = \"\"\nfl_int = \"\"\nfl_pac = \"\"\npres_pac = \"\"\npres_int = \"\"\npres_exp = \"\"\n\nwhile True:\n data = ser.readline().decode('utf-8')\n\n #print(data)\n state, fl_int, pres_int, pres_pac, pres_exp, fl_pac = data.split('\\t')\n \n l_time.append(k * Ts)\n l_state.append(state)\n l_fl_int.append(float(fl_int))\n l_fl_pac.append(float(fl_pac))\n l_pres_int.append(float(pres_int))\n l_pres_pac.append(float(pres_pac))\n l_pres_exp.append(float(pres_exp))\n\n #plt.scatter(k * Ts, float(pres_pac), c='blue')\n\n #plt.cla()\n #plt.plot(l_time[len(l_time)-100:len(l_time)-1], l_state[len(l_state)-100:len(l_state)-1], linewidth=2, c='blue')\n\n #print(state + '\\t' + fl_int + '\\t' + fl_pac_ins + '\\t' + fl_pac_exp + '\\t' + pres_pac + '\\t' + pres_int)\n #print(y[:,0])\n\n k += 1\n\n time.sleep(Ts)\n #plt.show()\n #plt.pause(Ts) # Note this correction\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yieldsfalsehood/rng_fdw
[ "31a181a6a912f0b1072c7ed9f09c5bd0afa052b8" ]
[ "rng_fdw/__init__.py" ]
[ "#!/usr/bin/env python\n\nfrom multicorn import ForeignDataWrapper\n\nimport numpy as np\nimport scipy.stats\n\nclass RNGWrapper(ForeignDataWrapper):\n\n def __init__(self, options, columns):\n\n super(RNGWrapper, self).__init__(options, columns)\n self.columns = columns\n\n # default to the normal distribution if none was specified\n distribution = options.get(\"distribution\", \"norm\")\n\n # this should be made to fail indicating that the distribution\n # given doesn't exist\n try:\n self.func = getattr(scipy.stats, distribution)\n except:\n pass\n\n def execute(self, quals, columns):\n\n has_size = False\n size = 20\n params = dict()\n\n for qual in quals:\n\n # right now we only handle simple equality\n # constraints. any other predicates will cause no results\n # to be generated (because they won't be satisfied).\n if qual.is_list_operator or qual.operator != \"=\":\n pass\n\n # if a constraint on \"size\" is given, use that to override\n # the default value (20). otherwise, keep a record of the\n # parameters provided and their values\n if qual.field_name == \"size\":\n has_size = True\n size = qual.value\n else:\n params[qual.field_name] = np.float(qual.value)\n\n # instantiate a distribution object from the parameters and\n # generate some variates!\n F = self.func(**params)\n for x in F.rvs(size=size):\n # this is a messy way of saying:\n # 1. set the column \"val\" to the value of this variate\n # 2. include all the equality predicates that were passed\n # in as extracted above\n # 3. set the column \"size\" to the provided value if one\n # was given (otherwise leave it null)\n d = dict([(\"val\", x)] + params.items() + ([(\"size\", size)] if has_size else []))\n yield d\n" ]
[ [ "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Grazziela/ChiralLigands
[ "8a786212c464bdef8141643d29122c18fd73bc5f" ]
[ "Dragon_signature desciptors/Step 2 - Regression Models/RandomForestRegression_SHAP_Analysis.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 4 13:52:25 2019\r\n\r\nSHAP importance calculation for Random Forests with standard parameters\r\nusing training/test split\r\n\r\nObs: Run the code in the same folder as your data files\r\n\r\n@author: Grazziela Figueredo\r\n\"\"\"\r\nimport pandas as pd #for manipulating data\r\nimport numpy as np #for manipulating data\r\n\r\nimport sklearn #for building models\r\nimport sklearn.ensemble #for building models\r\nfrom sklearn.model_selection import train_test_split #for creating a hold-out sample\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nimport shap #SHAP package for model interpretability\r\nimport matplotlib.pyplot as plt \r\nfrom matplotlib import cm\r\n\r\ndef plot_regression(y, y_hat, figure_title):\r\n fig, ax = plt.subplots()\r\n ax.scatter(y, y_hat)\r\n ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)\r\n ax.set_xlabel('Measured ' + dependent_variable, fontsize = 13)\r\n ax.set_ylabel('Predicted ' + dependent_variable, fontsize = 13)\r\n plt.title(figure_title, fontsize = 13)\r\n coefficient_of_dermination = r2_score(y, y_hat)\r\n legend = 'R2: '+str(float(\"{0:.2f}\".format(coefficient_of_dermination)))\r\n plt.legend(['Best fit',legend],loc = 'upper left', fontsize = 13)\r\n plt.show()\r\n \r\n rmse = np.sqrt(mean_squared_error(y, y_hat))\r\n print(\"\\n\\n RMSE train RF: %f\" % (rmse)) \r\n print(\"\\n R2 train RF: %f\" % (coefficient_of_dermination))\r\n\r\n\r\n# Random Forest Regression using standard parameters\r\ndef random_forest_regression(X_train, y_train, X_test, y_test): \r\n rf = sklearn.ensemble.RandomForestRegressor()\r\n rf.fit(X_train, y_train)\r\n y_hat = rf.predict(X_train)\r\n \r\n plot_regression(y_train, y_hat, \"Results for the Training Set\")\r\n y_hat = rf.predict(X_test)\r\n plot_regression(y_test, y_hat, \"Results for the Test Set\")\r\n \r\n return rf\r\n\r\n\r\n# Reading input data\r\ndata = pd.read_excel('LigandSubstrateBoronDragonDescriptors_LASSO.xlsx')\r\n\r\n# Determining X and y arrays. 
Y is supposed to be the last column of the input file\r\nindex = len(data.columns)\r\nX = data.iloc[:,0:index-1]\r\ny = data.iloc[:,index-1]\r\n\r\n# Variable used to plot the y axis name in the regression graphs\r\ndependent_variable = y.name\r\n\r\n# Training and test sets split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size = 0.3)\r\n\r\n##############################################################################\r\n\r\nrf = random_forest_regression(X_train, y_train, X_test, y_test)\r\n\r\n# Random Forest explainer\r\nexplainerRF = shap.TreeExplainer(rf)\r\nshap_values_RF_test = explainerRF.shap_values(X_test)\r\nshap_values_RF_train = explainerRF.shap_values(X_train)\r\n\r\ndf_shap_RF_test = pd.DataFrame(shap_values_RF_test, columns=X_test.columns.values)\r\ndf_shap_RF_train = pd.DataFrame(shap_values_RF_train, columns=X_train.columns.values)\r\n\r\n# if a feature has 10 or less unique values then treat it as categorical\r\ncategorical_features = np.argwhere(np.array([len(set(X_train.values[:,x]))\r\nfor x in range(X_train.values.shape[1])]) <= 10).flatten()\r\n\r\n# Printing SHAP results\r\nprint('Shap for RF:\\n\\n')\r\nplt.figure()\r\nshap.summary_plot(shap_values_RF_train, X_train, plot_type=\"bar\", max_display = 8) \r\n\r\nplt.figure()\r\nshap.summary_plot(shap_values_RF_train, X_train, max_display = 8, color_bar_label = 'Descriptor value', show = False, plot_size= (4.5,3))\r\nplt.grid()\r\n#Changing plot colours\r\nfor fc in plt.gcf().get_children():\r\n for fcc in fc.get_children():\r\n if hasattr(fcc, \"set_cmap\"):\r\n fcc.set_cmap(cm.get_cmap('coolwarm'))\r\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.legend", "pandas.read_excel", "sklearn.metrics.r2_score", "matplotlib.pyplot.title", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.gcf", "matplotlib.pyplot.grid", "matplotlib.cm.get_cmap", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
BEL-Public/mffpy
[ "8515824d89a77cf10f7c36bb405f61d338b6f5fe" ]
[ "mffpy/header_block/header_block.py" ]
[ "\"\"\"Management of header blocks in .mff binary files\n\n.mff binary files have a blocked structure. Consecutive blocks can be\nseparated by a header, which brings us to the topic of this module.\n\nThe header consists of either a single flag (`flag=0`) or a block describing\nthe following bytes of signal data (`flag=1`). Regardless, the flag is 32-bit\nwide.\n\nThis module adds functionality to read and write these header blocks.\n\n**Header block structure**\n\n+-------------+-------------+---------------------------------------+\n| start byte | end byte | description |\n+-------------+-------------+---------------------------------------+\n| 0 | 4 | header flag, if 1, header present |\n| 4 | 8 | bytes in header := `hb` |\n| 8 | 12 | bytes in data blob w/out header |\n| 12 | 16 | channel count := `nc` |\n| 16 | 16 + 4 * nc | per-channel byte offset |\n| 16 + 4 * nc | 16 + 8 * nc | per-channel frequency and byte depths |\n| 16 + 8 * nc | hb | optional header bytes |\n+-------------+-------------+---------------------------------------+\n\nOptional header bytes are described in \"./optional_header_block.py\"\n\"\"\"\n\nfrom typing import Optional, Tuple\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom .helpers import FileLike, read, skip, write\nfrom . import optional_header_block as opt\n\nHEADER_BLOCK_PRESENT = 1\n\n_HeaderBlock = namedtuple('_HeaderBlock', [\n 'header_size',\n 'block_size',\n 'num_channels',\n 'num_samples',\n 'sampling_rate',\n 'optional_header'\n])\n\n\nclass HeaderBlock(_HeaderBlock):\n\n def __new__(cls,\n block_size: int,\n num_channels: int,\n num_samples: int,\n sampling_rate: int,\n header_size: Optional[int] = None,\n optional_header: opt.BlockTypes = opt.NoOptHeaderBlock()):\n \"\"\"create new HeaderBlock instance\n\n Parameters\n ----------\n block_size : byte size of the block\n num_channels : channel count in the block\n num_samples : sample count per channel in the block\n sampling_rate : sampling_rate per channel in the block\n header_size : byte size of the header (computed if None)\n optional_header : optional header with additional fields\n \"\"\"\n computed_size = cls.compute_byte_size(num_channels, optional_header)\n if header_size and header_size != computed_size:\n raise ValueError(f\"\"\"header of inconsistent size:\n {header_size} != {computed_size}\"\"\")\n\n header_size = computed_size\n return super().__new__(cls, header_size, block_size, num_channels,\n num_samples, sampling_rate, optional_header)\n\n @classmethod\n def from_file(cls, fp: FileLike):\n \"\"\"return HeaderBlock, read from fp\"\"\"\n\n # Each block starts with a 4-byte-long header flag which is\n # * `0`: there is no header\n # * `1`: it follows a header\n if read(fp, 'i') == 0:\n return None\n # Read general information\n header_size, block_size, num_channels = read(fp, '3i')\n # number of 4-byte samples per channel in the data block\n num_samples = (block_size//num_channels) // 4\n # Read channel-specific information\n # Skip byte offsets\n skip(fp, 4 * num_channels)\n # Sample rate/depth: Read one skip, over the rest\n # We also check that depth is always 4-byte floats (32 bit)\n sampling_rate, depth = cls.decode_rate_depth(read(fp, 'i'))\n skip(fp, 4 * (num_channels - 1))\n assert depth == 32, f\"\"\"\n Unable to read MFF with `depth != 32` [`depth={depth}`]\"\"\"\n optional_header = opt.from_file(fp)\n return cls(\n block_size=block_size,\n header_size=header_size,\n num_samples=num_samples,\n num_channels=num_channels,\n sampling_rate=sampling_rate,\n 
optional_header=optional_header,\n )\n\n def write(self, fp: FileLike):\n \"\"\"write HeaderBlock to file pointer `fp`\"\"\"\n write(fp, '4i', (\n HEADER_BLOCK_PRESENT,\n self.header_size,\n self.block_size,\n self.num_channels\n ))\n num_samples = (self.block_size//self.num_channels) // 4\n # Write channel offset into the data block\n arr = 4 * num_samples * np.arange(self.num_channels).astype(np.int32)\n fp.write(arr.tobytes())\n # write sampling-rate/depth word\n sr_d = self.encode_rate_depth(self.sampling_rate, 32)\n arr = sr_d * np.ones(self.num_channels, dtype=np.int32)\n fp.write(arr.tobytes())\n self.optional_header.write(fp)\n\n @staticmethod\n def decode_rate_depth(x: int) -> Tuple[int, int]:\n \"\"\"return rate and depth from encoded representation\"\"\"\n rate = x >> 8\n depth = x & 0xff\n return rate, depth\n\n @staticmethod\n def encode_rate_depth(rate: int, depth: int) -> int:\n \"\"\"return joined rate and byte depth of samples\n\n Sampling rate and sample depth are encoded in a single 4-byte integer.\n The first byte is the depth the last 3 bytes give the sampling rate.\n \"\"\"\n assert depth < (\n 1 << 8), f\"depth must be smaller than 256 (got {depth})\"\n assert rate < (\n 1 << 24), f\"depth must be smaller than {1<<24} (got {rate})\"\n return (rate << 8) + depth\n\n @staticmethod\n def compute_byte_size(num_channels: int,\n optional_header: opt.BlockTypes) -> int:\n \"\"\"returns sum of header byte size and optional header size\n\n `(5 + ..)`: The 4-byte int of the optional header byte size constitutes\n the \"5\", not in `optional_header.byte_size`. See the file description\n for detailed infos on all bytes.\n \"\"\"\n return 4 * (5 + 2 * num_channels) + optional_header.byte_size\n" ]
[ [ "numpy.arange", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ashao/SmartSim
[ "54ca7a72e4e19a167b67b8d16daf113e81f75817" ]
[ "smartsim/experiment.py" ]
[ "# BSD 2-Clause License\n#\n# Copyright (c) 2021, Hewlett Packard Enterprise\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os.path as osp\nimport time\nfrom os import getcwd\nfrom pprint import pformat\n\nimport pandas as pd\nfrom tqdm import trange\n\nfrom .control import Controller\nfrom .entity import Ensemble, EntityList, Model, SmartSimEntity\nfrom .error import SmartSimError\nfrom .generation import Generator\nfrom .utils import get_logger\nfrom .utils.entityutils import separate_entities\nfrom .utils.helpers import colorize, init_default\n\nlogger = get_logger(__name__)\n\n\nclass Experiment:\n \"\"\"Experiments are the main user interface in SmartSim.\n\n Experiments can create instances to launch called ``Model``\n and ``Ensemble``. Through the ``Experiment`` interface, users\n can programmatically create, configure, start, stop, and\n query the instances they create.\n \"\"\"\n\n def __init__(self, name, exp_path=None, launcher=\"local\"):\n \"\"\"Initialize an ``Experiment``\n\n :param name: name for the ``Experiment``\n :type name: str\n :param exp_path: path to location of ``Experiment`` directory if generated\n :type exp_path: str\n :param launcher: type of launcher, options are \"slurm\", \"pbs\",\n \"cobalt\", or \"local\". 
Defaults to \"local\"\n :type launcher: str\n \"\"\"\n self.name = name\n if exp_path:\n if not isinstance(exp_path, str):\n raise TypeError(\"exp_path argument was not of type str\")\n if not osp.isdir(osp.abspath(exp_path)):\n raise NotADirectoryError(\"Experiment path provided does not exist\")\n exp_path = osp.abspath(exp_path)\n self.exp_path = init_default(osp.join(getcwd(), name), exp_path, str)\n self._control = Controller(launcher=launcher)\n\n def start(self, *args, block=True, summary=False):\n \"\"\"Launch instances passed as arguments\n\n Start the ``Experiment`` by turning specified instances into jobs\n for the underlying launcher and launching them.\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the start method.\n\n :param block: block execution until all non-database\n jobs are finished, defaults to True\n :type block: bool, optional\n :param summary: print a launch summary prior to launch,\n defaults to False\n :type summary: bool, optional\n \"\"\"\n try:\n if summary:\n self._launch_summary(*args)\n self._control.start(*args, block=block)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def stop(self, *args):\n \"\"\"Stop specific instances launched by this ``Experiment``\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the start method.\n\n :raises TypeError: if wrong type\n :raises SmartSimError: if stop request fails\n \"\"\"\n try:\n for entity in args:\n if isinstance(entity, SmartSimEntity):\n self._control.stop_entity(entity)\n elif isinstance(entity, EntityList):\n self._control.stop_entity_list(entity)\n else:\n raise TypeError(\n f\"Argument was of type {type(entity)} not SmartSimEntity or EntityList\"\n )\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def generate(self, *args, tag=None, overwrite=False):\n \"\"\"Generate the file structure for an ``Experiment``\n\n ``Experiment.generate`` creates directories for each instance\n passed to organize Experiments that launch many instances\n\n If files or directories are attached to ``Model`` objects\n using ``Model.attach_generator_files()``, those files or\n directories will be symlinked, copied, or configured and\n written into the created directory for that instance.\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to the generate method.\n\n :param tag: tag used in `to_configure` generator files\n :type tag: str, optional\n :param overwrite: overwrite existing folders and contents\n :type overwrite: bool, optional\n \"\"\"\n try:\n generator = Generator(self.exp_path, overwrite=overwrite)\n if tag:\n generator.set_tag(tag)\n generator.generate_experiment(*args)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def poll(self, interval=10, verbose=True):\n \"\"\"Monitor jobs through logging to stdout.\n\n This method should only be used if jobs were launched\n with ``Experiment.start(block=False)``\n\n :param interval: frequency of logging to stdout\n :type interval: int\n :param verbose: set verbosity\n :type verbose: bool\n :raises SmartSimError:\n \"\"\"\n try:\n self._control.poll(interval, verbose)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def finished(self, entity):\n \"\"\"Query if a job as completed\n\n A instance of ``Model``, ``Ensemble`` can be passed\n as an argument.\n\n :param entity: object launched by this ``Experiment``\n :type entity: SmartSimEntity | EntityList\n :returns: True if job has completed\n 
:rtype: bool\n \"\"\"\n try:\n return self._control.finished(entity)\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def get_status(self, *args):\n \"\"\"Query the status of a job\n\n Instances of ``Model``, ``Ensemble`` and ``Orchestrator``\n can all be passed as arguments to ``Experiment.get_status()``\n\n :returns: status of the job\n :rtype: list[str]\n :raises SmartSimError: if status retrieval fails\n :raises TypeError:\n \"\"\"\n try:\n statuses = []\n for entity in args:\n if isinstance(entity, SmartSimEntity):\n statuses.append(self._control.get_entity_status(entity))\n elif isinstance(entity, EntityList):\n statuses.extend(self._control.get_entity_list_status(entity))\n else:\n raise TypeError(\n f\"Argument was of type {type(entity)} not SmartSimEntity or EntityList\"\n )\n return statuses\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def create_ensemble(\n self,\n name,\n params=None,\n batch_settings=None,\n run_settings=None,\n replicas=None,\n perm_strategy=\"all_perm\",\n **kwargs,\n ):\n \"\"\"Create an ``Ensemble`` of ``Model`` instances\n\n Ensembles can be launched sequentially or as a batch\n if using a non-local launcher. e.g. slurm\n\n Ensembles require one of the following combinations\n of arguments\n - ``run_settings`` and ``params``\n - ``run_settings`` and ``replicas``\n - ``batch_settings``\n - ``batch_settings``, ``run_settings``, and ``params``\n - ``batch_settings``, ``run_settings``, and ``replicas``\n\n If given solely batch settings, an empty ensemble\n will be created that models can be added to manually\n through ``Ensemble.add_model()``.\n The entire ensemble will launch as one batch.\n\n Provided batch and run settings, either ``params``\n or ``replicas`` must be passed and the entire ensemble\n will launch as a single batch.\n\n Provided solely run settings, either ``params``\n or ``replicas`` must be passed and the ensemble members\n will each launch sequentially.\n\n :param name: name of the ensemble\n :type name: str\n :param params: parameters to expand into ``Model`` members\n :type params: dict[str, Any]\n :param batch_settings: describes settings for ``Ensemble`` as batch workload\n :type batch_settings: BatchSettings\n :param run_settings: describes how each ``Model`` should be executed\n :type run_settings: RunSettings\n :param replicas: number of replicas to create\n :type replicas: int\n :param perm_strategy: strategy for expanding ``params`` into\n ``Model`` instances from params argument\n options are \"all_perm\", \"stepped\", \"random\"\n or a callable function\n :type perm_strategy: str\n :raises SmartSimError: if initialization fails\n :return: ``Ensemble`` instance\n :rtype: Ensemble\n \"\"\"\n try:\n new_ensemble = Ensemble(\n name,\n params,\n batch_settings=batch_settings,\n run_settings=run_settings,\n perm_strat=perm_strategy,\n replicas=replicas,\n **kwargs,\n )\n return new_ensemble\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def create_model(\n self, name, run_settings, params=None, path=None, enable_key_prefixing=False\n ):\n \"\"\"Create a ``Model``\n\n By default, all ``Model`` instances start with the cwd\n as their path unless specified. 
If specified or not, upon\n user passing the instance to ``Experiment.generate()``,\n the ``Model`` path will be overwritten and replaced\n with the created directory for the ``Model``\n\n :param name: name of the model\n :type name: str\n :param run_settings: defines how ``Model`` should be run,\n :type run_settings: RunSettings\n :param params: model parameters for writing into configuration files\n :type params: dict, optional\n :param path: path to where the model should be executed at runtime\n :type path: str, optional\n :param enable_key_prefixing: If true, data sent to the Orchestrator\n using SmartRedis from this ``Model`` will\n be prefixed with the ``Model`` name.\n :type enable_key_prefixing: bool\n :raises SmartSimError: if initialization fails\n :return: the created ``Model``\n :rtype: Model\n \"\"\"\n path = init_default(getcwd(), path, str)\n params = init_default({}, params, dict)\n try:\n new_model = Model(name, params, path, run_settings)\n if enable_key_prefixing:\n new_model.enable_key_prefixing()\n return new_model\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def reconnect_orchestrator(self, checkpoint):\n \"\"\"Reconnect to a running ``Orchestrator``\n\n This method can be used to connect to a Redis deployment\n that was launched by a previous ``Experiment``. This way\n users can run many experiments utilizing the same Redis\n deployment\n\n :param checkpoint: the `smartsim_db.dat` file created\n when an ``Orchestrator`` is launched\n :type checkpoint: str\n \"\"\"\n try:\n orc = self._control.reload_saved_db(checkpoint)\n return orc\n except SmartSimError as e:\n logger.error(e)\n raise\n\n def summary(self):\n \"\"\"Return a summary of the ``Experiment``\n\n The summary will show each instance that has been\n launched and completed in this ``Experiment``\n\n :return: Dataframe of ``Experiment`` history\n :rtype: pd.DataFrame\n \"\"\"\n index = 0\n df = pd.DataFrame(\n columns=[\n \"Name\",\n \"Entity-Type\",\n \"JobID\",\n \"RunID\",\n \"Time\",\n \"Status\",\n \"Returncode\",\n ]\n )\n # TODO should this include running jobs?\n for job in self._control._jobs.completed.values():\n for run in range(job.history.runs + 1):\n df.loc[index] = [\n job.entity.name,\n job.entity.type,\n job.history.jids[run],\n run,\n job.history.job_times[run],\n job.history.statuses[run],\n job.history.returns[run],\n ]\n index += 1\n return df\n\n def _launch_summary(self, *args):\n \"\"\"Experiment pre-launch summary of entities that will be launched\"\"\"\n\n def sprint(p):\n print(p, flush=True)\n\n sprint(\"\\n\")\n models, ensembles, orchestrator = separate_entities(args)\n\n header = colorize(\"=== LAUNCH SUMMARY ===\", color=\"cyan\", bold=True)\n exname = colorize(\"Experiment: \" + self.name, color=\"green\", bold=True)\n expath = colorize(\"Experiment Path: \" + self.exp_path, color=\"green\")\n launch = colorize(\n \"Launching with: \" + str(self._control._launcher), color=\"green\"\n )\n numens = colorize(\"# of Ensembles: \" + str(len(ensembles)), color=\"green\")\n numods = colorize(\"# of Models: \" + str(len(models)), color=\"green\")\n has_orc = \"yes\" if orchestrator else \"no\"\n orches = colorize(\"Database: \" + has_orc, color=\"green\")\n\n sprint(f\"{header}\")\n sprint(f\"{exname}\\n{expath}\\n{launch}\\n{numens}\\n{numods}\\n{orches}\\n\")\n\n if ensembles:\n sprint(colorize(\"=== ENSEMBLES ===\", color=\"cyan\", bold=True))\n for ens in ensembles:\n name = colorize(ens.name, color=\"green\", bold=True)\n num_models = colorize(\n \"# of models in 
ensemble: \" + str(len(ens)), color=\"green\"\n )\n batch_settings = colorize(\n \"Batch Settings: \\n\" + str(ens.batch_settings),\n color=\"green\",\n )\n run_settng = colorize(\n \"Run Settings: \\n\" + str(ens.run_settings),\n color=\"green\",\n )\n batch = colorize(f\"Launching as batch: {ens.batch}\", color=\"green\")\n\n sprint(f\"{name}\")\n sprint(f\"{num_models}\")\n sprint(f\"{batch}\")\n if ens.batch:\n print(f\"{batch_settings}\")\n else:\n sprint(f\"{run_settng}\")\n sprint(\"\\n\")\n if models:\n sprint(colorize(\"=== MODELS ===\", color=\"cyan\", bold=True))\n for model in models:\n model_name = colorize(model.name, color=\"green\", bold=True)\n parameters = colorize(\n \"Model Parameters: \\n\" + pformat(model.params), color=\"green\"\n )\n run_settng = colorize(\n \"Model Run Settings: \\n\" + str(model.run_settings),\n color=\"green\",\n )\n sprint(f\"{model_name}\")\n sprint(f\"{parameters}\")\n sprint(f\"{run_settng}\")\n sprint(\"\\n\")\n if orchestrator:\n sprint(colorize(\"=== DATABASE ===\", color=\"cyan\", bold=True))\n size = colorize(\n \"# of database nodes: \" + str(len(orchestrator)), color=\"green\"\n )\n batch = colorize(f\"Launching as batch: {orchestrator.batch}\", color=\"green\")\n sprint(f\"{batch}\")\n sprint(f\"{size}\")\n\n sprint(\"\\n\")\n\n wait, steps = 10, 100\n prog_bar = trange(\n steps,\n desc=\"Launching in...\",\n leave=False,\n ncols=80,\n mininterval=0.25,\n bar_format=\"{desc}: {bar}| {remaining} {elapsed}\",\n )\n for _ in prog_bar:\n time.sleep(wait / steps)\n\n def __str__(self):\n return self.name\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
WerzSoft/PElyzer
[ "18ab87bb4aafaf7a9f02749545a4ba110c1036fe" ]
[ "tests/estadisticas.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.manifold import TSNE\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport seaborn as sns\r\n\r\n\r\ndatos = pd.read_csv(\"pelyzer/recursos/dataset.csv\")\r\n\r\n#visualizacion valores nulos\r\nsns.heatmap(datos.isnull(), cbar=True, cmap=\"OrRd_r\")\r\nplt.title(\"Nulos heatmap\")\r\nplt.savefig(\"imagenes/nulos_heatmap.png\")\r\nplt.clf()\r\n\r\nsns.countplot('is_exe', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - exe\")\r\nplt.savefig(\"imagenes/tipo_exe.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_exe', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (exe)\")\r\nplt.savefig(\"imagenes/indice_tipo_exe.png\")\r\nplt.clf()\r\n\r\nsns.countplot('is_dll', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - dll\")\r\nplt.savefig(\"imagenes/tipo_dll.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_dll', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (dll)\")\r\nplt.savefig(\"imagenes/indice_tipo_dll.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('is_driver', hue='malware', data=datos)\r\nplt.title(\"Malware por tipo - driver\")\r\nplt.savefig(\"imagenes/tipo_driver.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='is_driver', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por tipo (driver)\")\r\nplt.savefig(\"imagenes/indice_tipo_driver.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('unk_opcodes', hue='malware', data=datos)\r\nplt.title(\"Malware por opdcodes desconocidos\")\r\nplt.savefig(\"imagenes/unk_opcodes.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='unk_opcodes', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por opcodes desconocidos\")\r\nplt.savefig(\"imagenes/indice_unk_opcodes.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('n_std_sec', hue='malware', data=datos)\r\nplt.title(\"Malware por secciones estandar\")\r\nplt.savefig(\"imagenes/secciones_estandar.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='n_std_sec', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por secciones estandar\")\r\nplt.savefig(\"imagenes/indice_secciones_estandar.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('n_susp_sec', hue='malware', data=datos)\r\nplt.title(\"Malware por secciones sospechosas\")\r\nplt.savefig(\"imagenes/secciones_sospechosas.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='n_susp_sec', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por secciones sospechosas\")\r\nplt.savefig(\"imagenes/indice_secciones_sospechosas.png\")\r\nplt.clf()\r\n\r\nsns.countplot('checksum_invalido', hue='malware', data=datos)\r\nplt.title(\"Malware por checksum invalido\")\r\nplt.savefig(\"imagenes/checksum.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='checksum_invalido', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por checksum invalido\")\r\nplt.savefig(\"imagenes/indice_checksum.png\")\r\nplt.clf()\r\n\r\n\r\nsns.countplot('firmado', hue='malware', data=datos)\r\nplt.title(\"Malware por firma\")\r\nplt.savefig(\"imagenes/firmado.png\")\r\nplt.clf()\r\n\r\nsns.barplot(x='firmado', y='malware', data=datos)\r\nplt.ylabel(\"Índice Malware\")\r\nplt.title(\"Índice de malware por 
firma\")\r\nplt.savefig(\"imagenes/indice_firma.png\")\r\nplt.clf()\r\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pawangeek/CovidHack
[ "3a57106eeb443550bb801a8120243b992bc960fb" ]
[ "app.py" ]
[ "import requests, json, populartimes\nimport numpy as np\nimport pandas as pd\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\nfrom form import DetailForm, UserForm, UserLogin, NGOForm\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom datetime import datetime, timedelta\nfrom flask import Flask, redirect, url_for, request, render_template, session, flash\nfrom flask_mail import Mail, Message\nfrom models import get_coords\nfrom flask_login import LoginManager, login_required\nfrom flask_googlemaps import GoogleMaps, Map\nfrom haversine import haversine\n\n# Create Flask App\napp = Flask(__name__)\napp.secret_key = \"xb1\\x058\\xb8o\\x82\\xaf\\xdb\\xd5I\"\napp.config.from_pyfile('config.cfg')\n\n# Get Google Places API: https://developers.google.com/places/web-service/get-api-key and replace\nMyAPI_key = \"Put your key\"\n\nGoogleMaps(app,key=MyAPI_key)\nmail = Mail(app)\n\nkey = 'xb1\\x058\\xb8o\\x82\\xaf\\xdb\\xd5I'\nengine = create_engine(\"mysql+pymysql://root:pawan@localhost/covid\")\ndb = scoped_session(sessionmaker(bind=engine))\ns = URLSafeTimedSerializer(key)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"userlogin\"\n\n# URL for request to google place text search to find user address based on what is typed in\nurl = \"https://maps.googleapis.com/maps/api/place/textsearch/json?\"\n\n# constant factor used later to calculate the area (longitude, latitude) to scan for stores\nalpha = 180/(np.pi*6371000)\n\[email protected]('/')\ndef home():\n return render_template('homes.html')\n\n\[email protected](\"/userregister\", methods=[\"GET\", \"POST\"])\ndef register():\n form = UserForm(request.form)\n if request.method == 'POST' and form.validate():\n fname = form.fname.data\n lname = form.lname.data\n email = form.email.data\n password = form.password.data\n confirm = form.confirm_password.data\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n\n ipaddr = '157.37.154.227' # hard coded till we deploy it\n loc = get_coords(ipaddr)\n lat, lon = loc[0], loc[1]\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is not None:\n flash(\"Email taken\", \"danger\")\n return render_template(\"userregister.html\", form=form)\n\n if password == confirm:\n db.execute(\"INSERT INTO users (first_name, last_name, email, pass,lon, lat) VALUES (:fname, :lname, :email, :password, :lon,:lan)\", {\n \"fname\": fname, \"lname\": lname, \"email\": email, \"password\": password, \"lon\":lon, \"lat\":lat})\n db.commit()\n\n email = request.form['email']\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Confirm Email', sender='[email protected]', recipients=[email])\n link = url_for('confirm_email', token=token, _external=True)\n\n msg.body = link\n mail.send(msg)\n flash(\"A confirmation email has been sent. 
Please confirm your email.\", \"success\")\n return render_template(\"userregister.html\", form=form)\n\n else:\n flash(\"Passwords do not match\", \"danger\")\n return render_template(\"userregister.html\",form=form)\n\n return render_template(\"userregister.html\",form=form)\n\n\[email protected](\"/ngoregister\", methods=[\"GET\",\"POST\"])\ndef ngoregister():\n form = NGOForm(request.form)\n if request.method == 'POST' and form.validate():\n name = form.name.data\n email = form.email.data\n password = form.password.data\n confirm = form.confirm_password.data\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n\n ipaddr = '157.37.154.227' # hard coded till we deploy it\n loc = get_coords(ipaddr)\n lat, lon = loc[0], loc[1]\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is not None:\n flash(\"Email taken\", \"danger\")\n return render_template(\"ngoregister.html\", form=form)\n\n\n if password == confirm:\n db.execute(\"INSERT INTO users (first_name, email, pass, usertype ,lon, lat) VALUES (:fname, :email, :password, :usertype, :lon,:lat)\",\n { \"fname\": name, \"email\": email, \"password\": password, \"usertype\":3, \"lon\":lon, \"lat\":lat})\n db.commit()\n\n email = request.form['email']\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Confirm Email', sender='[email protected]', recipients=[email])\n link = url_for('confirm_email', token=token, _external=True)\n\n msg.body = link\n mail.send(msg)\n flash(\"A confirmation email has been sent. Please confirm your email.\", \"success\")\n return render_template(\"ngoregister.html\", form=form)\n\n else:\n flash(\"Passwords do not match\", \"danger\")\n return render_template(\"ngoregister.html\",form=form)\n\n return render_template(\"ngoregister.html\",form=form)\n\[email protected]('/confirm_email/<token>')\ndef confirm_email(token):\n try:\n email = s.loads(token, salt='email-confirm', max_age=36000)\n flash(\"You are registered. Please login\", \"success\")\n return redirect(url_for('userlogin'))\n except SignatureExpired:\n flash(\"The link has expired. Please login\", \"danger\")\n return render_template(\"userregister.html\")\n\n\[email protected](\"/userlogin\", methods=[\"GET\", \"POST\"])\ndef userlogin():\n form = UserLogin(request.form)\n if request.method == 'POST' and form.validate():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n passwordData = db.execute(\"SELECT pass FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n userTypeData = db.execute(\"SELECT userType FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is None:\n flash(\"Email not found. 
Please try again.\", \"danger\")\n return render_template(\"userlogin.html\", form=form)\n else:\n\n for password_data in passwordData:\n if password==password_data:\n session[\"log\"] = True\n flash(\"You are logged in.\")\n session[\"USER\"] = email\n\n print(userTypeData.userType)\n\n if (int(userTypeData.userType)==2):\n print(\"yes\")\n return redirect(url_for('userhome'))\n elif (int(userTypeData.userType)==3):\n return redirect(url_for('ngohome'))\n else:\n flash(\"Incorrect password\", \"danger\")\n return render_template(\"userlogin.html\",form=form)\n return render_template(\"userlogin.html\",form=form)\n\n\[email protected](\"/forgetpassword\", methods=[\"GET\", \"POST\"])\ndef forget_password():\n if request.form.get(\"email\"):\n email = request.form['email']\n emaildata = db.execute(\"SELECT email FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if emaildata is None:\n flash(\"Email not found. Please try again.\", \"danger\")\n return render_template(\"forgetpassword.html\")\n else:\n new_password = (str('new_password'))\n db.execute(\"UPDATE users SET pass=:password WHERE email=:email\", {\"password\": new_password, \"email\": email})\n db.commit()\n msg = Message('Forget Password',\n sender='[email protected]', recipients=[email])\n msg.body = 'Your new password is: new_password'\n\n mail.send(msg)\n flash(\"Your password was sent to your email.\", \"success\")\n return redirect(url_for('userlogin'))\n return render_template(\"forgetpassword.html\")\n\n\[email protected](\"/logout\")\n@login_required\ndef logout():\n session.clear()\n flash(\"You are logged out\", \"success\")\n return redirect(url_for('userlogin'))\n\[email protected](401)\ndef page_not_found(e):\n return render_template('error.html')\n\[email protected]('/error')\ndef error():\n return render_template('error.html')\n\n\[email protected]('/stop')\ndef stop():\n return render_template('stop.html')\n\n\[email protected](\"/admin\")\ndef admin():\n return render_template(\"adminlogin.html\")\n\n\ndef get_map(loc):\n mymap = Map(identifier=\"view-side\", lat=loc[0], lng=loc[1],\n markers=[{\n 'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png','zoom': 16,\n 'lat': loc[0], 'lng': loc[1], 'infobox': \"<b>Your current location</b>\",\n 'style':'width:500px'}])\n\n return mymap\n\n\[email protected](\"/ngohome\", methods=['GET','POST'])\ndef ngohome():\n # Hardcoded till we deploy it so that we can get user ip addr\n ipaddr = '157.37.154.227'\n loc = get_coords(ipaddr)\n\n # hardcoded can be taken by nearest requesters\n loc2 = [26.9363461, 75.9213346]\n mymap = get_map(loc)\n\n calc_dist = haversine(loc,loc2)\n if calc_dist<20:\n print(\"yes there is a request\")\n\n return render_template(\"ngohome.html\", mymap=mymap)\n\n\[email protected](\"/userhome\", methods=['GET','POST'])\ndef userhome():\n\n # Hardcoded till we deploy it so that we can get user ip addr\n ipaddr = '157.37.154.227'\n loc = get_coords(ipaddr)\n mymap = get_map(loc)\n\n if request.form.get(\"packets\"):\n packets = request.form['packets']\n email = session[\"USER\"]\n current = db.execute(\"SELECT quantity FROM users WHERE email=:email\", {\"email\": email}).fetchone()\n\n if int(packets) < 1:\n flash(\"Invalid Quantity\", \"danger\")\n return redirect(url_for('userhome'))\n if int(packets) > 6:\n flash(\"Maximum 6 from one signup\", \"danger\")\n return redirect(url_for('userhome'))\n if int(current.quantity) != 0:\n flash(\"You already submitted a request\",\"danger\")\n return 
redirect(url_for('userhome'))\n\n else:\n db.execute(\"UPDATE users SET quantity=:quantity WHERE email=:email\", {\"quantity\": packets, \"email\": email})\n db.commit()\n\n flash(\"Your Request has been submitted\", \"success\")\n return redirect(url_for('userhome'))\n\n # ipaddr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # loc = get_coords(ipaddr)\n return render_template(\"userhome.html\", mymap= mymap)\n\n\n# Run Store Search and Selection\[email protected]('/details', methods=['GET', 'POST'])\ndef detail():\n form = DetailForm(request.form)\n if request.method == 'POST' and form.validate():\n # Import User Input\n user_address = form.user_address.data\n store_type = form.store_type.data\n radius = form.radius.data \n\n radius = int(radius)\n \n # Find the google place from the user_address input\n user_address_res = requests.get(url + 'query=' + user_address + '&key=' + MyAPI_key)\n x = user_address_res.json()\n\n user_location = x[\"results\"][0][\"geometry\"][\"location\"]\n\n user_latitude = user_location[\"lat\"]\n user_longitude = user_location[\"lng\"]\n \n # Define search area around the user location\n delta = radius*alpha\n\n p1 = (user_latitude-delta, user_longitude-delta)\n p2 = (user_latitude+delta, user_longitude+delta)\n\n if store_type == 'supermarket':\n results = populartimes.get(MyAPI_key, [\"grocery_or_supermarket\"], p1, p2, radius=radius, all_places=False, n_threads=1)\n \n if store_type == 'pharmacy':\n results = populartimes.get(MyAPI_key, [\"pharmacy\"], p1, p2, radius=radius, all_places=False, n_threads=10)\n\n # Find out the current time at the user's location (can only be found by a place details request)\n user_location_id = x[\"results\"][0][\"reference\"]\n\n url_details = \"https://maps.googleapis.com/maps/api/place/details/json?\"\n user_location_details_res = requests.get(url_details+\"key=\"+MyAPI_key+\"&place_id=\" + user_location_id)\n y = user_location_details_res.json()\n\n utc_offset = y[\"result\"][\"utc_offset\"]\n time_now = datetime.utcnow()+timedelta(hours=utc_offset/60)\n \n # Create a list of stores with their activity data (current if available, otherwise average at current time)\n # Closed stores (activity=0) are omitted\n store_list = []\n\n for item in results:\n if \"current_popularity\" in item:\n store_list.append([item[\"current_popularity\"], item[\"name\"], item[\"id\"]])\n else:\n temp = item[\"populartimes\"][time_now.weekday()][\"data\"][time_now.hour]\n if temp != 0:\n store_list.append([temp, item[\"name\"], item[\"id\"]])\n \n # If no Stores are found give out an error\n if len(store_list) == 0:\n # return 'there has been an error: No data available for this choice'\n return redirect(url_for('error'))\n \n # Select the store with the least activity and get its ID and name\n df = pd.DataFrame(store_list)\n store_place_id = df.iloc[df[0].idxmin(), 2]\n store_name = df.iloc[df[0].idxmin(), 1]\n \n # Create google maps link based of store_place_id\n store_gmap_url = \"https://www.google.com/maps/place/?q=place_id:\" + store_place_id\n\n return render_template('stop.html', value=store_name, key=store_gmap_url)\n\n else:\n return render_template('details.html', form=form)\n\n\n@login_manager.user_loader\ndef load_user(userid):\n return db.query.get(userid)\n\n\nif __name__ == '__main__':\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n app.run(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jmichellehu/vmap
[ "0f631a0e2d625215ea419cd7ea537f6d5a2ccd57" ]
[ "vmap/vmap.py" ]
[ "#! /usr/bin/env python\n\n#David Shean\n#[email protected]\n\n#This script uses ASP correlator to produce disparity maps from two inputs\n#Input data should be orthorectified/mapped in the same projected coordinate system\n#Run disp2v.py to convert to surface velocities\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nfrom datetime import datetime, timedelta\nfrom distutils.spawn import find_executable\n\nfrom osgeo import gdal\nimport numpy as np\n\nfrom pygeotools.lib import warplib, geolib, iolib\nfrom pygeotools.lib.malib import calcperc \nfrom pygeotools.lib.timelib import get_t_factor_fn\n\n#Generate and execute stereo commands\ndef run_cmd(bin, args, **kw):\n #Note, need to add full executable\n binpath = find_executable(bin)\n if binpath is None:\n msg = (\"Unable to find executable %s\\n\" \n \"Install ASP and ensure it is in your PATH env variable\\n\" \n \"https://ti.arc.nasa.gov/tech/asr/intelligent-robotics/ngt/stereo/\" % bin)\n sys.exit(msg)\n call = [binpath,]\n call.extend(args)\n print(' '.join(call))\n try:\n code = subprocess.call(call, shell=False)\n except OSError as e:\n raise Exception('%s: %s' % (binpath, e))\n if code != 0:\n raise Exception('Stereo step ' + kw['msg'] + ' failed')\n\ndef get_stereo_opt(threads=28, kernel=(35,35), nlevels=5, spr=1, timeout=360, erode=0, align='None'):\n stereo_opt = []\n #This is irrelevant\n stereo_opt.extend(['-t', 'pinhole'])\n #Set number of threads/cores to use (will use all CPUs if possible)\n stereo_opt.extend(['--threads', str(threads)])\n #This assumes input images are already mapped \n stereo_opt.extend(['--alignment-method', align])\n #This will attempt to remove most of the offset between two images, for relative offsets\n #stereo_opt.extend(['--alignment-method', 'Homography'])\n #stereo_opt.append('--ip-debug-images')\n #This should be explored further\n stereo_opt.append('--individually-normalize')\n #Integer correlator kernel size\n stereo_opt.extend(['--corr-kernel', str(kernel[0]), str(kernel[1])])\n stereo_opt.extend(['--corr-max-levels', str(nlevels)])\n if timeout > 0:\n stereo_opt.extend(['--corr-timeout', str(timeout)])\n #Define the search area\n #Useful if you know your orhotorectification is good to, say 100 pixels in any direction\n #stereo_opt.extend(['--corr-search', '-100', '-100', '100', '100'])\n stereo_opt.extend(['--subpixel-mode', str(spr)])\n #If using Semi-global matching (spr 0):\n if spr > 3:\n #Use SGM\n stereo_opt.extend(['--stereo-algorithm', '1'])\n #Use MGM\n #stereo_opt.extend(['--stereo-algorithm', '2'])\n #bro nodes had 128 GB of RAM, 28 threads, ~4.7 GB/thread\n stereo_opt.extend(['--corr-tile-size', '3600'])\n stereo_opt.extend(['--xcorr-threshold', '-1'])\n stereo_opt.extend(['--median-filter-size', '5'])\n stereo_opt.extend(['--texture-smooth-size', '11'])\n else:\n #Sub-pixel kernel size (ASP default is 35)\n #Set to same as integer correlator kernel\n stereo_opt.extend(['--subpixel-kernel', str(kernel[0]), str(kernel[1])])\n #Note: stereo_fltr throws out a lot of good data when noisy\n #Want to play with the following options\n #--rm-half-kernel 5 5\n #--rm_min_matches 60\n #--rm-threshold 3\n if erode > 0:\n stereo_opt.extend(['--erode-max-size', str(erode)])\n return stereo_opt\n\ndef make_ln(outdir, outprefix, ext):\n #Create symbolic links with appropriate names \n ln_fn = os.path.join(outdir, outdir+ext)\n if os.path.lexists(ln_fn):\n os.remove(ln_fn)\n os.symlink(os.path.split(outprefix)[1]+ext, ln_fn)\n return ln_fn\n\ndef gen_d_sub(d_sub_fn, dx, dy, 
pad_perc=0.1, ndv=-9999):\n nl = dx.shape[0]\n ns = dx.shape[1]\n #Use GDT_Byte or GDT_Int16 to save space?\n dtype = gdal.GDT_Int32\n opt = iolib.gdal_opt\n d_sub_ds = iolib.gtif_drv.Create(d_sub_fn, ns, nl, 3, dtype, opt)\n d_sub_ds.GetRasterBand(1).WriteArray(np.rint(dx.filled(ndv)).astype(np.int32))\n d_sub_ds.GetRasterBand(2).WriteArray(np.rint(dy.filled(ndv)).astype(np.int32))\n d_sub_ds.GetRasterBand(3).WriteArray((~dx.mask).astype(np.int32))\n for n in range(1, d_sub_ds.RasterCount+1):\n band = d_sub_ds.GetRasterBand(n)\n band.SetNoDataValue(float(ndv))\n d_sub_ds = None\n\n #Now write D_sub_spread.tif - defines spread around D_sub values\n d_sub_ds = iolib.fn_getds(d_sub_fn)\n d_sub_spread_fn = os.path.splitext(d_sub_fn)[0]+'_spread.tif'\n d_sub_spread_ds = iolib.gtif_drv.CreateCopy(d_sub_spread_fn, d_sub_ds, 0)\n dx_spread = np.ma.abs(dx * pad_perc)\n dy_spread = np.ma.abs(dy * pad_perc)\n d_sub_spread_ds.GetRasterBand(1).WriteArray(np.rint(dx_spread.filled(ndv)).astype(np.int32))\n d_sub_spread_ds.GetRasterBand(2).WriteArray(np.rint(dy_spread.filled(ndv)).astype(np.int32))\n d_sub_spread_ds.GetRasterBand(3).WriteArray((~dx_spread.mask).astype(np.int32))\n for n in range(1, d_sub_spread_ds.RasterCount+1):\n band = d_sub_spread_ds.GetRasterBand(n)\n band.SetNoDataValue(float(ndv))\n d_sub_spread_ds = None\n #Copy proj/gt to D_sub and D_sub_spread?\n\n#Return ndarray with h, v, m\ndef get_vel(fn, fill=True):\n ds = gdal.Open(fn)\n if fill:\n import dem_downsample_fill\n ds = dem_downsample_fill.gdalfill_ds(ds)\n u_b = ds.GetRasterBand(1)\n v_b = ds.GetRasterBand(2)\n u = iolib.b_getma(u_b)\n v = iolib.b_getma(v_b)\n m = np.ma.sqrt(u*u + v*v)\n return u, v, m\n\ndef getparser():\n parser = argparse.ArgumentParser(description=\"Generate velocity map via feature-tracking\")\n parser.add_argument('-outdir', default=None, help='Output directory')\n parser.add_argument('-threads', type=int, default=iolib.cpu_count(), help='Number of threads to use(default: %(default)s)')\n parser.add_argument('-tr', default='min', help='Output resolution (default: %(default)s)')\n #Set correlator kernel size\n parser.add_argument('-kernel', type=int, default=35, help='Correlator kernel size. Smaller kernels offer more detail but are prone to more noise. Odd integers required (~9-51 px recommended). (default: %(default)s)')\n align_choices = ['AffineEpipolar', 'Homography', 'Epipolar', 'None']\n parser.add_argument('-align', default='None', choices=align_choices, help='Alignment method to warp second image to match first image, if not already orthorectified. Provides flexibility for L1B inputs')\n #Integer correlator seeding\n #D_sub is low-resolution correlation (default), which works well for most situations\n #sparse_disp will use sparse seeding from full-res chips, useful for ice sheets with limited low-frequency texture\n #existing_velocity will accept existing vx and vy rasters. Useful for limiting search range and limiting blunders. 
Measures products are useful for ice sheets.\n seedmode_choices = ['D_sub', 'sparse_disp', 'existing_velocity']\n parser.add_argument('-seedmode', type=str, choices=seedmode_choices, default='D_sub', help='Seeding option (default: %(default)s)')\n parser.add_argument('-vx_fn', type=str, default=None, help='Seed E-W velocity map filename')\n parser.add_argument('-vy_fn', type=str, default=None, help='Seed N-S velocity map filename')\n \n #Sub-pixel refinement\n #0) None, 1) Parabolic, 2) Bayes, 3) AffineAdaptive\n #See ASP doc or Shean et al, ISPRS, (2016)\n #1 is fast but lower quality\n #2 is slow but highest quality, \n #3 is a good compromise for speed and quality\n refinement_choices = list(range(12))\n parser.add_argument('-refinement', type=int, default=1, help='Sub-pixel refinement type (see ASP doc): 0) None, 1) Parabolic, 2) Bayes, 3) AffineAdaptive 4) LK, 5) Bayes w/gamma, 6) SGM Linear, 7) SGM Poly4, 8) SGM Cos, 9) SGM Parabola, 10) SGM None, 11) SGM Blend (default: %(default)s)')\n #Numer of gaussian pyramids to use\n #Can look at texture in GDAL overviews to make a decision\n #If you can see plenty of texture at 1/32 resolution, go with 5 \n #For featureless areas, limiting to 2 can help, or even 0\n parser.add_argument('-pyramid-levels', type=int, default=5, help='Number of pyramid levels for correlation (default: %(default)s)')\n #This helps get rid of bogus \"islands\" in the disparity maps\n parser.add_argument('-erode', type=int, default=1024, help='Erode isolated blobs smaller than this many pixels. Set to 0 to disable (default: %(default)s)')\n parser.add_argument('-filter', action='store_true', help='Filter the output F.tif, smoothing with Gaussian filter')\n #This masks input images to improve performance. Useful for forested areas.\n parser.add_argument('-mask_input', action='store_true', help='Mask any vegetation/water in input images. 
Requires demcoreg')\n parser.add_argument('-remove_offsets', action='store_true', help='Remove median horizontal and vertical offsets over stable control surfaces')\n parser.add_argument('-dt', type=str, choices=['yr','day','none'], default='yr', help='Time increment (default: %(default)s)')\n\n #Inputs can be images, DEMs, shaded relief maps\n #Personal experience suggests multi-directional hillshades with identical illumination work well\n #Only 2 input datsets allowed for this - want to stay modular\n parser.add_argument('fn1', type=str, help='Raster filename 1')\n parser.add_argument('fn2', type=str, help='Raster filename 2')\n return parser\n\ndef main():\n parser = getparser()\n args = parser.parse_args()\n if args.seedmode == 'existing_velocity':\n if args.vx_fn is None or args.vy_fn is None:\n parser.error('\"-seedmode existing_velocity\" requires \"-vx_fn\" and \"-vy_fn\"')\n\n print('\\n%s' % datetime.now())\n print('%s UTC\\n' % datetime.utcnow())\n\n align = args.align\n seedmode = args.seedmode\n spr = args.refinement\n erode = args.erode\n #Correlator tile timeout\n #With proper seeding, correlation should be very fast\n #timeout = 360 \n timeout = 1200 \n threads = args.threads\n\n kernel = (args.kernel, args.kernel)\n #SGM correlator\n if spr > 3:\n #kernel = (7,7)\n kernel = (11,11)\n erode = 0\n\n #Smooth the output F.tif \n smoothF = args.filter \n\n res = args.tr\n #Resample input to something easier to work with\n #res = 4.0\n\n #Open input files\n fn1 = args.fn1\n fn2 = args.fn2 \n\n if not iolib.fn_check(fn1) or not iolib.fn_check(fn2):\n sys.exit(\"Unable to locate input files\")\n\n if args.outdir is not None:\n outdir = args.outdir\n else:\n outdir = '%s__%s_vmap_%sm_%ipx_spm%i' % (os.path.splitext(os.path.split(fn1)[1])[0], \\\n os.path.splitext(os.path.split(fn2)[1])[0], res, kernel[0], spr)\n\n #Note, can encounter filename length issues in boost, just use vmap prefix\n outprefix = '%s/vmap' % (outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n #Check to see if inputs have geolocation and projection information\n ds1 = iolib.fn_getds(fn1)\n ds2 = iolib.fn_getds(fn2)\n\n if geolib.srs_check(ds1) and geolib.srs_check(ds2):\n ds1_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn1))[0]+'_warp.tif')\n ds2_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn2))[0]+'_warp.tif')\n\n if not os.path.exists(ds1_clip_fn) or not os.path.exists(ds2_clip_fn):\n #This should write out files to new subdir\n ds1_clip, ds2_clip = warplib.diskwarp_multi_fn([fn1, fn2], extent='intersection', res=res, r='average', outdir=outdir)\n ds1_clip = None\n ds2_clip = None\n #However, if inputs have identical extent/res/proj, then link to original files\n if not os.path.exists(ds1_clip_fn):\n os.symlink(os.path.abspath(fn1), ds1_clip_fn)\n if not os.path.exists(ds2_clip_fn):\n os.symlink(os.path.abspath(fn2), ds2_clip_fn)\n align = 'None'\n\n #Mask support - limit correlation only to rock/ice surfaces, no water/veg\n #This masks input images - guarantee we won't waste time correlating over vegetation\n #TODO: Add support to load arbitrary raster or shp mask\n if args.mask_input:\n ds1_masked_fn = os.path.splitext(ds1_clip_fn)[0]+'_masked.tif'\n ds2_masked_fn = os.path.splitext(ds2_clip_fn)[0]+'_masked.tif'\n\n if not os.path.exists(ds1_masked_fn) or not os.path.exists(ds2_masked_fn):\n #Load NLCD or bareground mask\n from demcoreg.dem_mask import get_lulc_mask\n\n ds1_clip = iolib.fn_getds(ds1_clip_fn)\n lulc_mask_fn = os.path.join(outdir, 
'lulc_mask.tif')\n #if not os.path.exists(nlcd_mask_fn):\n lulc_mask = get_lulc_mask(ds1_clip, mask_glaciers=False, filter='not_forest')\n iolib.writeGTiff(lulc_mask, lulc_mask_fn, ds1_clip) \n ds1_clip = None\n\n #Now apply to original images \n #This could be problematic for huge inputs, see apply_mask.py\n #lulc_mask = lulc_mask.astype(int)\n for fn in (ds1_clip_fn, ds2_clip_fn):\n ds = iolib.fn_getds(fn)\n a = iolib.ds_getma(ds)\n a = np.ma.array(a, mask=~(lulc_mask))\n if a.count() > 0:\n out_fn = os.path.splitext(fn)[0]+'_masked.tif'\n iolib.writeGTiff(a,out_fn,ds)\n a = None\n else:\n sys.exit(\"No unmasked pixels over bare earth\")\n ds1_clip_fn = ds1_masked_fn\n ds2_clip_fn = ds2_masked_fn\n else:\n ds1_clip_fn = fn1\n ds2_clip_fn = fn2\n #Now let user specify alignment methods as option - don't hardcode\n #align = 'Homography'\n #align = 'AffineEpipolar'\n ds1 = None\n ds2 = None\n\n #Should have extra kwargs option here\n stereo_opt = get_stereo_opt(threads=threads, kernel=kernel, timeout=timeout, \\\n erode=erode, spr=spr, align=align)\n \n #Stereo arguments\n #Latest version of ASP should accept tif without camera models\n #stereo_args = [ds1_clip_fn, ds2_clip_fn, outprefix]\n #Nope - still need to provide dummy camera models, and they must be unique files\n #Use the dummy.tsai file bundled in the vmap repo\n dummy_tsai = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'dummy.tsai')\n dummy_tsai2 = os.path.splitext(dummy_tsai)[0]+'2.tsai'\n if not os.path.exists(dummy_tsai2):\n dummy_tsai2 = os.symlink(dummy_tsai, os.path.splitext(dummy_tsai)[0]+'2.tsai')\n stereo_args = [ds1_clip_fn, ds2_clip_fn, dummy_tsai, dummy_tsai2, outprefix]\n\n #Run stereo_pprc\n if not os.path.exists(outprefix+'-R_sub.tif'):\n run_cmd('stereo_pprc', stereo_opt+stereo_args, msg='0: Preprocessing')\n #Copy proj info to outputs, this should happen automatically now?\n for ext in ('L', 'R', 'L_sub', 'R_sub', 'lMask', 'rMask', 'lMask_sub', 'rMask_sub'):\n geolib.copyproj(ds1_clip_fn, '%s-%s.tif' % (outprefix,ext))\n\n #Prepare seeding for stereo_corr\n #TODO: these are untested after refactoring\n if not os.path.exists(outprefix+'_D_sub.tif'):\n #Don't need to do anything for default seed-mode 1\n if seedmode == 'sparse_disp':\n #Sparse correlation of full-res images\n stereo_opt.extend(['--corr-seed-mode', '3'])\n sparse_disp_opt = []\n sparse_disp_opt.extend(['--Debug', '--coarse', '512', '--fine', '256', '--no_epipolar_fltr']) \n sparse_disp_opt.extend(['-P', str(threads)])\n sparse_disp_args = [outprefix+'-L.tif', outprefix+'-R.tif', outprefix]\n run_cmd('sparse_disp', sparse_disp_opt+sparse_disp_args, msg='0.5: D_sub generation')\n elif seedmode == 'existing_velocity':\n #User-input low-res velocity maps for seeding\n #TODO: Add functions that fetch best available velocities for Ant/GrIS or user-defined low-res velocities\n #Automatically query GoLive velocities here\n vx_fn = args.vx_fn \n vy_fn = args.vy_fn \n #Check for existence\n\n #HMA seeding\n vdir = '/nobackup/deshean/rpcdem/hma/velocity_jpl_amaury_2013-2015'\n vx_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.x_vel.TIF')\n vy_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.y_vel.TIF')\n\n if os.path.exists(vx_fn) and os.path.exists(vy_fn):\n ds1_clip = iolib.fn_getds(ds1_clip_fn)\n ds1_res = geolib.get_res(ds1_clip, square=True)[0]\n\n #Compute L_sub res - use this for output dimensions\n L_sub_fn = outprefix+'-L_sub.tif' \n L_sub_ds = gdal.Open(L_sub_fn)\n L_sub_x_scale = 
float(ds1_clip.RasterXSize) / L_sub_ds.RasterXSize\n L_sub_y_scale = float(ds1_clip.RasterYSize) / L_sub_ds.RasterYSize\n L_sub_scale = np.max([L_sub_x_scale, L_sub_y_scale])\n L_sub_res = ds1_res * L_sub_scale\n\n #Since we are likely upsampling here, use cubicspline\n vx_ds_clip, vy_ds_clip = warplib.memwarp_multi_fn([vx_fn, vy_fn], extent=ds1_clip, \\\n t_srs=ds1_clip, res=L_sub_res, r='cubicspline')\n\n ds1_clip = None\n\n #Get vx and vy arrays\n vx = iolib.ds_getma(vx_ds_clip)\n vy = iolib.ds_getma(vy_ds_clip)\n\n #Determine time interval between inputs\n #Use to scaling of known low-res velocities\n t_factor = get_t_factor_fn(ds1_clip_fn, ds2_clip_fn, ds=vx_ds_clip)\n\n if t_factor is not None:\n #Compute expected offset in scaled pixels \n dx = (vx*t_factor)/L_sub_res\n dy = (vy*t_factor)/L_sub_res\n #Note: Joughin and Rignot's values are positive y up!\n #ASP is positive y down, so need to multiply these values by -1\n #dy = -(vy*t_factor)/L_sub_res\n\n #Should smooth/fill dx and dy\n\n #If absolute search window is only 30x30\n #Don't seed, just use fixed search window \n #search_window_area_thresh = 900\n search_window_area_thresh = 0 \n search_window = np.array([dx.min(), dy.min(), dx.max(), dy.max()])\n dx_p = calcperc(dx, perc=(0.5, 99.5))\n dy_p = calcperc(dy, perc=(0.5, 99.5))\n search_window = np.array([dx_p[0], dy_p[0], dx_p[1], dy_p[1]])\n search_window_area = (search_window[2]-search_window[0]) * (search_window[3]-search_window[1])\n if search_window_area < search_window_area_thresh:\n stereo_opt.extend(['--corr-seed-mode', '0'])\n stereo_opt.append('--corr-search')\n stereo_opt.extend([str(x) for x in search_window])\n #pad_perc=0.1\n #stereo_opt.extend(['--corr-sub-seed-percent', str(pad_perc)]\n #Otherwise, generate a D_sub map from low-res velocity\n else:\n stereo_opt.extend(['--corr-seed-mode', '3'])\n #This is relative to the D_sub scaled disparities\n d_sub_fn = L_sub_fn.split('-L_sub')[0]+'-D_sub.tif' \n gen_d_sub(d_sub_fn, dx, dy)\n\n #If the above didn't generate a D_sub.tif for seeding, run stereo_corr to generate Low-res D_sub.tif\n if not os.path.exists(outprefix+'-D_sub.tif'):\n newopt = ['--compute-low-res-disparity-only',]\n run_cmd('stereo_corr', newopt+stereo_opt+stereo_args, msg='1.1: Low-res Correlation')\n #Copy projection info to D_sub\n geolib.copyproj(outprefix+'-L_sub.tif', outprefix+'-D_sub.tif')\n \n #Mask D_sub to limit correlation over bare earth surfaces\n #This _should_ be a better approach than masking input images, but stereo_corr doesn't honor D_sub\n #Still need to mask input images before stereo_pprc\n #Left this in here for reference, or if this changes in ASP\n if False:\n D_sub_ds = gdal.Open(outprefix+'-D_sub.tif', gdal.GA_Update)\n\n #Mask support - limit correlation only to rock/ice surfaces, no water/veg\n from demcoreg.dem_mask import get_nlcd, mask_nlcd\n nlcd_fn = get_nlcd()\n nlcd_ds = warplib.diskwarp_multi_fn([nlcd_fn,], extent=D_sub_ds, res=D_sub_ds, t_srs=D_sub_ds, r='near', outdir=outdir)[0]\n #validmask = mask_nlcd(nlcd_ds, valid='rock+ice')\n validmask = mask_nlcd(nlcd_ds, valid='not_forest', mask_glaciers=False)\n nlcd_mask_fn = os.path.join(outdir, 'nlcd_validmask.tif')\n iolib.writeGTiff(validmask, nlcd_mask_fn, nlcd_ds) \n\n #Now apply to D_sub (band 3 is valid mask)\n #validmask = validmask.astype(int)\n for b in (1,2,3):\n dsub = iolib.ds_getma(D_sub_ds, b)\n dsub = np.ma.array(dsub, mask=~(validmask))\n D_sub_ds.GetRasterBand(b).WriteArray(dsub.filled())\n D_sub_ds = None\n\n #OK, finally run stereo_corr 
full-res integer correlation with appropriate seeding\n if not os.path.exists(outprefix+'-D.tif'):\n run_cmd('stereo_corr', stereo_opt+stereo_args, msg='1: Correlation')\n geolib.copyproj(ds1_clip_fn, outprefix+'-D.tif')\n\n #Run stereo_rfne\n if spr > 0:\n if not os.path.exists(outprefix+'-RD.tif'):\n run_cmd('stereo_rfne', stereo_opt+stereo_args, msg='2: Refinement')\n geolib.copyproj(ds1_clip_fn, outprefix+'-RD.tif')\n d_fn = make_ln(outdir, outprefix, '-RD.tif')\n else:\n ln_fn = outprefix+'-RD.tif'\n if os.path.lexists(ln_fn):\n os.remove(ln_fn)\n os.symlink(os.path.split(outprefix)[1]+'-D.tif', ln_fn)\n\n #Run stereo_fltr\n if not os.path.exists(outprefix+'-F.tif'):\n run_cmd('stereo_fltr', stereo_opt+stereo_args, msg='3: Filtering')\n geolib.copyproj(ds1_clip_fn, outprefix+'-F.tif')\n\n d_fn = make_ln(outdir, outprefix, '-F.tif')\n\n if smoothF and not os.path.exists(outprefix+'-F_smooth.tif'):\n print('Smoothing F.tif')\n from pygeotools.lib import filtlib \n #Fill holes and smooth F\n F_fill_fn = outprefix+'-F_smooth.tif'\n F_ds = gdal.Open(outprefix+'-F.tif', gdal.GA_ReadOnly)\n #import dem_downsample_fill\n #F_fill_ds = dem_downsample_fill.gdalfill_ds(F_fill_ds)\n print('Creating F_smooth.tif')\n F_fill_ds = iolib.gtif_drv.CreateCopy(F_fill_fn, F_ds, 0, options=iolib.gdal_opt)\n F_ds = None\n for n in (1, 2):\n print('Smoothing band %i' % n)\n b = F_fill_ds.GetRasterBand(n)\n b_fill_bma = iolib.b_getma(b)\n #b_fill_bma = iolib.b_getma(dem_downsample_fill.gdalfill(b))\n #Filter extreme values (careful, could lose areas of valid data with fastest v)\n #b_fill_bma = filtlib.perc_fltr(b_fill_bma, perc=(0.01, 99.99))\n #These filters remove extreme values and fill data gaps\n #b_fill_bma = filtlib.median_fltr_skimage(b_fill_bma, radius=7, erode=0)\n #b_fill_bma = filtlib.median_fltr(b_fill_bma, fsize=7, origmask=True)\n #Gaussian filter\n b_fill_bma = filtlib.gauss_fltr_astropy(b_fill_bma, size=9)\n b.WriteArray(b_fill_bma)\n F_fill_ds = None\n d_fn = make_ln(outdir, outprefix, '-F_smooth.tif')\n\n print('\\n%s' % datetime.now())\n print('%s UTC\\n' % datetime.utcnow())\n\n #If time interval is specified, convert pixel displacements to rates\n if args.dt != 'none':\n #Check if vm.tif already exists\n #Should probably just overwrite by default\n #if os.path.exists(os.path.splitext(d_fn)[0]+'_vm.tif'):\n # print(\"\\nFound existing velocity magnitude map!\\n\"\n #else:\n #Generate output velocity products and figure\n #Requires that vmap repo is in PATH\n cmd = ['disp2v.py', d_fn]\n #Note: this will attempt to automatically determine control surfaces\n #disp2v.py will accept arbitrary mask, could pass through here\n if args.remove_offsets:\n cmd.append('-remove_offsets')\n cmd.extend(['-dt', args.dt])\n print(\"Converting disparities to velocities\")\n print(cmd)\n subprocess.call(cmd)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ma.abs", "numpy.max", "numpy.ma.array", "numpy.ma.sqrt", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SobhanOmranian/spark-dca
[ "cd0c5ddbe433a1772442456549e37bf8838f75e3" ]
[ "scripts/figure10_experiment[static_solution_ssd]/plot_dca_static_ssd.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import savefig\nimport matplotlib.patches as mpatches\nimport pandas as pd\nimport numpy as np\nfrom numpy import dtype\nfrom matplotlib.pyplot import ylabel\nimport math\nimport re\n\nimport sys\nsys.path.append(f'./common')\nimport util\nfrom util import *\nsetup(util.setting_font_size_2, util.style_plot)\n\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"-i\", \"--inputFile\")\nparser.add_argument(\"-o\", \"--outputFilePath\")\n\nargs = parser.parse_args(sys.argv[1:])\n\ninputFileName = args.inputFile\noutputFilePath = args.outputFilePath\n\ndef normalise (x, min, max):\n return (x-min)/(max-min)\ndef get_percentage_change(current, previous):\n if current == previous:\n return 100.0\n try:\n return round ( ((current - previous) / previous) * 100.0 , 2) \n except ZeroDivisionError:\n return 0\n\nappNames = ['lda',\n 'SVM',\n 'terasort-10000000r',\n 'terasort-100000000r',\n 'terasort-300000000r',\n 's0-s1-terasort-1200000000r',\n 'terasort-hdfs-Main-1200000000r',\n 'terasort',\n 'nweight',\n 'SparseNaiveBayes',\n 'Join',\n 'Aggregation',\n 'Scan',\n 'ScalaPageRank',\n 'WordCount', \n ]\nappNamesDict = {\n 'terasort-hdfs-Main-1200000000r' : 'terasort-120GB'\n \n }\n\nmy_colors = [\n util.color_default,\n util.color_16c,\n util.color_8c,\n util.color_4c,\n util.color_2c,\n 'purple'\n ]\n\ncolor_dict = {\n 64: util.color_64c,\n 32 : util.color_default,\n 16 : util.color_16c,\n 8 : util.color_8c,\n 4 : util.color_4c,\n 2 : util.color_2c,\n }\n\nc64_patch = mpatches.Patch(color=util.color_64c, label='64c')\nc32_patch = mpatches.Patch(color=util.color_default, label='Default (32)')\nc16_patch = mpatches.Patch(color=util.color_16c, label='16 threads')\nc8_patch = mpatches.Patch(color=util.color_8c, label='8 threads')\nc4_patch = mpatches.Patch(color=util.color_4c, label='4 threads')\nc2_patch = mpatches.Patch(color=util.color_2c, label='2 threads')\n\ndef getAppNameShort(longName):\n shortName = \"unknown\"\n for appName in appNames:\n if(appName in longName):\n if appName in appNamesDict:\n dictName = appNamesDict[appName]\n shortName = dictName\n else:\n shortName = appName\n break\n return shortName\n\ndef extractNumberOfCores(appName):\n result = None\n matches = re.findall(r\"[0-9]+c\", appName)\n for matchNum, match in enumerate(matches):\n numberMatches = re.findall(\"[0-9]+\", match) \n for mNum, m in enumerate(numberMatches):\n result = m\n return result\n\n# data/dca/dca_static_ssd.csv\ndata = pd.read_csv(inputFileName, dtype={\n 'stage': int,\n 'duration': float,\n 'usedCores': int,\n 'totalCores': int,\n 'adaptive': int,\n 'isIo': int\n })\ndata['appName'] = data['appName'].apply (lambda x: x.split(\"^\", 1)[0])\ndata['appName'] = data['appName'].apply (lambda x: re.sub(r\"-[0-9]+c-\", '-', x))\ndata[\"duration\"] = data[\"duration\"].apply(lambda s: s / 1000)\nmean_data = data.groupby([\"appName\", \"stage\"]).mean()\nmean_data = mean_data.reset_index()\nappCount = len(data[\"appName\"].unique().tolist())\n\ndef getMeanAndTransformBack(df, name):\n mean = df.groupby([\"stage\", \"usedCores\"]).mean()\n mean = mean.reset_index()\n mean['appName'] = name\n return mean\n\ndef findMinimumDuration(group):\n # Filter default rows\n group = group.loc[group[\"totalCores\"] != 128]\n sum = group.groupby([\"usedCores\"])['duration'].sum()\n print(sum)\n minUsedCores = sum.idxmin()\n numCores = int( minUsedCores / numNodes)\n print(numCores,sum[minUsedCores])\n return 
numCores,sum[minUsedCores]\n\ndfgroup = data.groupby([\"appName\"], sort=False)\ni = 0\nnumNodes = 4\nnumberOfRows = math.ceil(appCount / 2)\nprint(f\"numberOfRows- {numberOfRows}\") \nfor name, group in dfgroup:\n fig = plt.figure(figsize=(6,5))\n \n # Find mean for the default\n mean_default = group.loc[group[\"totalCores\"] == 128 ]\n mean_default = getMeanAndTransformBack(mean_default, name)\n print(\"MEAN DEFAULT:\")\n print(mean_default)\n # Find the default duration\n mean_default_duration = mean_default['duration'].sum()\n print(f\"Default duration: {mean_default_duration}\")\n \n\n pos = 0\n numExperiments = len(group[\"totalCores\"].unique())\n previous_values = np.array(numExperiments)\n \n # Get all the non-default rows\n group = group.loc[group[\"totalCores\"] != 128 ]\n \n\n\n group = getMeanAndTransformBack(group, name)\n \n \n # Concat the mean default which we found earlier with non default rows\n for namerow, row in group.iterrows():\n if row[\"isIo\"] == 0:\n mean_default_row = (mean_default.loc[mean_default[\"stage\"] == row[\"stage\"]])\n group.loc[namerow, \"duration\"] = mean_default_row[\"duration\"].values\n group = pd.concat([mean_default, group])\n \n group = group.sort_values([\"totalCores\", \"stage\"], ascending=[False, True])\n print(\"Updated group:\")\n print(group)\n\n # Find the minimum duration\n minCores, minDuration = findMinimumDuration(group)\n print(f\"Min Cores: {minCores} and Min Duration: {minDuration}\")\n percentageChange = get_percentage_change(minDuration, mean_default_duration)\n print(f\"Percentage change from 32c to {minCores}c: {percentageChange}%\")\n\n for name2, group2 in group.groupby([\"stage\"], sort=False):\n group2 = group2.reset_index()\n colors = []\n for namerow, row in group2.iterrows():\n if row['isIo'] == 0:\n colors.append(util.color_default)\n else:\n colors.append(color_dict[row[\"usedCores\"] / 4])\n \n \n dataset = group2[\"duration\"].values\n \n # Find min in each stage\n minRow = group2.loc[group2[\"duration\"].idxmin()]\n assert isinstance(minRow, pd.Series)\n\n dataset = np.append(dataset,minRow[\"duration\"])\n colors.append(color_dict[minRow[\"usedCores\"] / 4])\n \n x =[32,16,8,4,2, 'bestfit']\n xs = range(len(x))\n y = dataset\n \n \n barlist = plt.bar(xs, y, bottom= previous_values, color='black', width=0.5, linewidth=2, edgecolor='black')\n i = 0\n for bar in barlist:\n bar.set_facecolor(colors[i])\n bar.set_linewidth(0.7)\n bar.set_edgecolor(util.color_stage_border)\n i = i +1 \n \n plt.xticks(xs, x)\n previous_values = np.array(previous_values) + np.array(dataset)\n default_patch = mpatches.Patch(color='black', label='Default')\n static_patch = mpatches.Patch(color='r', label='Static')\n legend = plt.legend(handles=[c32_patch, c16_patch, c8_patch, c4_patch, c2_patch], frameon=1)\n frame = legend.get_frame()\n frame.set_facecolor('lightgrey')\n plt.xlabel(formatLabelForLatex(\"Number of Threads\"))\n plt.ylabel(formatLabelForLatex(\"Runtime (s)\"))\n# /Users/sobhan/scala-ide-workspace-spark/spark/publication/Big Data Paradigm/img\n savefig(f\"{outputFilePath}/dca_static_ssd_{name}.pdf\",dpi=100, bbox_inches='tight')\n\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.patches.Patch", "pandas.read_csv", "pandas.concat", "matplotlib.pyplot.savefig", "numpy.append", "matplotlib.pyplot.bar", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
shenghh2015/automl
[ "8c2cca6e35b1f27adf4ad709128aa586badc8a76" ]
[ "efficientdet/backbone/efficientnet_builder_test.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for efficientnet_builder.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom backbone import efficientnet_builder\n\n\nclass EfficientnetBuilderTest(tf.test.TestCase):\n\n def _test_model_params(self,\n model_name,\n input_size,\n expected_params,\n override_params=None,\n features_only=False,\n pooled_features_only=False):\n images = tf.zeros((1, input_size, input_size, 3), dtype=tf.float32)\n efficientnet_builder.build_model(\n images,\n model_name=model_name,\n override_params=override_params,\n training=False,\n features_only=features_only,\n pooled_features_only=pooled_features_only)\n num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])\n self.assertEqual(num_params, expected_params)\n\n def test_efficientnet_b0(self):\n self._test_model_params('efficientnet-b0', 224, expected_params=5288548)\n\n def test_efficientnet_b1(self):\n self._test_model_params('efficientnet-b1', 240, expected_params=7794184)\n\n def test_efficientnet_b2(self):\n self._test_model_params('efficientnet-b2', 260, expected_params=9109994)\n\n def test_efficientnet_b3(self):\n self._test_model_params('efficientnet-b3', 300, expected_params=12233232)\n\n def test_efficientnet_b4(self):\n self._test_model_params('efficientnet-b4', 380, expected_params=19341616)\n\n def test_efficientnet_b5(self):\n self._test_model_params('efficientnet-b5', 456, expected_params=30389784)\n\n def test_efficientnet_b6(self):\n self._test_model_params('efficientnet-b6', 528, expected_params=43040704)\n\n def test_efficientnet_b7(self):\n self._test_model_params('efficientnet-b7', 600, expected_params=66347960)\n\n def test_efficientnet_b0_with_customized_num_classes(self):\n self._test_model_params(\n 'efficientnet-b0',\n 224,\n expected_params=4135648,\n override_params={'num_classes': 100})\n\n def test_efficientnet_b0_with_features_only(self):\n self._test_model_params(\n 'efficientnet-b0', 224, features_only=True, expected_params=3595388)\n\n def test_efficientnet_b0_with_pooled_features_only(self):\n self._test_model_params(\n 'efficientnet-b0',\n 224,\n pooled_features_only=True,\n expected_params=4007548)\n\n def test_efficientnet_b0_fails_if_both_features_requested(self):\n with self.assertRaises(AssertionError):\n efficientnet_builder.build_model(\n None,\n model_name='efficientnet-b0',\n training=False,\n features_only=True,\n pooled_features_only=True)\n\n def test_efficientnet_b0_base(self):\n # Creates a base model using the model configuration.\n images = tf.zeros((1, 224, 224, 3), dtype=tf.float32)\n _, endpoints = efficientnet_builder.build_model_base(\n images, model_name='efficientnet-b0', training=False)\n\n # reduction_1 to reduction_5 
should be in endpoints\n self.assertIn('reduction_1', endpoints)\n self.assertIn('reduction_5', endpoints)\n # reduction_5 should be the last one: no reduction_6.\n self.assertNotIn('reduction_6', endpoints)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n # Disable eager to allow tf.profile works for #params/#flops.\n tf.disable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.zeros", "numpy.prod", "tensorflow.compat.v1.disable_eager_execution" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shenjl/PStudy
[ "d49c710524781b604acf4cb3d28b774ab09ff73d", "d49c710524781b604acf4cb3d28b774ab09ff73d" ]
[ "PythonStudy/Tensorflow4mnist/mnist/v1/train.py", "PythonStudy/kaggle/Elo_Merchant_Category_Recommendation/utils.py" ]
[ "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom model import Network\n\n'''\npython 3.6\ntensorflow 1.4\n'''\n\n\nclass Train:\n def __init__(self):\n self.net = Network()\n\n # 初始化 session\n # Network() 只是构造了一张计算图,计算需要放到会话(session)中\n self.sess = tf.Session()\n # 初始化变量\n self.sess.run(tf.global_variables_initializer())\n\n # 读取训练和测试数据,这是tensorflow库自带的,不存在训练集会自动下载\n # 项目目录下已经下载好,删掉后,重新运行代码会自动下载\n # data_set/train-images-idx3-ubyte.gz\n # data_set/train-labels-idx1-ubyte.gz\n # data_set/t10k-images-idx3-ubyte.gz\n # data_set/t10k-labels-idx1-ubyte.gz\n # self.data = input_data.read_data_sets('../data_set', one_hot=True)\n self.data = input_data.read_data_sets('Tensorflow-mnist\\mnist\\data_set', one_hot=True)\n\n def train(self):\n # batch_size 是指每次迭代训练,传入训练的图片张数。\n # 数据集小,可以使用全数据集,数据大的情况下,\n # 为了提高训练速度,用随机抽取的n张图片来训练,效果与全数据集相近\n # https://www.zhihu.com/question/32673260\n batch_size = 64\n\n # 总的训练次数\n train_step = 2000\n\n # 开始训练\n for i in range(train_step):\n # 从数据集中获取 输入和标签(也就是答案)\n x, label = self.data.train.next_batch(batch_size)\n # 每次计算train,更新整个网络\n # loss只是为了看到损失的大小,方便打印\n _, loss = self.sess.run([self.net.train, self.net.loss],\n feed_dict={self.net.x: x, self.net.label: label})\n\n # 打印 loss,训练过程中将会看到,loss有变小的趋势\n # 代表随着训练的进行,网络识别图像的能力提高\n # 但是由于网络规模较小,后期没有明显下降,而是有明显波动\n if (i + 1) % 10 == 0:\n print('第%5d步,当前loss:%.2f' % (i + 1, loss))\n\n def calculate_accuracy(self):\n test_x = self.data.test.images\n test_label = self.data.test.labels\n # 注意:与训练不同的是,并没有计算 self.net.train\n # 只计算了accuracy这个张量,所以不会更新网络\n # 最终准确率约为0.91\n accuracy = self.sess.run(self.net.accuracy,\n feed_dict={self.net.x: test_x, self.net.label: test_label})\n print(\"准确率: %.2f,共测试了%d张图片 \" % (accuracy, len(test_label)))\n\n\nif __name__ == \"__main__\":\n app = Train()\n app.train()\n app.calculate_accuracy()\n", "import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport lightgbm as lgb\nfrom sklearn.model_selection import KFold\nimport warnings\nimport time\nimport sys\nimport datetime\nfrom sklearn.metrics import mean_squared_error\nwarnings.simplefilter(action='ignore', category=FutureWarning)\npd.set_option('display.max_columns', 500)\n\ndef reduce_mem_usage(df, verbose=True):\n '''[summary]\n 看起来好像是数据清洗,减少内存的使用\n Arguments:\n df {[type]} -- [description]\n \n Keyword Arguments:\n verbose {bool} -- [description] (default: {True})\n \n Returns:\n [type] -- [description]\n '''\n\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2 \n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64) \n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64) \n end_mem = df.memory_usage().sum() / 1024**2\n 
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df\n\n# parse_dates是什么意思?\nnew_transactions = pd.read_csv(r'D:\\workspace\\MachineLearning\\Kaggle\\Elo_Merchant_Category_Recommendation\\dataset\\new_merchant_transactions.csv',parse_dates=['purchase_date'])\n\nhistorical_transactions = pd.read_csv(r'D:\\workspace\\MachineLearning\\Kaggle\\Elo_Merchant_Category_Recommendation\\dataset\\historical_transactions.csv',parse_dates=['purchase_date'])\n\ndef binarize(df):\n '''[summary]\n 二值化,用于将布尔型数据进行转化。\n Arguments:\n df {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n '''\n for col in ['authorized_flag', 'category_1']:\n df[col] = df[col].map({'Y':1, 'N':0})\n return df\n\nhistorical_transactions = binarize(historical_transactions)\nnew_transactions = binarize(new_transactions)\n\n# 格式化日期数据,formatting the dates\ndef read_data(input_file):\n df = pd.read_csv(input_file)\n df['first_active_month'] = pd.to_datetime(df['first_active_month'])\n df['elapsed_time'] = (datetime.date(2018, 2, 1) - df['first_active_month'].dt.date).dt.days\n return df\n# load the main files, and extracting the target\ntrain = read_data(r'D:\\workspace\\MachineLearning\\Kaggle\\Elo_Merchant_Category_Recommendation\\dataset\\train.csv')\ntest = read_data(r'D:\\workspace\\MachineLearning\\Kaggle\\Elo_Merchant_Category_Recommendation\\dataset\\test.csv')\n\ntarget = train['target']\ndel train['target']\n\n\n# 特征工程 Feature engineering\nhistorical_transactions['month_diff'] = ((datetime.datetime.today() - historical_transactions['purchase_date']).dt.days)//30\nhistorical_transactions['month_diff'] += historical_transactions['month_lag']\n\nnew_transactions['month_diff'] = ((datetime.datetime.today() - new_transactions['purchase_date']).dt.days)//30\nnew_transactions['month_diff'] += new_transactions['month_lag']\n\n# historical_transactions[:5]\n\nhistorical_transactions = pd.get_dummies(historical_transactions, columns=['category_2', 'category_3'])\nnew_transactions = pd.get_dummies(new_transactions, columns=['category_2', 'category_3'])\n\nhistorical_transactions = reduce_mem_usage(historical_transactions)\nnew_transactions = reduce_mem_usage(new_transactions)\n\nagg_fun = {'authorized_flag': ['mean']}\nauth_mean = historical_transactions.groupby(['card_id']).agg(agg_fun)\nauth_mean.columns = ['_'.join(col).strip() for col in auth_mean.columns.values]\nauth_mean.reset_index(inplace=True)\n\nauthorized_transactions = historical_transactions[historical_transactions['authorized_flag'] == 1]\nhistorical_transactions = historical_transactions[historical_transactions['authorized_flag'] == 0]\n\n# define a few dates features\nhistorical_transactions['purchase_month'] = historical_transactions['purchase_date'].dt.month\nauthorized_transactions['purchase_month'] = authorized_transactions['purchase_date'].dt.month\nnew_transactions['purchase_month'] = new_transactions['purchase_date'].dt.month\n\n# Then I define two functions that aggregate the info contained in these two tables. 
The first function aggregates the function by grouping on `card_id`\ndef aggregate_transactions(history):\n \n history.loc[:, 'purchase_date'] = pd.DatetimeIndex(history['purchase_date']).\\\n astype(np.int64) * 1e-9\n \n agg_func = {\n 'category_1': ['sum', 'mean'],\n 'category_2_1.0': ['mean'],\n 'category_2_2.0': ['mean'],\n 'category_2_3.0': ['mean'],\n 'category_2_4.0': ['mean'],\n 'category_2_5.0': ['mean'],\n 'category_3_A': ['mean'],\n 'category_3_B': ['mean'],\n 'category_3_C': ['mean'],\n 'merchant_id': ['nunique'],\n 'merchant_category_id': ['nunique'],\n 'state_id': ['nunique'],\n 'city_id': ['nunique'],\n 'subsector_id': ['nunique'],\n 'purchase_amount': ['sum', 'mean', 'max', 'min', 'std'],\n 'installments': ['sum', 'mean', 'max', 'min', 'std'],\n 'purchase_month': ['mean', 'max', 'min', 'std'],\n 'purchase_date': [np.ptp, 'min', 'max'],\n 'month_lag': ['mean', 'max', 'min', 'std'],\n 'month_diff': ['mean']\n }\n \n agg_history = history.groupby(['card_id']).agg(agg_func)\n agg_history.columns = ['_'.join(col).strip() for col in agg_history.columns.values]\n agg_history.reset_index(inplace=True)\n \n df = (history.groupby('card_id')\n .size()\n .reset_index(name='transactions_count'))\n \n agg_history = pd.merge(df, agg_history, on='card_id', how='left')\n \n return agg_history\n\nhistory = aggregate_transactions(historical_transactions)\nhistory.columns = ['hist_' + c if c != 'card_id' else c for c in history.columns]\nhistory[:5]\n\nauthorized = aggregate_transactions(authorized_transactions)\nauthorized.columns = ['auth_' + c if c != 'card_id' else c for c in authorized.columns]\nauthorized[:5]\n\nnew = aggregate_transactions(new_transactions)\nnew.columns = ['new_' + c if c != 'card_id' else c for c in new.columns]\nnew[:5]\n\n# The second function first aggregates on the two variables `card_id` and `month_lag`. 
Then a second grouping is performed to aggregate over time:\n\ndef aggregate_per_month(history):\n grouped = history.groupby(['card_id', 'month_lag'])\n\n agg_func = {\n 'purchase_amount': ['count', 'sum', 'mean', 'min', 'max', 'std'],\n 'installments': ['count', 'sum', 'mean', 'min', 'max', 'std'],\n }\n\n intermediate_group = grouped.agg(agg_func)\n intermediate_group.columns = ['_'.join(col).strip() for col in intermediate_group.columns.values]\n intermediate_group.reset_index(inplace=True)\n\n final_group = intermediate_group.groupby('card_id').agg(['mean', 'std'])\n final_group.columns = ['_'.join(col).strip() for col in final_group.columns.values]\n final_group.reset_index(inplace=True)\n \n return final_group\n#___________________________________________________________\nfinal_group = aggregate_per_month(authorized_transactions) \nfinal_group[:10]\n\ndef successive_aggregates(df, field1, field2):\n t = df.groupby(['card_id', field1])[field2].mean()\n u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'min', 'max', 'std'])\n u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values]\n u.reset_index(inplace=True)\n return u\n\nadditional_fields = successive_aggregates(new_transactions, 'category_1', 'purchase_amount')\nadditional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'installments', 'purchase_amount'),\n on = 'card_id', how='left')\nadditional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'city_id', 'purchase_amount'),\n on = 'card_id', how='left')\nadditional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'category_1', 'installments'),\n on = 'card_id', how='left')\n\n\n# 3. Training the model\n# We now train the model with the features we previously defined. A first step consists in merging all the dataframes:\n\ntrain = pd.merge(train, history, on='card_id', how='left')\ntest = pd.merge(test, history, on='card_id', how='left')\n\ntrain = pd.merge(train, authorized, on='card_id', how='left')\ntest = pd.merge(test, authorized, on='card_id', how='left')\n\ntrain = pd.merge(train, new, on='card_id', how='left')\ntest = pd.merge(test, new, on='card_id', how='left')\n\ntrain = pd.merge(train, final_group, on='card_id', how='left')\ntest = pd.merge(test, final_group, on='card_id', how='left')\n\ntrain = pd.merge(train, auth_mean, on='card_id', how='left')\ntest = pd.merge(test, auth_mean, on='card_id', how='left')\n\ntrain = pd.merge(train, additional_fields, on='card_id', how='left')\ntest = pd.merge(test, additional_fields, on='card_id', how='left')\n\ntest.to_csv('test.csv')\ntrain['target'] = target\ntrain.to_csv('train.csv')\ndel train['target']\n\n# and to define the features we want to keep to train the model. 
For that purpose, I use the results obtained in the [Selecting features kernel](https://www.kaggle.com/fabiendaniel/selecting-features/notebook):\n\n# unimportant_features = [\n# 'auth_category_2_1.0_mean',\n# 'auth_category_2_2.0_mean',\n# 'auth_category_2_3.0_mean',\n# 'auth_category_2_5.0_mean',\n# 'hist_category_2_3.0_mean',\n# 'hist_category_2_4.0_mean',\n# 'hist_category_2_5.0_mean',\n# 'hist_category_3_A_mean',\n# 'hist_installments_min',\n# 'hist_installments_std',\n# 'hist_month_lag_std',\n# 'hist_purchase_amount_max',\n# 'hist_purchase_month_max',\n# 'hist_purchase_month_min',\n# 'hist_purchase_month_std',\n# 'installments_min_mean',\n# 'new_category_2_1.0_mean',\n# 'new_category_2_2.0_mean',\n# 'new_category_2_3.0_mean',\n# 'new_category_2_5.0_mean',\n# 'new_city_id_nunique',\n# 'new_installments_std',\n# 'new_state_id_nunique',\n# 'purchase_amount_mean_mean'\n# ]\nfeatures = [c for c in train.columns if c not in ['card_id', 'first_active_month']]\n#features = [f for f in features if f not in unimportant_features]\ncategorical_feats = ['feature_2', 'feature_3']\n\n# We then set the hyperparameters of the LGBM model, these parameters are obtained by an [bayesian optimization done in another kernel](https://www.kaggle.com/fabiendaniel/hyperparameter-tuning/edit):\n\nparam = {'num_leaves': 111,\n 'min_data_in_leaf': 149, \n 'objective':'regression',\n 'max_depth': 9,\n 'learning_rate': 0.005,\n \"boosting\": \"gbdt\",\n \"feature_fraction\": 0.7522,\n \"bagging_freq\": 1,\n \"bagging_fraction\": 0.7083 ,\n \"bagging_seed\": 11,\n \"metric\": 'rmse',\n \"lambda_l1\": 0.2634,\n \"random_state\": 133,\n \"verbosity\": -1}\n\n# We now train the model. Here, we use a standard KFold split of the dataset in order to validate the results and to stop the training. Interstingly, during the writing of this kernel, the model was enriched adding new features, which improved the CV score. **The variations observed on the CV were found to be quite similar to the variations on the LB**: it seems that the current competition won't give us headaches to define the correct validation scheme: \n\nfolds = KFold(n_splits=5, shuffle=True, random_state=15)\noof = np.zeros(len(train))\npredictions = np.zeros(len(test))\nstart = time.time()\nfeature_importance_df = pd.DataFrame()\n\nfor fold_, (trn_idx, val_idx) in enumerate(folds.split(train.values, target.values)):\n print(\"fold n°{}\".format(fold_))\n trn_data = lgb.Dataset(train.iloc[trn_idx][features],\n label=target.iloc[trn_idx],\n categorical_feature=categorical_feats\n )\n val_data = lgb.Dataset(train.iloc[val_idx][features],\n label=target.iloc[val_idx],\n categorical_feature=categorical_feats\n )\n\n num_round = 10000\n clf = lgb.train(param,\n trn_data,\n num_round,\n valid_sets = [trn_data, val_data],\n verbose_eval=100,\n early_stopping_rounds = 200)\n \n oof[val_idx] = clf.predict(train.iloc[val_idx][features], num_iteration=clf.best_iteration)\n \n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = features\n fold_importance_df[\"importance\"] = clf.feature_importance()\n fold_importance_df[\"fold\"] = fold_ + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n \n predictions += clf.predict(test[features], num_iteration=clf.best_iteration) / folds.n_splits\n\nprint(\"CV score: {:<8.5f}\".format(mean_squared_error(oof, target)**0.5))\n\n# 4. 
Feature importance\n# Finally, we can have a look at the features that were used by the model:\n\ncols = (feature_importance_df[[\"feature\", \"importance\"]]\n .groupby(\"feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:1000].index)\n\nbest_features = feature_importance_df.loc[feature_importance_df.feature.isin(cols)]\n\nplt.figure(figsize=(14,25))\nsns.barplot(x=\"importance\",\n y=\"feature\",\n data=best_features.sort_values(by=\"importance\",\n ascending=False))\nplt.title('LightGBM Features (avg over folds)')\nplt.tight_layout()\nplt.savefig('lgbm_importances.png')\n\n# 5. Submission\n# Now, we just need to prepare the submission file:\n\nsub_df = pd.DataFrame({\"card_id\":test[\"card_id\"].values})\nsub_df[\"target\"] = predictions\nsub_df.to_csv(\"submit.csv\", index=False)" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.Session" ], [ "pandas.merge", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "pandas.to_datetime", "matplotlib.pyplot.title", "pandas.concat", "matplotlib.pyplot.figure", "sklearn.model_selection.KFold", "pandas.DataFrame", "matplotlib.pyplot.savefig", "sklearn.metrics.mean_squared_error", "pandas.DatetimeIndex", "numpy.finfo", "numpy.iinfo", "pandas.set_option", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
JevinJ/pyvista
[ "c9be18ed209de3f80e1a70ef01eef3355b3616ce" ]
[ "pyvista/utilities/geometric_objects.py" ]
[ "\"\"\"Provides an easy way of generating several geometric objects.\n\nCONTAINS\n--------\nvtkArrowSource\nvtkCylinderSource\nvtkSphereSource\nvtkPlaneSource\nvtkLineSource\nvtkCubeSource\nvtkConeSource\nvtkDiskSource\nvtkRegularPolygonSource\nvtkPyramid\n\n\"\"\"\nimport numpy as np\nimport vtk\n\nimport pyvista\nfrom pyvista.utilities import assert_empty_kwargs, check_valid_vector\n\nNORMALS = {\n 'x': [1, 0, 0],\n 'y': [0, 1, 0],\n 'z': [0, 0, 1],\n '-x': [-1, 0, 0],\n '-y': [0, -1, 0],\n '-z': [0, 0, -1],\n}\n\n\ndef translate(surf, center=[0., 0., 0.], direction=[1., 0., 0.]):\n \"\"\"Translate and orientate a mesh to a new center and direction.\n\n By default, the input mesh is considered centered at the origin\n and facing in the x direction.\n\n \"\"\"\n normx = np.array(direction)/np.linalg.norm(direction)\n normz = np.cross(normx, [0, 1.0, 0.0000001])\n normz /= np.linalg.norm(normz)\n normy = np.cross(normz, normx)\n\n trans = np.zeros((4, 4))\n trans[:3, 0] = normx\n trans[:3, 1] = normy\n trans[:3, 2] = normz\n trans[3, 3] = 1\n\n surf.transform(trans)\n if not np.allclose(center, [0., 0., 0.]):\n surf.points += np.array(center)\n\n\ndef Cylinder(center=(0.,0.,0.), direction=(1.,0.,0.), radius=0.5, height=1.0,\n resolution=100, capping=True, **kwargs):\n \"\"\"Create the surface of a cylinder.\n\n See also :func:`pyvista.CylinderStructured`.\n\n Parameters\n ----------\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder points to in [x, y, z]\n\n radius : float\n Radius of the cylinder.\n\n height : float\n Height of the cylinder.\n\n resolution : int\n Number of points on the circular face of the cylinder.\n\n capping : bool, optional\n Cap cylinder ends with polygons. Default True\n\n Returns\n -------\n cylinder : pyvista.PolyData\n Cylinder surface.\n\n Examples\n --------\n >>> import pyvista\n >>> import numpy as np\n >>> cylinder = pyvista.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1)\n >>> cylinder.plot() # doctest:+SKIP\n \"\"\"\n capping = kwargs.pop('cap_ends', capping)\n assert_empty_kwargs(**kwargs)\n cylinderSource = vtk.vtkCylinderSource()\n cylinderSource.SetRadius(radius)\n cylinderSource.SetHeight(height)\n cylinderSource.SetCapping(capping)\n cylinderSource.SetResolution(resolution)\n cylinderSource.Update()\n surf = pyvista.PolyData(cylinderSource.GetOutput())\n surf.rotate_z(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef CylinderStructured(radius=0.5, height=1.0,\n center=(0.,0.,0.), direction=(1.,0.,0.),\n theta_resolution=32, z_resolution=10):\n \"\"\"Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.\n\n The end caps are left open. This can create a surface mesh if a single\n value for the ``radius`` is given or a 3D mesh if multiple radii are given\n as a list/array in the ``radius`` argument.\n\n Parameters\n ----------\n radius : float\n Radius of the cylinder. 
If an iterable\n\n height : float\n Height (length) of the cylinder along its Z-axis\n\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder Z-axis in [x, y, z]\n\n theta_resolution : int\n Number of points on the circular face of the cylinder.\n\n z_resolution : int\n Number of points along the height (Z-axis) of the cylinder\n\n \"\"\"\n # Define grid in polar coordinates\n r = np.array([radius]).ravel()\n nr = len(r)\n theta = np.linspace(0, 2*np.pi, num=theta_resolution)\n radius_matrix, theta_matrix = np.meshgrid(r,theta)\n\n # Transform to cartesian space\n X = radius_matrix * np.cos(theta_matrix)\n Y = radius_matrix * np.sin(theta_matrix)\n\n # Make all the nodes in the grid\n xx = np.array([X] * z_resolution).ravel()\n yy = np.array([Y] * z_resolution).ravel()\n dz = height / (z_resolution - 1)\n zz = np.empty(yy.size)\n zz = np.full((X.size, z_resolution), dz)\n zz *= np.arange(z_resolution)\n zz = zz.ravel(order='f')\n\n # Create the grid\n grid = pyvista.StructuredGrid()\n grid.points = np.c_[xx, yy, zz]\n grid.dimensions = [nr, theta_resolution, z_resolution]\n\n # Orient properly in user direction\n vx = np.array([0., 0., 1.])\n if not np.allclose(vx, direction):\n direction /= np.linalg.norm(direction)\n vx -= vx.dot(direction) * direction\n vx /= np.linalg.norm(vx)\n vy = np.cross(direction, vx)\n rmtx = np.array([vx, vy, direction])\n grid.points = grid.points.dot(rmtx)\n\n # Translate to given center\n grid.points -= np.array(grid.center)\n grid.points += np.array(center)\n\n return grid\n\n\ndef Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25,\n tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,\n shaft_resolution=20, scale=None):\n \"\"\"Create a vtk Arrow.\n\n Parameters\n ----------\n start : np.ndarray\n Start location in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction the arrow points to in [x, y, z]\n\n tip_length : float, optional\n Length of the tip.\n\n tip_radius : float, optional\n Radius of the tip.\n\n tip_resolution : int, optional\n Number of faces around the tip.\n\n shaft_radius : float, optional\n Radius of the shaft.\n\n shaft_resolution : int, optional\n Number of faces around the shaft.\n\n scale : float or str, optional\n Scale factor of the entire object, default is None (i.e. 
scale of 1).\n 'auto' scales to length of direction array.\n\n Returns\n -------\n arrow : pyvista.PolyData\n Arrow surface.\n\n \"\"\"\n # Create arrow object\n arrow = vtk.vtkArrowSource()\n arrow.SetTipLength(tip_length)\n arrow.SetTipRadius(tip_radius)\n arrow.SetTipResolution(tip_resolution)\n arrow.SetShaftRadius(shaft_radius)\n arrow.SetShaftResolution(shaft_resolution)\n arrow.Update()\n surf = pyvista.PolyData(arrow.GetOutput())\n\n if scale == 'auto':\n scale = float(np.linalg.norm(direction))\n if isinstance(scale, float) or isinstance(scale, int):\n surf.points *= scale\n elif scale is not None:\n raise TypeError(\"Scale must be either float, int or 'auto'.\")\n\n translate(surf, start, direction)\n return surf\n\n\ndef Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,\n phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):\n \"\"\"Create a vtk Sphere.\n\n Parameters\n ----------\n radius : float, optional\n Sphere radius\n\n center : np.ndarray or list, optional\n Center in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction the top of the sphere points to in [x, y, z]\n\n theta_resolution: int , optional\n Set the number of points in the longitude direction (ranging from\n start_theta to end theta).\n\n phi_resolution : int, optional\n Set the number of points in the latitude direction (ranging from\n start_phi to end_phi).\n\n start_theta : float, optional\n Starting longitude angle.\n\n end_theta : float, optional\n Ending longitude angle.\n\n start_phi : float, optional\n Starting latitude angle.\n\n end_phi : float, optional\n Ending latitude angle.\n\n Returns\n -------\n sphere : pyvista.PolyData\n Sphere mesh.\n\n \"\"\"\n sphere = vtk.vtkSphereSource()\n sphere.SetRadius(radius)\n sphere.SetThetaResolution(theta_resolution)\n sphere.SetPhiResolution(phi_resolution)\n sphere.SetStartTheta(start_theta)\n sphere.SetEndTheta(end_theta)\n sphere.SetStartPhi(start_phi)\n sphere.SetEndPhi(end_phi)\n sphere.Update()\n surf = pyvista.PolyData(sphere.GetOutput())\n surf.rotate_y(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,\n i_resolution=10, j_resolution=10):\n \"\"\"Create a plane.\n\n Parameters\n ----------\n center : list or tuple or np.ndarray\n Location of the centroid in [x, y, z]\n\n direction : list or tuple or np.ndarray\n Direction cylinder points to in [x, y, z]\n\n i_size : float\n Size of the plane in the i direction.\n\n j_size : float\n Size of the plane in the j direction.\n\n i_resolution : int\n Number of points on the plane in the i direction.\n\n j_resolution : int\n Number of points on the plane in the j direction.\n\n Returns\n -------\n plane : pyvista.PolyData\n Plane mesh\n\n \"\"\"\n planeSource = vtk.vtkPlaneSource()\n planeSource.SetXResolution(i_resolution)\n planeSource.SetYResolution(j_resolution)\n planeSource.Update()\n\n surf = pyvista.PolyData(planeSource.GetOutput())\n\n surf.points[:, 0] *= i_size\n surf.points[:, 1] *= j_size\n surf.rotate_y(-90)\n translate(surf, center, direction)\n return surf\n\n\ndef Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):\n \"\"\"Create a line.\n\n Parameters\n ----------\n pointa : np.ndarray or list\n Location in [x, y, z].\n\n pointb : np.ndarray or list\n Location in [x, y, z].\n\n resolution : int\n number of pieces to divide line into\n\n \"\"\"\n if resolution <= 0:\n raise ValueError('Resolution must be positive')\n if 
np.array(pointa).size != 3:\n raise TypeError('Point A must be a length three tuple of floats.')\n if np.array(pointb).size != 3:\n raise TypeError('Point B must be a length three tuple of floats.')\n src = vtk.vtkLineSource()\n src.SetPoint1(*pointa)\n src.SetPoint2(*pointb)\n src.SetResolution(resolution)\n src.Update()\n line = pyvista.wrap(src.GetOutput())\n # Compute distance of every point along line\n compute = lambda p0, p1: np.sqrt(np.sum((p1 - p0)**2, axis=1))\n distance = compute(np.array(pointa), line.points)\n line['Distance'] = distance\n return line\n\n\ndef Cube(center=(0., 0., 0.), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None):\n \"\"\"Create a cube.\n\n It's possible to specify either the center and side lengths or just\n the bounds of the cube. If ``bounds`` are given, all other arguments are\n ignored.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z].\n\n x_length : float\n length of the cube in the x-direction.\n\n y_length : float\n length of the cube in the y-direction.\n\n z_length : float\n length of the cube in the z-direction.\n\n bounds : np.ndarray or list\n Specify the bounding box of the cube. If given, all other arguments are\n ignored. ``(xMin,xMax, yMin,yMax, zMin,zMax)``\n\n \"\"\"\n src = vtk.vtkCubeSource()\n if bounds is not None:\n if np.array(bounds).size != 6:\n raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)')\n src.SetBounds(bounds)\n else:\n src.SetCenter(center)\n src.SetXLength(x_length)\n src.SetYLength(y_length)\n src.SetZLength(z_length)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):\n \"\"\"Create a box with solid faces for the given bounds.\n\n Parameters\n ----------\n bounds : np.ndarray or list\n Specify the bounding box of the cube.\n ``(xMin, xMax, yMin, yMax, zMin, zMax)``\n\n level : int\n Level of subdivision of the faces.\n\n quads : bool, optional\n Flag to tell the source to generate either a quad or two\n triangle for a set of four points. Default ``True``.\n\n \"\"\"\n if np.array(bounds).size != 6:\n raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')\n src = vtk.vtkTessellatedBoxSource()\n src.SetLevel(level)\n if quads:\n src.QuadsOn()\n else:\n src.QuadsOff()\n src.SetBounds(bounds)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Cone(center=(0.,0.,0.), direction=(1.,0.,0.), height=1.0, radius=None,\n capping=True, angle=None, resolution=6):\n \"\"\"Create a cone.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z]. middle of the axis of the cone.\n\n direction : np.ndarray or list\n Direction vector in [x, y, z]. orientation vector of the cone.\n\n height : float\n Height along the cone in its specified direction.\n\n radius : float\n Base radius of the cone\n\n capping : bool\n Turn on/off whether to cap the base of the cone with a polygon.\n\n angle : float\n The angle degrees between the axis of the cone and a generatrix.\n\n resolution : int\n Number of facets used to represent the cone\n\n \"\"\"\n src = vtk.vtkConeSource()\n src.SetCapping(capping)\n src.SetDirection(direction)\n src.SetCenter(center)\n src.SetHeight(height)\n # Contributed by @kjelljorner in #249:\n if angle and radius:\n raise ValueError(\"Both radius and angle specified. 
They are mutually exclusive.\")\n elif angle and not radius:\n src.SetAngle(angle)\n elif not angle and radius:\n src.SetRadius(radius)\n elif not angle and not radius:\n src.SetRadius(0.5)\n src.SetResolution(resolution)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Polygon(center=(0.,0.,0.), radius=1, normal=(0,0,1), n_sides=6):\n \"\"\"Create a polygonal disk with a hole in the center.\n\n The disk has zero height. The user can specify the inner and outer radius\n of the disk, and the radial and circumferential resolution of the polygonal\n representation.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z]. middle of the axis of the polygon.\n\n radius : float\n The radius of the polygon\n\n normal : np.ndarray or list\n Direction vector in [x, y, z]. orientation vector of the cone.\n\n n_sides : int\n Number of sides of the polygon\n\n \"\"\"\n src = vtk.vtkRegularPolygonSource()\n src.SetCenter(center)\n src.SetNumberOfSides(n_sides)\n src.SetRadius(radius)\n src.SetNormal(normal)\n src.Update()\n return pyvista.wrap(src.GetOutput())\n\n\ndef Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,\n c_res=6):\n \"\"\"Create a polygonal disk with a hole in the center.\n\n The disk has zero height. The user can specify the inner and outer radius\n of the disk, and the radial and circumferential resolution of the polygonal\n representation.\n\n Parameters\n ----------\n center : np.ndarray or list\n Center in [x, y, z]. middle of the axis of the disc.\n\n inner : float\n The inner radius\n\n outer : float\n The outer radius\n\n normal : np.ndarray or list\n Direction vector in [x, y, z]. orientation vector of the cone.\n\n r_res: int\n Number of points in radius direction.\n\n r_res: int\n Number of points in circumferential direction.\n\n \"\"\"\n src = vtk.vtkDiskSource()\n src.SetInnerRadius(inner)\n src.SetOuterRadius(outer)\n src.SetRadialResolution(r_res)\n src.SetCircumferentialResolution(c_res)\n src.Update()\n normal = np.array(normal)\n center = np.array(center)\n surf = pyvista.PolyData(src.GetOutput())\n surf.rotate_y(90)\n translate(surf, center, normal)\n return surf\n\n\ndef Text3D(string, depth=0.5):\n \"\"\"Create 3D text from a string.\"\"\"\n vec_text = vtk.vtkVectorText()\n vec_text.SetText(string)\n\n extrude = vtk.vtkLinearExtrusionFilter()\n extrude.SetInputConnection(vec_text.GetOutputPort())\n extrude.SetExtrusionTypeToNormalExtrusion()\n extrude.SetVector(0, 0, 1)\n extrude.SetScaleFactor(depth)\n\n tri_filter = vtk.vtkTriangleFilter()\n tri_filter.SetInputConnection(extrude.GetOutputPort())\n tri_filter.Update()\n return pyvista.wrap(tri_filter.GetOutput())\n\n\ndef Wavelet(extent=(-10,10,-10,10,-10,10), center=(0,0,0), maximum=255,\n x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,\n std=0.5, subsample_rate=1):\n \"\"\"Create a wavelet.\"\"\"\n wavelet_source = vtk.vtkRTAnalyticSource()\n wavelet_source.SetWholeExtent(*extent)\n wavelet_source.SetCenter(center)\n wavelet_source.SetMaximum(maximum)\n wavelet_source.SetXFreq(x_freq)\n wavelet_source.SetYFreq(y_freq)\n wavelet_source.SetZFreq(z_freq)\n wavelet_source.SetXMag(x_mag)\n wavelet_source.SetYMag(y_mag)\n wavelet_source.SetZMag(z_mag)\n wavelet_source.SetStandardDeviation(std)\n wavelet_source.SetSubsampleRate(subsample_rate)\n wavelet_source.Update()\n return pyvista.wrap(wavelet_source.GetOutput())\n\n\ndef CircularArc(pointa, pointb, center, resolution=100, normal=None,\n polar=None, angle=None, negative=False):\n 
\"\"\"Create a circular arc defined by two endpoints and a center.\n\n The number of segments composing the polyline is controlled by\n setting the object resolution. Alternatively, one can use a\n better API (that does not allow for inconsistent nor ambiguous\n inputs), using a starting point (polar vector, measured from the\n arc's center), a normal to the plane of the arc, and an angle\n defining the arc length.\n\n Parameters\n ----------\n pointa : np.ndarray or list\n Position of the first end point.\n\n pointb : np.ndarray or list\n Position of the other end point.\n\n center : np.ndarray or list\n Center of the circle that defines the arc.\n\n resolution : int, optional\n The number of segments of the polyline that draws the arc.\n Resolution of 1 will just create a line.\n\n normal : np.ndarray or list\n The normal vector to the plane of the arc. By default it\n points in the positive Z direction.\n\n polar : np.ndarray or list\n (starting point of the arc). By default it is the unit vector\n in the positive x direction. Note: This is only used when\n normal has been input.\n\n angle : float\n Arc length (in degrees), beginning at the polar vector. The\n direction is counterclockwise by default; a negative value\n draws the arc in the clockwise direction. Note: This is only\n used when normal has been input.\n\n negative : bool, optional\n By default the arc spans the shortest angular sector point1 and point2.\n\n By setting this to true, the longest angular sector is used\n instead (i.e. the negative coterminal angle to the shortest\n one). This is only used when normal has not been input\n\n Examples\n --------\n Quarter arc centered at the origin in the xy plane\n\n >>> import pyvista\n >>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])\n >>> pl = pyvista.Plotter()\n >>> _ = pl.add_mesh(arc, color='k', line_width=4)\n >>> _ = pl.show_bounds(location='all')\n >>> _ = pl.view_xy()\n >>> pl.show() # doctest:+SKIP\n\n Quarter arc centered at the origin in the xz plane\n\n >>> arc = pyvista.CircularArc([-1, 0, 0], [1, 0, 0], [0, 0, 0], normal=[0, 0, 1])\n >>> arc.plot() # doctest:+SKIP\n \"\"\"\n check_valid_vector(pointa, 'pointa')\n check_valid_vector(pointb, 'pointb')\n check_valid_vector(center, 'center')\n\n # fix half-arc bug: if a half arc travels directly through the\n # center point, it becomes a line\n pointb = list(pointb)\n pointb[0] -= 1E-10\n pointb[1] -= 1E-10\n\n arc = vtk.vtkArcSource()\n arc.SetPoint1(*pointa)\n arc.SetPoint2(*pointb)\n arc.SetCenter(*center)\n arc.SetResolution(resolution)\n arc.SetNegative(negative)\n\n if normal is not None:\n arc.UseNormalAndAngleOn()\n check_valid_vector(normal, 'normal')\n arc.SetNormal(*normal)\n\n if polar is not None:\n check_valid_vector(polar, 'polar')\n arc.SetPolarVector(*polar)\n\n if angle is not None:\n arc.SetAngle(angle)\n\n arc.Update()\n return pyvista.wrap(arc.GetOutput())\n\n\ndef Pyramid(points):\n \"\"\"Create a pyramid defined by 5 points.\n\n Parameters\n ----------\n points : np.ndarray or list\n Points of the pyramid. 
Points are ordered such that the first\n four points are the four counterclockwise points on the\n quadrilateral face, and the last point is the apex.\n\n Returns\n -------\n pyramid : pyvista.UnstructuredGrid\n\n Examples\n --------\n >>> import pyvista\n >>> pointa = [1.0, 1.0, 1.0]\n >>> pointb = [-1.0, 1.0, 1.0]\n >>> pointc = [-1.0, -1.0, 1.0]\n >>> pointd = [1.0, -1.0, 1.0]\n >>> pointe = [0.0, 0.0, 0.0]\n >>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])\n >>> pyramid.plot() # doctest:+SKIP\n \"\"\"\n if len(points) != 5:\n raise TypeError('Points must be given as length 5 np.ndarray or list')\n\n check_valid_vector(points[0], 'points[0]')\n check_valid_vector(points[1], 'points[1]')\n check_valid_vector(points[2], 'points[2]')\n check_valid_vector(points[3], 'points[3]')\n check_valid_vector(points[4], 'points[4]')\n\n pyramid = vtk.vtkPyramid()\n pyramid.GetPointIds().SetId(0, 0)\n pyramid.GetPointIds().SetId(1, 1)\n pyramid.GetPointIds().SetId(2, 2)\n pyramid.GetPointIds().SetId(3, 3)\n pyramid.GetPointIds().SetId(4, 4)\n\n ug = vtk.vtkUnstructuredGrid()\n ug.SetPoints(pyvista.vtk_points(np.array(points), False))\n ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())\n\n return pyvista.wrap(ug)\n" ]
[ [ "numpy.allclose", "numpy.linspace", "numpy.arange", "numpy.linalg.norm", "numpy.cos", "numpy.full", "numpy.sin", "numpy.cross", "numpy.array", "numpy.meshgrid", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joegeisz/pylith
[ "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "f74060b7b19d7e90abf8597bbe9250c96593c0ad" ]
[ "examples/2d/subduction/viz/plot_slipprofile.py", "unittests/libtests/feassemble/data/Quadrature2DLinear.py", "unittests/libtests/materials/data/DruckerPrager3DElastic.py", "examples/2d/subduction/viz/plot_faultinfo.py", "unittests/libtests/materials/data/ElasticMaterialApp.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nThis script generates a plot showing slip or fault tractions.\n\"\"\"\n\n# The code requires the numpy, h5py, and matplotlib packages.\nimport numpy\nimport h5py\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as pyplot\n\n\n# ----------------------------------------------------------------------\ndef calcDist(vertices):\n \"\"\"Compute down-dip distance from the trench.\n \"\"\"\n dist = numpy.zeros(vertices.shape[0])\n pt0 = vertices[:-1,:]\n pt1 = vertices[1:,:]\n dx = ((pt1[:,0]-pt0[:,0])**2 + (pt1[:,1]-pt0[:,1])**2)**0.5\n dist[1:] = numpy.cumsum(dx)\n return dist\n\n# ----------------------------------------------------------------------\ndef getData(sim):\n \"\"\"Read fault information from HDF5 file.\n \"\"\"\n filename = \"output/%s-fault-slabtop.h5\" % sim\n h5 = h5py.File(filename, \"r\")\n vertices = h5['geometry/vertices'][:]\n slip = h5['vertex_fields/slip'][:,:,:]\n tstamps = h5[\"time\"][:]\n h5.close()\n\n data = {\n \"time\": tstamps,\n \"vertices\": vertices,\n \"slip\": slip\n }\n return data\n\n# ----------------------------------------------------------------------\ndef plot(sim):\n\n # Get fault data for simulation.\n data = getData(sim)\n \n # Create sort key corresponding to increasing depth.\n indices = numpy.argsort(data[\"vertices\"][:,1])[::-1]\n\n # Calculate down-dip distance from trench and get sorted data.\n #dist = calcDist(data[\"vertices\"][indices,:])\n dist = -data[\"vertices\"][indices,1]\n slip = data[\"slip\"][:,indices,:]\n\n figure = pyplot.figure(figsize=(5.0, 3.0), facecolor='white', dpi=150)\n figure.set_facecolor('white')\n\n axes = figure.add_axes([0.15, 0.15, 0.80, 0.82])\n\n for i,t in enumerate(data[\"time\"]):\n color = \"blue\"\n lw = 0.5\n if i % 10 == 0:\n color = \"red\"\n lw = 1.0\n axes.plot(-slip[i,:,0], dist/1.0e+3, '-', color=color, linewidth=lw)\n axes.set_xlabel(\"Slip (m)\")\n #axes.set_ylabel(\"Down-dip Dist. (km)\")\n axes.set_ylabel(\"Depth (km)\")\n axes.invert_yaxis()\n\n\n pyplot.show()\n pyplot.savefig(\"subduction2d_%s_slip.pdf\" % sim)\n return\n\n# ======================================================================\nif __name__ == \"__main__\":\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sim\", action=\"store\", dest=\"sim\", default=\"step05\")\n args = parser.parse_args()\n\n plot(args.sim)\n\n\n# End of file\n", "#!/usr/bin/env python\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ----------------------------------------------------------------------\n#\n\n## @file unittests/libtests/feassemble/data/Quadrature2DLinear.odb\n##\n## @brief Python container holding quadrature information for a 2-D\n## linear finite-element cell used in testing finite-element C++\n## routines.\n\nfrom pyre.components.Component import Component\n\nimport numpy\n\n# ----------------------------------------------------------------------\ndef N0(p):\n return -0.5*(p[0]+p[1])\n\ndef N0p(p):\n return -0.5\n\ndef N0q(p):\n return -0.5\n\ndef N1(p):\n return 0.5*(1.0+p[0])\n\ndef N1p(p):\n return 0.5\n\ndef N1q(p):\n return 0.0\n\ndef N2(p):\n return 0.5*(1.0+p[1])\n\ndef N2p(p):\n return 0.0\n\ndef N2q(p):\n return 0.5\n\n# ----------------------------------------------------------------------\n\n# Quadrature2DLinear class\nclass Quadrature2DLinear(Component):\n \"\"\"\n Python container holding quadrature information for a 2-D linear\n finite-element cell used in testing finite-element C++ routines.\n \"\"\"\n \n # PUBLIC METHODS /////////////////////////////////////////////////////\n \n def __init__(self, name=\"quadrature2dlinear\"):\n \"\"\"\n Constructor.\n \"\"\"\n Component.__init__(self, name, facility=\"quadrature\")\n \n self.quadPtsRef = numpy.array( [[-1.0/3.0, -1.0/3.0]], dtype=numpy.float64)\n self.quadWts = numpy.array([2.0], dtype=numpy.float64)\n self.numBasis = 3\n self.numQuadPts = 1\n self.spaceDim = 2\n self.cellDim = 2\n \n return\n \n\n def calculateBasis(self):\n \"\"\"\n Calculate basis functions and their derivatives at quadrature points.\n \"\"\"\n\n basis = numpy.zeros( (self.numQuadPts, self.numBasis),\n dtype=numpy.float64)\n basisDeriv = numpy.zeros( (self.numQuadPts, self.numBasis, self.cellDim),\n dtype=numpy.float64)\n\n iQuad = 0\n for q in self.quadPtsRef:\n # Basis functions at quadrature points\n basisQ = numpy.array([N0(q), N1(q), N2(q)], dtype=numpy.float64)\n basis[iQuad] = basisQ.reshape( (self.numBasis,) )\n \n # Derivatives of basis functions at quadrature points\n derivQ = numpy.array([[N0p(q), N0q(q)],\n [N1p(q), N1q(q)],\n [N2p(q), N2q(q)]], dtype=numpy.float64) \n basisDeriv[iQuad] = derivQ.reshape((self.numBasis, self.cellDim))\n\n iQuad += 1\n return (basis, basisDeriv)\n \n\n# FACTORIES ////////////////////////////////////////////////////////////\ndef quadrature():\n \"\"\"\n Factory for Quadrature2DLinear.\n \"\"\"\n return Quadrature2DLinear()\n\n\n# End of file \n", "#!/usr/bin/env python\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ----------------------------------------------------------------------\n#\n\n## @file unittests/libtests/materials/data/DruckerPrager3DElastic.py\n\n## @brief Python application for generating C++ data files for testing\n## C++ DruckerPrager3D object with elastic behavior.\n\nfrom ElasticMaterialApp import ElasticMaterialApp\n\nimport numpy\nimport math\n\n# ----------------------------------------------------------------------\ndimension = 3\nnumElasticConsts = 36\ntensorSize = 6\n\n# DruckerPrager3DElastic class\nclass DruckerPrager3DElastic(ElasticMaterialApp):\n \"\"\"\n Python application for generating C++ data files for testing C++\n DruckerPrager3D object with elastic behavior.\n \"\"\"\n \n # PUBLIC METHODS /////////////////////////////////////////////////////\n \n def __init__(self, name=\"druckerprager3delastic\"):\n \"\"\"\n Constructor.\n \"\"\"\n ElasticMaterialApp.__init__(self, name)\n\n # import pdb\n # pdb.set_trace()\n numLocs = 2\n\n self.dimension = dimension\n self.numLocs = numLocs\n\n self.dbPropertyValues = [\"density\", \"vs\", \"vp\",\n \"friction-angle\", \"cohesion\",\n \"dilatation-angle\"]\n self.numPropertyValues = numpy.array([1, 1, 1, 1, 1, 1], dtype=numpy.int32)\n\n self.dbStateVarValues = [\"plastic-strain-xx\",\n \"plastic-strain-yy\",\n \"plastic-strain-zz\",\n \"plastic-strain-xy\",\n \"plastic-strain-yz\",\n \"plastic-strain-xz\"\n ]\n self.numStateVarValues = numpy.array([6], dtype=numpy.int32)\n\n densityA = 2500.0\n vsA = 3000.0\n vpA = vsA*3**0.5\n # First case has different values for friction angle and dilatation angle.\n frictionAngleA = math.radians(30.0)\n dilatationAngleA = math.radians(20.0)\n cohesionA = 3.0e5\n strainA = [-1.1e-4, -1.2e-4, -1.3e-4, 1.4e-4, 1.5e-4, 1.6e-4]\n initialStressA = [2.1e4, 2.2e4, 2.3e4, 2.4e4, 2.5e4, 2.6e4]\n initialStrainA = [3.1e-4, 3.2e-4, 3.3e-4, 3.4e-4, 3.5e-4, 3.6e-4]\n muA = vsA*vsA*densityA\n lambdaA = vpA*vpA*densityA - 2.0*muA\n\n denomFrictionA = math.sqrt(3.0) * (3.0 - math.sin(frictionAngleA))\n denomDilatationA = math.sqrt(3.0) * (3.0 - math.sin(dilatationAngleA))\n alphaYieldA = 2.0 * math.sin(frictionAngleA)/denomFrictionA\n betaA = 6.0 * cohesionA * math.cos(frictionAngleA)/denomFrictionA\n alphaFlowA = 2.0 * math.sin(dilatationAngleA)/denomDilatationA\n \n densityB = 2000.0\n vsB = 1200.0\n vpB = vsB*3**0.5\n # Second case has same values for friction angle and dilatation angle.\n frictionAngleB = math.radians(25.0)\n dilatationAngleB = math.radians(25.0)\n cohesionB = 1.0e5\n strainB = [4.1e-4, 4.2e-4, 4.3e-4, 4.4e-4, 4.5e-4, 4.6e-4]\n initialStressB = [5.1e4, 5.2e4, 5.3e4, 5.4e4, 5.5e4, 5.6e4]\n initialStrainB = [6.1e-4, 6.2e-4, 6.3e-4, 6.4e-4, 6.5e-4, 6.6e-4]\n muB = vsB*vsB*densityB\n lambdaB = vpB*vpB*densityB - 2.0*muB\n denomFrictionB = math.sqrt(3.0) * (3.0 - math.sin(frictionAngleB))\n denomDilatationB = math.sqrt(3.0) * (3.0 - math.sin(dilatationAngleB))\n alphaYieldB = 2.0 * math.sin(frictionAngleB)/denomFrictionB\n betaB = 6.0 * cohesionB * math.cos(frictionAngleB)/denomFrictionB\n alphaFlowB = 2.0 * math.sin(dilatationAngleB)/denomDilatationB\n\n self.lengthScale = 1.0e+3\n self.pressureScale = muA\n self.timeScale = 1.0\n self.densityScale = muA / (self.lengthScale / self.timeScale)**2\n\n self.dbProperties = numpy.array([ 
[densityA, vsA, vpA, \\\n frictionAngleA, cohesionA, \\\n dilatationAngleA],\n [densityB, vsB, vpB, \\\n frictionAngleB, cohesionB, \\\n dilatationAngleB] ], \n dtype=numpy.float64)\n self.properties = numpy.array([ [densityA, muA, lambdaA, \\\n alphaYieldA, betaA, \\\n alphaFlowA],\n [densityB, muB, lambdaB, \\\n alphaYieldB, betaB, \\\n alphaFlowB] ],\n dtype=numpy.float64)\n\n # TEMPORARY, need to determine how to use initial state variables\n self.dbStateVars = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)\n self.stateVars = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)\n\n mu0 = self.pressureScale\n density0 = self.densityScale\n self.propertiesNondim = \\\n numpy.array([ [densityA/density0, muA/mu0, lambdaA/mu0, \\\n alphaYieldA, betaA/mu0, \\\n alphaFlowA],\n [densityB/density0, muB/mu0, lambdaB/mu0, \\\n alphaYieldB, betaB/mu0, \\\n alphaFlowB] ],\n dtype=numpy.float64)\n\n self.stateVarsNondim = self.stateVars # no scaling\n\n self.initialStress = numpy.array([initialStressA,\n initialStressB],\n dtype=numpy.float64)\n self.initialStrain = numpy.array([initialStrainA,\n initialStrainB],\n dtype=numpy.float64)\n \n self.density = numpy.array([densityA,\n densityB],\n dtype=numpy.float64)\n\n self.strain = numpy.array([strainA,\n strainB],\n dtype=numpy.float64)\n \n self.stress = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)\n self.elasticConsts = numpy.zeros( (self.numLocs, numElasticConsts), \\\n dtype=numpy.float64)\n\n (self.elasticConsts[0,:], self.stress[0,:]) = \\\n self._calcStress(strainA, muA, lambdaA, \\\n initialStressA, initialStrainA)\n (self.elasticConsts[1,:], self.stress[1,:]) = \\\n self._calcStress(strainB, muB, lambdaB, \\\n initialStressB, initialStrainB)\n\n self.dtStableImplicit = 1.0e+10\n self.dtStableExplicit = 1000.0 / vpA\n\n plasticStrainUpdated = numpy.zeros((numLocs, tensorSize),\n dtype=numpy.float64)\n \n self.stateVarsUpdated = numpy.array( [plasticStrainUpdated[0,:],\n plasticStrainUpdated[1,:]],\n dtype=numpy.float64)\n\n return\n\n\n def _calcStress(self, strainV, muV, lambdaV, initialStressV, initialStrainV):\n \"\"\"\n Compute stress and derivative of elasticity matrix.\n \"\"\"\n C1111 = lambdaV + 2.0*muV\n C1122 = lambdaV\n C1133 = lambdaV\n C1112 = 0.0\n C1123 = 0.0\n C1113 = 0.0\n C2211 = lambdaV\n C2222 = lambdaV + 2.0*muV\n C2233 = lambdaV\n C2212 = 0.0\n C2223 = 0.0\n C2213 = 0.0\n C3311 = lambdaV\n C3322 = lambdaV\n C3333 = lambdaV + 2.0*muV\n C3312 = 0.0\n C3323 = 0.0\n C3313 = 0.0\n C1211 = 0.0\n C1222 = 0.0\n C1233 = 0.0\n C1212 = 2.0*muV\n C1223 = 0.0\n C1213 = 0.0\n C2311 = 0.0\n C2322 = 0.0\n C2333 = 0.0\n C2312 = 0.0\n C2323 = 2.0*muV\n C2313 = 0.0\n C1311 = 0.0\n C1322 = 0.0\n C1333 = 0.0\n C1312 = 0.0\n C1323 = 0.0\n C1313 = 2.0*muV\n elasticConsts = numpy.array([C1111, C1122, C1133, C1112, C1123, C1113,\n C2211, C2222, C2233, C2212, C2223, C2213,\n C3311, C3322, C3333, C3312, C3323, C3313,\n C1211, C1222, C1233, C1212, C1223, C1213,\n C2311, C2322, C2333, C2312, C2323, C2313,\n C1311, C1322, C1333, C1312, C1323, C1313],\n\t\t\t\t dtype=numpy.float64)\n\n strain = numpy.reshape(strainV, (6,1))\n initialStress = numpy.reshape(initialStressV, (tensorSize,1))\n initialStrain = numpy.reshape(initialStrainV, (tensorSize,1))\n elastic = numpy.array([ [C1111, C1122, C1133, C1112, C1123, C1113],\n [C2211, C2222, C2233, C2212, C2223, C2213],\n [C3311, C3322, C3333, C3312, C3323, C3313],\n [C1211, C1222, C1233, C1212, C1223, C1213],\n [C2311, C2322, C2333, C2312, C2323, C2313],\n [C1311, C1322, C1333, 
C1312, C1323, C1313] ],\n dtype=numpy.float64)\n stress = numpy.dot(elastic, strain-initialStrain) + initialStress\n return (elasticConsts, numpy.ravel(stress))\n \n\n# MAIN /////////////////////////////////////////////////////////////////\nif __name__ == \"__main__\":\n\n app = DruckerPrager3DElastic()\n app.run()\n\n\n# End of file \n", "#!/usr/bin/env nemesis\n\"\"\"\nThis script generates a plot showing slip or fault tractions.\n\"\"\"\n\n# The code requires the numpy, h5py, and matplotlib packages.\nimport numpy\nimport h5py\nimport matplotlib.pyplot as pyplot\n\n# ----------------------------------------------------------------------\nimport sys\n\nplot = sys.argv[1]\nif plot not in ['step01_slip', \n 'step01_stress',\n 'step04_bg',\n 'step04_initial',\n ]:\n raise ValueError(\"Unknown plot '%s'.\" % plot)\n\n# ----------------------------------------------------------------------\ndef getStep01():\n \"\"\"\n Function to get slip, tractions, and fault coordinates from step01.\n \"\"\"\n\n # Open solution file and get slip and coordinates.\n h5 = h5py.File(\"output/step01-fault.h5\", \"r\")\n vertices = h5['geometry/vertices'][:]\n slip = h5['vertex_fields/slip'][0,:,0].squeeze()\n traction_change = h5['vertex_fields/traction_change'][0,:,:].squeeze()\n h5.close()\n\n # Sort by y-coordinate (elevation). Slip is 1-D after the squeeze above,\n # so it takes a single index array.\n ids = numpy.argsort(vertices[:,1])\n vertices = vertices[ids,:]\n slip = slip[ids]\n traction_change = traction_change[ids]\n\n return (vertices, slip, traction_change)\n\n# ======================================================================\n(vertices, slip, traction_change) = getStep01()\n\n# Background stress field (from afterslip_tractions.py)\ndensity = 2900.0\ngacc = 9.80665\nmu_s = 0.6\n\n# Background normal tractions are overburden and compressive\n# (negative, y is negative)\ntraction_bg_normal = density*gacc*(vertices[:,1])\n\n# Background shear tractions are reverse (in 2-D right-lateral is negative)\n# because the normal tractions are negative.\ntraction_bg_shear = mu_s*traction_bg_normal\n\n# ----------------------------------------------------------------------\nfigure = pyplot.figure(figsize=(5.0, 5.0), facecolor='white', dpi=150)\nfigure.set_facecolor('white')\n\naxes = figure.add_axes([0.15, 0.1, 0.80, 0.87])\n\nif plot == \"step01_slip\":\n axes.plot(slip, vertices[:,1]/1.0e+3)\n axes.set_xlabel(\"Slip (m)\")\n axes.set_ylabel(\"Elevation (km)\")\n axes.set_ylim((-60.0, 0.0))\n\nelif plot == \"step01_stress\":\n axes.plot(traction_change[:,0]/1.0e+6, vertices[:,1]/1.0e+3)\n axes.set_xlabel(\"Traction Change (MPa)\")\n axes.set_ylabel(\"Elevation (km)\")\n axes.plot([0,0], [0, -600], linestyle='--', color='gray', alpha=0.5)\n axes.set_ylim((-60.0, 0.0))\n\nelif plot == \"step04_bg\":\n axes.plot(traction_bg_normal/1.0e+6, vertices[:,1]/1.0e+3, \n color='green', linestyle='--')\n axes.plot(traction_bg_shear/1.0e+6, vertices[:,1]/1.0e+3, color='blue')\n axes.set_xlabel(\"Traction (MPa)\")\n axes.set_ylabel(\"Elevation (km)\")\n axes.set_ylim((-60.0, 0.0))\n axes.set_xlim((-2000.0, 0.0))\n axes.legend(('Normal', 'Shear'), loc='upper left')\n\n\nelif plot == \"step04_initial\":\n traction_initial_normal = traction_bg_normal + traction_change[:,1]\n traction_initial_shear = traction_bg_shear + traction_change[:,0]\n traction_friction = -2.0e+6 + mu_s*traction_initial_normal\n\n axes.plot(traction_initial_normal/1.0e+6, vertices[:,1]/1.0e+3, \n color='green', linestyle='--')\n axes.plot(traction_initial_shear/1.0e+6, vertices[:,1]/1.0e+3, color='blue')\n 
axes.plot(traction_friction/1.0e+6, vertices[:,1]/1.0e+3, \n color='red', linestyle='-.')\n axes.set_xlabel(\"Traction (MPa)\")\n axes.set_ylabel(\"Elevation (km)\")\n axes.set_ylim((-60.0, 0.0))\n axes.set_xlim((-2000.0, 0.0))\n axes.legend(('Normal', 'Shear', \"Failure\"), loc='upper left')\n\n\n# Save before show(): saving after the window is closed can write a blank file.\npyplot.savefig(plot)\npyplot.show()\n", "#!/usr/bin/env python\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ----------------------------------------------------------------------\n#\n\n## @file unittests/libtests/materials/data/ElasticMaterialApp.py\n\n## @brief Python application for generating C++ data files for testing\n## C++ elastic material objects.\n\nfrom pyre.applications.Script import Script\n\nimport numpy\n\n# ElasticMaterialApp class\nclass ElasticMaterialApp(Script):\n \"\"\"\n Python application for generating C++ data files for testing C++\n elastic material objects.\n \"\"\"\n \n # INVENTORY //////////////////////////////////////////////////////////\n\n class Inventory(Script.Inventory):\n \"\"\"\n Python object for managing ElasticMaterialApp facilities and properties.\n \"\"\"\n\n ## @class Inventory\n ## Python object for managing ElasticMaterialApp facilities and properties.\n ##\n ## \\b Properties\n ## @li None\n ##\n ## \\b Facilities\n ## @li \\b data Data manager.\n\n import pyre.inventory\n\n from pylith.utils.CppData import CppData\n data = pyre.inventory.facility(\"data\", factory=CppData)\n data.meta['tip'] = \"Data manager.\"\n\n\n # PUBLIC METHODS /////////////////////////////////////////////////////\n\n def __init__(self, name=\"elasticmaterialapp\"):\n \"\"\"\n Constructor.\n \"\"\"\n Script.__init__(self, name)\n\n # Material information\n self.dimension = 0\n self.numLocs = 0\n self.numProperties = 0\n self.numStateVars = 0\n self.numDBProperties = 0\n self.numDBStateVars = 0\n self.numPropsQuadPt = 0\n self.numVarsQuadPt = 0\n self.numPropertyValues = None\n self.numStateVarValues = None\n self.dbPropertyValues = None\n self.dbStateVarValues = None\n self.dbProperties = None\n self.dbStateVars = None\n self.properties = None\n self.stateVars = None\n self.propertiesNondim = None\n self.stateVarsNondim = None\n self.lengthScale = 0\n self.timeScale = 0\n self.pressureScale = 0\n self.densityScale = 0\n\n # Elastic material information\n self.dtStableImplicit = 1.0e+99\n self.dtStableExplicit = 1.0e+99\n self.density = None\n self.strain = None\n self.stress = None\n self.elasticConsts = None\n self.initialStress = None\n self.initialStrain = None\n self.stateVarsUpdated = None\n return\n\n\n def main(self):\n \"\"\"\n Run the application.\n \"\"\"\n self._initData()\n self.data.write(self.name)\n return\n \n\n # PRIVATE METHODS ////////////////////////////////////////////////////\n\n def _configure(self):\n \"\"\"\n Set members using inventory.\n \"\"\"\n Script._configure(self)\n self.data = self.inventory.data\n return\n\n\n def _initData(self):\n self.numDBProperties = len(self.dbPropertyValues)\n if not self.dbStateVarValues is None:\n self.numDBStateVars = len(self.dbStateVarValues)\n self.numPropsQuadPt = numpy.sum(self.numPropertyValues)\n if not self.numStateVarValues is 
None:\n self.numVarsQuadPt = numpy.sum(self.numStateVarValues)\n self.numProperties = self.numPropertyValues.shape[0]\n if not self.numStateVarValues is None:\n self.numStateVars = self.numStateVarValues.shape[0]\n\n self.data.addScalar(vtype=\"int\", name=\"_dimension\",\n value=self.dimension,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numLocs\",\n value=self.numLocs,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numProperties\",\n value=self.numProperties,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numStateVars\",\n value=self.numStateVars,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numDBProperties\",\n value=self.numDBProperties,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numDBStateVars\",\n value=self.numDBStateVars,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numPropsQuadPt\",\n value=self.numPropsQuadPt,\n format=\"%d\")\n self.data.addScalar(vtype=\"int\", name=\"_numVarsQuadPt\",\n value=self.numVarsQuadPt,\n format=\"%d\")\n self.data.addArray(vtype=\"int\", name=\"_numPropertyValues\",\n values=self.numPropertyValues,\n format=\"%d\", ncols=1)\n self.data.addArray(vtype=\"int\", name=\"_numStateVarValues\",\n values=self.numStateVarValues,\n format=\"%d\", ncols=1)\n self.data.addArray(vtype=\"char*\", name=\"_dbPropertyValues\",\n values=self.dbPropertyValues,\n format=\"\\\"%s\\\"\", ncols=1)\n self.data.addArray(vtype=\"char*\", name=\"_dbStateVarValues\",\n values=self.dbStateVarValues,\n\t\t format=\"\\\"%s\\\"\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_dbProperties\",\n values=self.dbProperties,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_dbStateVars\",\n values=self.dbStateVars,\n\t\t format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_properties\",\n values=self.properties,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_stateVars\",\n values=self.stateVars,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_propertiesNondim\",\n values=self.propertiesNondim,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_stateVarsNondim\",\n values=self.stateVarsNondim,\n format=\"%16.8e\", ncols=1)\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_lengthScale\",\n value=self.lengthScale,\n format=\"%16.8e\")\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_timeScale\",\n value=self.timeScale,\n format=\"%16.8e\")\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_pressureScale\",\n value=self.pressureScale,\n format=\"%16.8e\")\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_densityScale\",\n value=self.densityScale,\n format=\"%16.8e\")\n\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_dtStableImplicit\",\n value=self.dtStableImplicit,\n format=\"%16.8e\")\n self.data.addScalar(vtype=\"PylithScalar\", name=\"_dtStableExplicit\",\n value=self.dtStableExplicit,\n format=\"%16.8e\")\n self.data.addArray(vtype=\"PylithScalar\", name=\"_density\",\n values=self.density,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_strain\",\n values=self.strain,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_stress\",\n values=self.stress,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_elasticConsts\",\n values=self.elasticConsts,\n format=\"%16.8e\", ncols=1)\n 
self.data.addArray(vtype=\"PylithScalar\", name=\"_initialStress\",\n values=self.initialStress,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_initialStrain\",\n values=self.initialStrain,\n format=\"%16.8e\", ncols=1)\n self.data.addArray(vtype=\"PylithScalar\", name=\"_stateVarsUpdated\",\n values=self.stateVarsUpdated,\n format=\"%16.8e\", ncols=1)\n \n return\n\n \n# End of file \n" ]
[ [ "matplotlib.use", "numpy.cumsum", "matplotlib.pyplot.savefig", "numpy.argsort", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.reshape", "numpy.ravel", "numpy.array", "numpy.zeros" ], [ "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ], [ "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
artificially-ai/FewShotVision
[ "909bc414ea27ef0300091e1dd6baba4fb063324b", "909bc414ea27ef0300091e1dd6baba4fb063324b", "909bc414ea27ef0300091e1dd6baba4fb063324b" ]
[ "utils/io_utils.py", "detection/src/loaders/data_manager.py", "detection/src/steps/yolomaml_training.py" ]
[ "import glob\nimport os\n\nimport numpy as np\nimport torch\n\nfrom utils import configs, backbones\n\nmodel_dict = dict(\n Conv4=backbones.Conv4,\n Conv4S=backbones.Conv4S,\n Conv6=backbones.Conv6,\n ResNet10=backbones.ResNet10,\n ResNet18=backbones.ResNet18,\n ResNet34=backbones.ResNet34,\n ResNet50=backbones.ResNet50,\n ResNet101=backbones.ResNet101,\n)\n\n\ndef path_to_step_output(dataset, backbone, method, output_dir=configs.save_dir):\n \"\"\"\n Defines the path where the outputs will be saved on the disk\n Args:\n dataset (str): name of the dataset\n backbone (str): name of the backbone of the model\n method (str): name of the used method\n output_dir (str): may be common to other experiments\n\n Returns:\n str: path to the output of the step\n \"\"\"\n checkpoint_dir = os.path.join(\n output_dir,\n dataset,\n '_'.join([method, backbone]),\n )\n\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n return checkpoint_dir\n\n\ndef set_and_print_random_seed(random_seed, save=False, checkpoint_dir='./'):\n \"\"\"\n Set and print numpy random seed, for reproducibility of the training,\n and set torch seed based on numpy random seed\n Args:\n random_seed (int): seed for random instantiations ; if none is provided, a seed is randomly defined\n save (bool): if True, the numpy random seed is saved in seeds.txt\n checkpoint_dir (str): output folder where the seed is saved\n Returns:\n int: numpy random seed\n\n \"\"\"\n if random_seed is None:\n random_seed = np.random.randint(0, 2 ** 32 - 1)\n np.random.seed(random_seed)\n torch.manual_seed(np.random.randint(0, 2**32-1))\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n prompt = 'Random seed : {}\\n'.format(random_seed)\n print(prompt)\n\n if save:\n with open(os.path.join(checkpoint_dir, 'seeds.txt'), 'a') as f:\n f.write(prompt)\n\n return random_seed\n\n\ndef get_path_to_json(dataset, split):\n \"\"\"\n\n Args:\n dataset (str): which dataset to load\n split (str): whether to use base, val or novel dataset\n\n Returns:\n str: path to JSON file\n \"\"\"\n if dataset == 'cross':\n if split == 'base':\n path_to_json_file = configs.data_dir['miniImageNet'] + 'all.json'\n else:\n path_to_json_file = configs.data_dir['CUB'] + split + '.json'\n elif dataset == 'cross_char':\n if split == 'base':\n path_to_json_file = configs.data_dir['omniglot'] + 'noLatin.json'\n else:\n path_to_json_file = configs.data_dir['emnist'] + split + '.json'\n else:\n path_to_json_file = configs.data_dir[dataset] + split + '.json'\n\n return path_to_json_file\n\n\ndef get_assigned_file(checkpoint_dir, num):\n # TODO: returns path to .tar file corresponding to epoch num in checkpoint_dir (even if it doesn't exist)\n assign_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(num))\n return assign_file\n\n\ndef get_resume_file(checkpoint_dir):\n # TODO: returns path to .tar file corresponding to maximal epoch in checkpoint_dir, None if checkpoint_dir is empty\n # TODO What happens if checkpoint_dir only contains best_model.tar ?\n filelist = glob.glob(os.path.join(checkpoint_dir, '*.tar'))\n if len(filelist) == 0:\n return None\n\n filelist = [x for x in filelist if os.path.basename(x) != 'best_model.tar']\n epochs = np.array([int(os.path.splitext(os.path.basename(x))[0]) for x in filelist])\n max_epoch = np.max(epochs)\n resume_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(max_epoch))\n return resume_file\n\n\ndef get_best_file(checkpoint_dir):\n # TODO returns best_model.tar in checkpoint_dir if there is 
one, else returns get_resume_file(checkpoint_dir)\n best_file = os.path.join(checkpoint_dir, 'best_model.tar')\n if os.path.isfile(best_file):\n return best_file\n else:\n return get_resume_file(checkpoint_dir)\n", "import pickle\n\nimport numpy as np\nimport torch\n\nfrom detection.src.yolov3.utils.datasets import ListDataset\n\n\nclass DetectionSetDataManager():\n \"\"\"\n Data Manager used for YOLOMAML\n \"\"\"\n def __init__(self, n_way, n_support, n_query, n_episode, image_size):\n \"\"\"\n\n Args:\n n_way (int): number of different classes in a detection class\n n_support (int): number of images in the support set with an instance of one class,\n for each of the n_way classes\n n_query (int): number of images in the query set with an instance of one class,\n for each of the n_way classes\n n_episode (int): number of episodes per epoch\n image_size (int): size of images (square)\n \"\"\"\n self.n_way = n_way\n self.n_support = n_support\n self.n_query = n_query\n self.n_episode = n_episode\n self.image_size = image_size\n\n def get_data_loader(self, path_to_data_file, path_to_images_per_label=None):\n \"\"\"\n\n Args:\n path_to_data_file (str): path to file containing paths to images\n path_to_images_per_label (str): path to pkl file containing images_per_label dictionary (optional)\n\n Returns:\n DataLoader: samples data in the shape of a detection task\n \"\"\"\n dataset = ListDataset(path_to_data_file, img_size=self.image_size)\n sampler = DetectionTaskSampler(\n dataset,\n self.n_way,\n self.n_support,\n self.n_query,\n self.n_episode,\n path_to_images_per_label,\n )\n data_loader = torch.utils.data.DataLoader(dataset,\n batch_sampler=sampler,\n num_workers=12,\n collate_fn=dataset.collate_fn_episodic,\n )\n return data_loader\n\n\ndef create_dict_images_per_label(data_source):\n \"\"\"\n Compute and returns dictionary of images per label\n Args:\n data_source (ListDataset) : The data set containing the images\n Returns:\n dict: each key maps to a list of the images which contain at least one target which label is the key\n \"\"\"\n images_per_label={}\n\n for index in range(len(data_source)):\n try:\n targets = data_source[index][2]\n if targets is not None:\n for target in targets:\n label = int(target[1])\n if label not in images_per_label:\n images_per_label[label] = []\n if len(images_per_label[label]) == 0 or images_per_label[label][-1] != index:\n images_per_label[label].append(index)\n if index % 100 == 0:\n print('{index}/{length_data_source} images considered'.format(\n index=index,\n length_data_source=len(data_source))\n )\n except OSError:\n print('Corrupted image : {image_index}'.format(image_index=index))\n return images_per_label\n\n\nclass DetectionTaskSampler(torch.utils.data.Sampler):\n \"\"\"\n Samples elements in detection episodes of defined shape.\n \"\"\"\n def __init__(self, data_source, n_way, n_support, n_query, n_episodes, path_to_images_per_label=None):\n \"\"\"\n\n Args:\n data_source (ListDataset): source dataset\n n_way (int): number of different classes in a detection class\n n_support (int): number of images in the support set with an instance of one class,\n for each of the n_way classes\n n_query (int): number of images in the query set with an instance of one class,\n for each of the n_way classes\n n_episodes (int): number of episodes per epoch\n path_to_images_per_label (str): path to a pickle file containing a dictionary of images per label\n \"\"\"\n self.data_source = data_source\n self.n_way = n_way\n self.n_support = n_support\n 
self.n_query = n_query\n self.n_episodes = n_episodes\n\n self.images_per_label = self.get_images_per_label(path_to_images_per_label)\n self.label_list = self.get_label_list()\n\n def get_images_per_label(self, path):\n \"\"\"\n Returns dictionary of images per label from a file if specified or compute it from scratch\n Args:\n path (str) : path to a pickle file containing a dictionary of images per label\n Returns:\n dict: each key maps to a list of the images which contain at least one target which label is the key\n \"\"\"\n if path:\n with open(path, 'rb') as dictionary_file:\n images_per_label = pickle.load(dictionary_file)\n else:\n images_per_label = create_dict_images_per_label(self.data_source)\n\n return images_per_label\n\n def get_label_list(self):\n \"\"\"\n\n Returns:\n list: list of appropriate labels, i.e. labels that are present in at least n_support+n_query images\n \"\"\"\n label_list = []\n for label in self.images_per_label:\n if len(self.images_per_label[label]) >= self.n_support + self.n_query:\n label_list.append(label)\n return label_list\n\n def sample_labels(self):\n \"\"\"\n\n Returns:\n numpy.ndarray: n_way labels sampled at random from all available labels\n \"\"\"\n labels = np.random.choice(self.label_list, self.n_way, replace=False)\n return labels\n\n def sample_images_from_labels(self, labels):\n \"\"\"\n For each label in labels, samples n_support+n_query images containing at least one box associated with label\n The first n_way elements of the returned tensor will be used to determine the sampled labels\n Args:\n labels (numpy.ndarray): labels from which images will be sampled\n\n Returns:\n torch.Tensor: length = n_way*(1+n_support+n_query) information about the labels,\n and indices of images constituting an episode\n \"\"\"\n #TODO: images can appear twice\n images_indices = list(-labels-1)\n for label in labels:\n images_from_label = np.random.choice(\n self.images_per_label[label],\n self.n_support+self.n_query,\n replace=False\n )\n images_indices.extend(images_from_label)\n return torch.tensor(images_indices, dtype=torch.int32)\n\n def __len__(self):\n return self.n_episodes\n\n def __iter__(self):\n for i in range(self.n_episodes):\n labels = self.sample_labels()\n yield self.sample_images_from_labels(labels)\n", "import os\n\n\nimport torch\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nfrom detection.src.loaders.data_manager import DetectionSetDataManager\nfrom detection.src.yolo_maml import YOLOMAML\nfrom utils import configs\nfrom utils.io_utils import set_and_print_random_seed\nfrom detection.src.yolov3.model import Darknet\nfrom detection.src.yolov3.utils.parse_config import parse_data_config\n\n\nclass YOLOMAMLTraining():\n \"\"\"\n This step handles the training of the algorithm on the base dataset\n \"\"\"\n\n def __init__(\n self,\n dataset_config='yolov3/config/black.data',\n model_config='yolov3/config/yolov3.cfg',\n pretrained_weights=None,\n n_way=5,\n n_shot=5,\n n_query=16,\n optimizer='Adam',\n learning_rate=0.001,\n approx=True,\n task_update_num=3,\n print_freq=100,\n validation_freq=1000,\n n_epoch=100,\n n_episode=100,\n objectness_threshold=0.8,\n nms_threshold=0.4,\n iou_threshold=0.2,\n image_size=416,\n random_seed=None,\n output_dir=configs.save_dir,\n ):\n \"\"\"\n Args:\n dataset_config (str): path to data config file\n model_config (str): path to model definition file\n pretrained_weights (str): path to a file containing pretrained weights for the model\n n_way (int): number of labels in a detection 
task\n n_shot (int): number of support data in each class in an episode\n n_query (int): number of query data in each class in an episode\n optimizer (str): must be a valid class of torch.optim (Adam, SGD, ...)\n learning_rate (float): learning rate fed to the optimizer\n approx (bool): whether to use an approximation of the meta-backpropagation\n task_update_num (int): number of updates inside each episode\n print_freq (int): inside an epoch, print status update every print_freq episodes\n validation_freq (int): inside an epoch, frequency with which we evaluate the model on the validation set\n n_epoch (int): number of meta-training epochs\n n_episode (int): number of episodes per epoch during meta-training\n objectness_threshold (float): at evaluation time, only keep boxes with objectness above this threshold\n nms_threshold (float): threshold for non maximum suppression, at evaluation time\n iou_threshold (float): threshold for intersection over union\n image_size (int): size of images (square)\n random_seed (int): seed for random instantiations ; if none is provided, a seed is randomly defined\n output_dir (str): path to experiments output directory\n \"\"\"\n\n self.dataset_config = dataset_config\n self.model_config = model_config\n self.pretrained_weights = pretrained_weights\n self.n_way = n_way\n self.n_shot = n_shot\n self.n_query = n_query\n self.optimizer = optimizer\n self.learning_rate = learning_rate\n self.approx = approx\n self.task_update_num = task_update_num\n self.print_freq = print_freq\n self.validation_freq = validation_freq\n self.n_epoch = n_epoch\n self.n_episode = n_episode\n self.objectness_threshold = objectness_threshold\n self.nms_threshold = nms_threshold\n self.iou_threshold = iou_threshold\n self.image_size = image_size\n self.random_seed = random_seed\n self.checkpoint_dir = output_dir\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.writer = SummaryWriter(log_dir=output_dir)\n\n def apply(self):\n \"\"\"\n Execute the YOLOMAMLTraining step\n Returns:\n dict: a dictionary containing the whole state of the model that gave the higher validation accuracy\n\n \"\"\"\n set_and_print_random_seed(self.random_seed, True, self.checkpoint_dir)\n\n data_config = parse_data_config(self.dataset_config)\n train_path = data_config[\"train\"]\n train_dict_path = data_config.get(\"train_dict_path\", None)\n valid_path = data_config.get(\"valid\", None)\n valid_dict_path = data_config.get(\"valid_dict_path\", None)\n\n base_loader = self._get_data_loader(train_path, train_dict_path)\n val_loader = self._get_data_loader(valid_path, valid_dict_path)\n\n model = self._get_model()\n\n return self._train(base_loader, val_loader, model)\n\n def dump_output(self, _, output_folder, output_name, **__):\n pass\n\n def _train(self, base_loader, val_loader, model):\n \"\"\"\n Trains the model on the base set\n Args:\n base_loader (torch.utils.data.DataLoader): data loader for base set\n val_loader (torch.utils.data.DataLoader): data loader for validation set\n model (YOLOMAML): neural network model to train\n\n Returns:\n dict: a dictionary containing the whole state of the model that gave the higher validation accuracy\n\n \"\"\"\n optimizer = self._get_optimizer(model)\n\n for epoch in range(self.n_epoch):\n loss_dict = model.train_loop(base_loader, optimizer)\n\n self.plot_tensorboard(loss_dict, epoch)\n\n if epoch % self.print_freq == 0:\n print(\n 'Epoch {epoch}/{n_epochs} | Loss {loss}'.format(\n epoch=epoch,\n n_epochs=self.n_epoch,\n 
loss=loss_dict['query_total_loss'],\n )\n )\n\n if epoch % self.validation_freq == self.validation_freq - 1:\n precision, recall, average_precision, f1, ap_class = model.eval_loop(val_loader)\n\n self.writer.add_scalar('precision', precision.mean(), epoch)\n self.writer.add_scalar('recall', recall.mean(), epoch)\n self.writer.add_scalar('mAP', average_precision.mean(), epoch)\n self.writer.add_scalar('F1', f1.mean(), epoch)\n\n self.writer.close()\n\n model.base_model.save_darknet_weights(os.path.join(self.checkpoint_dir, 'final.weights'))\n\n return {'epoch': self.n_epoch, 'state': model.state_dict()}\n\n def _get_optimizer(self, model):\n \"\"\"\n Get the optimizer from string self.optimizer\n Args:\n model (torch.nn.Module): the model to be trained\n\n Returns: a torch.optim.Optimizer object parameterized with model parameters\n\n \"\"\"\n assert hasattr(torch.optim, self.optimizer), \"The optimization method is not a torch.optim object\"\n optimizer = getattr(torch.optim, self.optimizer)(model.parameters(), lr=self.learning_rate)\n\n return optimizer\n\n def _get_data_loader(self, path_to_data_file, path_to_images_per_label):\n \"\"\"\n\n Args:\n path_to_data_file (str): path to file containing paths to images\n path_to_images_per_label (str): path to pickle file containing the dictionary of images per label\n\n Returns:\n torch.utils.data.DataLoader: samples data in the shape of a detection task\n \"\"\"\n data_manager = DetectionSetDataManager(self.n_way, self.n_shot, self.n_query, self.n_episode, self.image_size)\n\n return data_manager.get_data_loader(path_to_data_file, path_to_images_per_label)\n\n def _get_model(self):\n \"\"\"\n\n Returns:\n YOLOMAML: meta-model\n \"\"\"\n\n base_model = Darknet(self.model_config, self.image_size, self.pretrained_weights)\n\n model = YOLOMAML(\n base_model,\n self.n_way,\n self.n_shot,\n self.n_query,\n self.image_size,\n approx=self.approx,\n task_update_num=self.task_update_num,\n train_lr=self.learning_rate,\n objectness_threshold=self.objectness_threshold,\n nms_threshold=self.nms_threshold,\n iou_threshold=self.iou_threshold,\n device=self.device,\n )\n\n return model\n\n def plot_tensorboard(self, loss_dict, epoch):\n \"\"\"\n Writes into summary the values present in loss_dict\n Args:\n loss_dict (dict): contains the different parts of the average loss on one epoch. Each key describes\n a part of the loss (ex: query_classification_loss) and each value is a 0-dim tensor. This dictionary is\n required to contain the keys 'support_total_loss' and 'query_total_loss' which contains respectively the\n total loss on the support set, and the total meta-loss on the query set\n epoch (int): global step value in the summary\n\n Returns:\n\n \"\"\"\n for key, value in loss_dict.items():\n self.writer.add_scalar(key, value, epoch)\n\n return\n" ]
[ [ "numpy.max", "numpy.random.seed", "numpy.random.randint" ], [ "torch.tensor", "torch.utils.data.DataLoader", "numpy.random.choice" ], [ "torch.utils.tensorboard.writer.SummaryWriter", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
phaustin/a301_2020
[ "9be7ead5f641013e2cec4e736ea76171b849e8d5", "9be7ead5f641013e2cec4e736ea76171b849e8d5" ]
[ "sat_lib/process_bands.py", "week9/landsat_image.py" ]
[ "import a301_lib\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom pyhdf.SD import SD\nfrom pyhdf.SD import SDC\nfrom pathlib import Path\nimport h5py\nfrom contextlib import contextmanager\nimport os\nfrom sat_lib.modismeta_read import get_core\n\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(prevdir)\n\ndef readband(the_file,the_band):\n \"\"\"\n read and calibrate a MODIS band from an open hdf4 SD dataset\n\n Parameters\n ----------\n\n the_file:pyhdf.SD object\n the dataset open for reading\n the_band: int\n band number for MODIS (1-36)\n\n Returns\n -------\n the_chan_calibrated: ndarray\n the pixel radiances in W/m^2/sr/micron\n \"\"\"\n longwave_data = the_file.select(\"EV_1KM_Emissive\") # select sds\n longwave_bands = the_file.select(\"Band_1KM_Emissive\")\n band_nums = longwave_bands.get()\n thechan_index = int(np.searchsorted(band_nums, the_band))\n print(f\"reading ban {the_band}\")\n print(thechan_index)\n thechan_data = longwave_data[thechan_index, :, :]\n scales = longwave_data.attributes()[\"radiance_scales\"]\n offsets = longwave_data.attributes()[\"radiance_offsets\"]\n thechan_scale = scales[thechan_index]\n thechan_offset = offsets[thechan_index]\n thechan_calibrated = (thechan_data - thechan_offset) * thechan_scale\n return thechan_calibrated\n\ndef write_bands(outname,chan_rads,core_metadata):\n \"\"\"\n write a MODIS band 30 to an h5 file\n\n Parameters\n ----------\n\n outname: str\n name of output hdf\n chan_rads: dict\n the pixel radiances in W/m^2/sr/micron\n key: channel number (int)\n value: radiance (ndarray)\n\n Returns\n -------\n None-- the_file is closed by this function\n \"\"\"\n with h5py.File(outname, \"w\") as f:\n group = f.create_group(\"channels\")\n for key, value in chan_rads.items():\n chan_name = f\"chan{key}\"\n radiance_array = value\n radiance_array = radiance_array.astype(np.float32)\n dset = group.create_dataset(chan_name, radiance_array.shape,\n dtype=radiance_array.dtype)\n dset[...] 
= radiance_array[...]\n dset.attrs['units'] = \"W/m^2/micron/ sr\"\n f.attrs[\"history\"] = 'written by process.py'\n f.attrs[\"CoreMetadata.0\"] = core_metadata\n print(f\"wrote {outname}\")\n\n\n\n\nif __name__ == \"__main__\":\n import a301_lib\n sat_data = a301_lib.sat_data / \"hdf4_files\"\n with cd(sat_data):\n all_files = list(sat_data.glob(\"MYD021KM*2105*hdf\"))\n all_files = [item for item in all_files if (item.parent.name != \"h5_dir\"\n and item.name.find('MYD02') >= 0)]\n print(f\"found {all_files}\")\n out_dir = sat_data /\"h5_dir\"\n out_dir.mkdir(parents=True, exist_ok=True)\n for a_file in all_files[:]:\n core_metadata = get_core(a_file)\n out_file = out_dir / f\"oct9_{a_file.name}\"\n out_file = out_file.with_suffix('.h5')\n print(f\"reading {a_file}, writing {out_file}\")\n the_sd = SD(str(a_file), SDC.READ)\n band_list = [30,31,32]\n rad_dict = {}\n for the_band in band_list:\n rad_dict[the_band] = readband(the_sd,the_band)\n the_sd.end()\n write_bands(out_file,rad_dict,core_metadata)\n", "# ---\n# jupyter:\n# jupytext:\n# notebook_metadata_filter: all,-language_info,-toc,-latex_envs\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.6.1-dev\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# (landsat1)=\n# # Landsat image processing 1\n\n# %% [markdown]\n# https://medium.com/@mommermiscience/dealing-with-geospatial-raster-data-in-python-with-rasterio-775e5ba0c9f5\n#\n# https://www.perrygeo.com/python-affine-transforms.html\n#\n# http://geologyandpython.com/get-landsat-8.html\n\n# %% [markdown]\n# ## Bulk image download from AWS\n#\n# Notes drawing on http://geologyandpython.com/get-landsat-8.html\n\n# %%\nimport pandas as pd\nimport a301_lib\nimport datetime as dt\nimport dateutil.parser\nimport numpy as np\nfrom pathlib import Path\n\n# %%\n# !pwd\n\n# %%\ndownload_catalog=True\nif download_catalog:\n s3_scenes = pd.read_csv('http://landsat-pds.s3.amazonaws.com/c1/L8/scene_list.gz', compression='gzip')\nelse:\n s3_scenes = pd.read_csv(a301_lib.sat_data / 'landsat/scene_list.gz', compression='gzip')\n\n# %% [markdown]\n# ## Get images from Vancouver\n#\n# Filter out cloud cover > 20% and preprocessed images with ids ending in T2 or RT\n\n# %%\npath, row = 47, 26\n\nprint('Path:',path, 'Row:', row)\n\n# Filter the Landsat Amazon S3 table for images matching path, row, cloudcover and processing state.\nscenes = s3_scenes[(s3_scenes.path == path) & (s3_scenes.row == row) & \n (s3_scenes.cloudCover <= 20) & \n (~s3_scenes.productId.str.contains('_T2')) &\n (~s3_scenes.productId.str.contains('_RT'))]\nprint(' Found {} images\\n'.format(len(scenes)))\nscenes.head()\n\n# %%\nscenes_van = pd.DataFrame(scenes)\n\n# %%\ncolumns = scenes_van.iloc[0].index\ncolumns\n\n# %%\ntimestamp = scenes_van.iloc[0].acquisitionDate\ntimestamp\n\n# %%\nthe_date = dateutil.parser.parse(timestamp)\nthe_date\n\n\n# %%\ndef convert_times(row):\n return dateutil.parser.parse(row.acquisitionDate)\n\nthe_times = scenes_van.apply(convert_times,axis=1)\nthe_times.head()\n\n# %%\nscenes_van['datetime']=the_times\ndel scenes_van['acquisitionDate']\nscenes_van.head()\n\n# %%\nscenes_van.datetime.iloc[0].day,scenes_van.datetime.iloc[0].month, scenes_van.datetime.iloc[0].year\n\n\n# %%\ndef make_date(row):\n year,month,day = row.datetime.year, row.datetime.month, row.datetime.day\n the_date = dt.date(year,month,day)\n return the_date\ndate_vals = scenes_van.apply(make_date, 
axis=1)\nscenes_van['the_date']=date_vals\n\n# %%\nhit = scenes_van.the_date == dt.date(2015,6,14)\nnp.sum(hit)\nmy_scene = scenes_van[hit]\nmy_scene\n\n# %%\nscene_url = my_scene.iloc[0].download_url\n\n# %%\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport shutil\n\n\n# Request the html text of the download_url from the amazon server. \n# download_url example: https://landsat-pds.s3.amazonaws.com/c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/index.html\nresponse = requests.get(scene_url)\nprint(f\"response: {response}, {type(response)}\")\nlandsat_path = Path() / 'landsat_scenes' / my_scene.iloc[0].productId\nlandsat_path.mkdir(parents=True,exist_ok=True)\n# # If the response status code is fine (200)\nif response.status_code == 200:\n\n # Import the html to beautiful soup\n html = BeautifulSoup(response.content, 'html.parser')\n\n # Create the dir where we will put this image files.\n entity_dir = os.path.join(landsat_path, my_scene.iloc[0].productId)\n os.makedirs(entity_dir, exist_ok=True)\n\n # Second loop: for each band of this image that we find using the html <li> tag\n good_bands = ['B4.TIF', 'B5.TIF']\n good_list = []\n for li in html.find_all('li'):\n\n # Get the href tag\n the_file = li.find_next('a').get('href')\n for keyword in good_bands:\n if the_file.find(keyword) > 0:\n good_list.append(the_file)\n if the_file.find('MTL.txt') > 0:\n good_list.append(the_file)\n print(f\"here is goodlist: {good_list}\")\n \ndownload=True\nif download:\n for the_file in good_list:\n print(f' Downloading: {the_file}')\n\n # Download the files\n # code from: https://stackoverflow.com/a/18043472/5361345\n image_path = scene_url.replace('index.html', the_file)\n print(image_path)\n response = requests.get(image_path, stream=True)\n\n with open(landsat_path / the_file, 'wb') as output:\n shutil.copyfileobj(response.raw, output)\n del response\n \n\n# %%\n" ]
[ [ "numpy.searchsorted" ], [ "pandas.read_csv", "numpy.sum", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Klimorg/template_segmentation
[ "f5a5066905acb06c66793d9a361eae8570652af2" ]
[ "src/pipelines/classic.py" ]
[ "from typing import List, Tuple\n\nimport albumentations as A\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom src.pipelines.base_pipeline import BasePipeline\n\n# class Tensorize(object):\n# \"\"\"\n# Class used to create tensor datasets for TensorFlow.\n\n# Inheritance:\n# object: The base class of the class hierarchy, used only to enforce WPS306.\n# See https://wemake-python-stylegui.de/en/latest/pages/usage/violations/consistency.html#consistency.\n\n# Args:\n# n_classes (int): Number of classes in the dataset.\n# img_shape (Tuple[int,int,int]): Dimension of the image, format is (H,W,C).\n# random_seed (int): Fixed random seed for reproducibility.\n# \"\"\"\n\n# def __init__(\n# self,\n# n_classes: int,\n# img_shape: Tuple[int, int, int],\n# random_seed: int,\n# ) -> None:\n# \"\"\"Initialization of the class Tensorize.\n\n# Initialize the class, the number of classes in the datasets, the shape of the\n# images and the random seed.\n# \"\"\"\n\n# self.n_classes = n_classes\n# self.img_shape = img_shape\n# self.random_seed = random_seed\n# self.AUTOTUNE = tf.data.AUTOTUNE\n\n# def load_images(self, data_frame: pd.DataFrame, column_name: str) -> List[str]:\n# \"\"\"Load the images as a list.\n\n# Take the dataframe containing the observations and the masks and the return the\n# column containing the observations as a list.\n\n# Args:\n# data_frame (pd.DataFrame): Dataframe containing the dataset.\n# column_name (str): The name of the column containing the observations.\n\n# Returns:\n# The list of observations deduced from the dataframe.\n# \"\"\"\n# return data_frame[column_name].tolist()\n\n# @tf.function\n# def parse_image_and_mask(\n# self,\n# image: str,\n# mask: str,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Transform image and mask.\n\n# Parse image and mask to go from path to a resized np.ndarray.\n\n# Args:\n# filename (str): The path of the image to parse.\n# mask (str): The mask of the image.\n\n# Returns:\n# A np.ndarray corresponding to the image and the corresponding one-hot mask.\n# \"\"\"\n# resized_dims = [self.img_shape[0], self.img_shape[1]]\n# # convert the mask to one-hot encoding\n# # decode image\n# image = tf.io.read_file(image)\n# # Don't use tf.image.decode_image,\n# # or the output shape will be undefined\n# image = tf.image.decode_jpeg(image)\n# # This will convert to float values in [0, 1]\n# image = tf.image.convert_image_dtype(image, tf.float32)\n# image = tf.image.resize(\n# image,\n# resized_dims,\n# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n# )\n\n# mask = tf.io.read_file(mask)\n# # Don't use tf.image.decode_image,\n# # or the output shape will be undefined\n# mask = tf.io.decode_png(mask, channels=1)\n# mask = tf.image.resize(\n# mask,\n# resized_dims,\n# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n# )\n\n# return image, mask\n\n# def train_preprocess(\n# self,\n# image: np.ndarray,\n# mask: np.ndarray,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Augmentation preprocess, if needed.\n\n# Args:\n# image (np.ndarray): The image to augment.\n# mask (np.ndarray): The corresponding mask.\n\n# Returns:\n# The augmented pair.\n# \"\"\"\n\n# aug = A.Compose(\n# [\n# A.HorizontalFlip(p=0.5),\n# A.VerticalFlip(p=0.5),\n# A.RandomRotate90(p=0.5),\n# A.Transpose(p=0.5),\n# ],\n# )\n\n# augmented = aug(image=image, mask=mask)\n\n# image = augmented[\"image\"]\n# mask = augmented[\"mask\"]\n\n# image = tf.cast(x=image, dtype=tf.float32)\n# mask = tf.cast(x=mask, dtype=tf.float32)\n\n# return image, mask\n\n# 
@tf.function\n# def apply_augments(\n# self,\n# image: np.ndarray,\n# mask: np.ndarray,\n# ) -> Tuple[np.ndarray, np.ndarray]:\n# \"\"\"Apply augmentation (roations, transposition, flips), if needed.\n\n# Args:\n# image (np.ndarray): A numpy array representing an image of the dataset.\n# mask (np.ndarray): A numpy array representing a mask of the dataset.\n\n# Returns:\n# An augmented pair (image, mask).\n# \"\"\"\n\n# image, mask = tf.numpy_function(\n# func=self.train_preprocess,\n# inp=[image, mask],\n# Tout=[tf.float32, tf.float32],\n# )\n\n# img_shape = [self.img_shape[0], self.img_shape[1], 3]\n# mask_shape = [self.img_shape[0], self.img_shape[1], 1]\n\n# image = tf.ensure_shape(image, shape=img_shape)\n# mask = tf.ensure_shape(mask, shape=mask_shape)\n\n# return image, mask\n\n# def create_train_dataset(\n# self,\n# data_path: str,\n# batch: int,\n# repet: int,\n# prefetch: int,\n# augment: bool,\n# ) -> tf.data.Dataset:\n# \"\"\"Creation of a tensor dataset for TensorFlow.\n\n# Args:\n# data_path (str): Path where the csv file containing the dataframe is\n# located.\n# batch (int): Batch size, usually 32.\n# repet (int): How many times the dataset has to be repeated.\n# prefetch (int): How many batch the CPU has to prepare in advance for the\n# GPU.\n# augment (bool): Does the dataset has to be augmented or no.\n\n# Returns:\n# A batch of observations and masks.\n# \"\"\"\n# df = pd.read_csv(data_path)\n# features = self.load_images(data_frame=df, column_name=\"filename\")\n# masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n# dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n# dataset = dataset.cache()\n# dataset = dataset.shuffle(len(features), seed=self.random_seed)\n# dataset = dataset.repeat(repet)\n# dataset = dataset.map(\n# self.parse_image_and_mask,\n# num_parallel_calls=self.AUTOTUNE,\n# )\n# if augment:\n# dataset = dataset.map(self.apply_augments, num_parallel_calls=self.AUTOTUNE)\n# dataset = dataset.batch(batch)\n# return dataset.prefetch(prefetch)\n\n# def create_test_dataset(\n# self,\n# data_path: str,\n# batch: int,\n# repet: int,\n# prefetch: int,\n# ) -> tf.data.Dataset:\n# \"\"\"Creation of a tensor dataset for TensorFlow.\n\n# Args:\n# data_path (str): Path where the csv file containing the dataframe is\n# located.\n# batch (int): Batch size, usually 32.\n# repet (int): How many times the dataset has to be repeated.\n# prefetch (int): How many batch the CPU has to prepare in advance for the\n# GPU.\n# augment (bool): Does the dataset has to be augmented or no.\n\n# Returns:\n# A batch of observations and masks.\n# \"\"\"\n# df = pd.read_csv(data_path)\n# features = self.load_images(data_frame=df, column_name=\"filename\")\n# masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n# dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n# dataset = dataset.cache()\n# dataset = dataset.shuffle(len(features), seed=self.random_seed)\n# dataset = dataset.repeat(repet)\n# dataset = dataset.map(\n# self.parse_image_and_mask,\n# num_parallel_calls=self.AUTOTUNE,\n# )\n# dataset = dataset.batch(batch)\n# return dataset.prefetch(prefetch)\n\n\nclass BaseDataset(BasePipeline):\n \"\"\"\n Class used to create tensor datasets for TensorFlow.\n\n Inheritance:\n object: The base class of the class hierarchy, used only to enforce WPS306.\n See https://wemake-python-stylegui.de/en/latest/pages/usage/violations/consistency.html#consistency.\n\n Args:\n n_classes (int): Number of classes in the dataset.\n img_shape 
(Tuple[int,int,int]): Dimension of the image, format is (H,W,C).\n random_seed (int): Fixed random seed for reproducibility.\n \"\"\"\n\n def __init__(\n self,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Initialization of the class Tensorize.\n\n Initialize the class, the number of classes in the datasets, the shape of the\n images and the random seed.\n \"\"\"\n super().__init__(\n *args,\n **kwargs,\n )\n\n def create_train_dataset(\n self,\n data_path: str,\n batch: int,\n repet: int,\n prefetch: int,\n augment: bool,\n ) -> tf.data.Dataset:\n \"\"\"Creation of a tensor dataset for TensorFlow.\n\n Args:\n data_path (str): Path where the csv file containing the dataframe is\n located.\n batch (int): Batch size, usually 32.\n repet (int): How many times the dataset has to be repeated.\n prefetch (int): How many batch the CPU has to prepare in advance for the\n GPU.\n augment (bool): Does the dataset has to be augmented or no.\n\n Returns:\n A batch of observations and masks.\n \"\"\"\n df = pd.read_csv(data_path)\n features = self.load_images(data_frame=df, column_name=\"filename\")\n masks = self.load_images(data_frame=df, column_name=\"mask\")\n\n dataset = tf.data.Dataset.from_tensor_slices((features, masks))\n dataset = dataset.cache()\n dataset = dataset.shuffle(len(features), seed=self.random_seed)\n dataset = dataset.repeat(repet)\n dataset = dataset.map(\n self.parse_image_and_mask,\n num_parallel_calls=self.AUTOTUNE,\n )\n if augment:\n dataset = dataset.map(self.apply_augments, num_parallel_calls=self.AUTOTUNE)\n dataset = dataset.batch(batch)\n return dataset.prefetch(prefetch)\n" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
jim22k/python-suitesparse-graphblas
[ "09eb560950dd848cadef6115c78ef4ed2e1ae80f" ]
[ "setup.py" ]
[ "from setuptools import setup, find_packages, Extension\nfrom glob import glob\n\ntry:\n from Cython.Build import cythonize\n from Cython.Compiler.Options import get_directive_defaults\n\n use_cython = True\nexcept ImportError:\n use_cython = False\nimport numpy as np\nimport os\nimport sys\nimport versioneer\n\ndefine_macros = [(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")]\n\nif use_cython:\n suffix = \".pyx\"\n directive_defaults = get_directive_defaults()\n directive_defaults[\"binding\"] = True\n directive_defaults[\"language_level\"] = 3\n if os.environ.get(\"CYTHON_COVERAGE\"):\n directive_defaults[\"linetrace\"] = True\n define_macros.append((\"CYTHON_TRACE_NOGIL\", \"1\"))\nelse:\n suffix = \".c\"\n\ninclude_dirs = [np.get_include(), os.path.join(sys.prefix, \"include\")]\next_modules = [\n Extension(\n name[: -len(suffix)].replace(\"/\", \".\").replace(\"\\\\\", \".\"),\n [name],\n include_dirs=include_dirs,\n define_macros=define_macros,\n )\n for name in glob(f\"suitesparse_graphblas/**/*{suffix}\", recursive=True)\n]\nif use_cython:\n ext_modules = cythonize(ext_modules, include_path=include_dirs)\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\npackage_data = {\"suitesparse_graphblas\": [\"*.pyx\", \"*.pxd\", \"*.h\"]}\nif sys.platform == \"win32\":\n package_data[\"suitesparse_graphblas\"].append(\"*.dll\")\n\nsetup(\n name=\"suitesparse-graphblas\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"SuiteSparse:GraphBLAS Python bindings.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n author=\"Michel Pelletier, James Kitchen, Erik Welch\",\n author_email=\"[email protected],[email protected],[email protected]\",\n url=\"https://github.com/GraphBLAS/python-suitesparse-graphblas\",\n ext_modules=ext_modules,\n cffi_modules=[\"suitesparse_graphblas/build.py:ffibuilder\"],\n python_requires=\">=3.7\",\n install_requires=[\"cffi>=1.0.0\", \"numpy>=1.15\"],\n setup_requires=[\"cffi>=1.0.0\", \"pytest-runner\"],\n tests_require=[\"pytest\"],\n license=\"Apache License 2.0\",\n package_data=package_data,\n include_package_data=True,\n)\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
satyami3/stock
[ "d3d3f65a25feb764e6f735f422251fd0ede520fc" ]
[ "scheduled_tasks/economy/get_upcoming_events_date.py" ]
[ "import json\nimport sqlite3\nimport tabula\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nconn = sqlite3.connect(r\"database/database.db\", check_same_thread=False)\ndb = conn.cursor()\n\ncurrent_date = datetime.utcnow()\n\n\ndef get_next_retail_sales_date():\n \"\"\"\n Get next retail sales release date\n \"\"\"\n df = tabula.read_pdf(r\"https://www.census.gov/retail/marts/www/martsdates.pdf\", pages=1)[0]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef get_next_cpi_date():\n \"\"\"\n Get next CPI release date\n \"\"\"\n df = pd.read_html(r\"https://www.bls.gov/schedule/news_release/cpi.htm\")[0][:-1]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef to_week_day(date):\n \"\"\"\n Get the next closest weekday\n Parameters\n ----------\n date : datetime\n Date to find the next closest weekday\n \"\"\"\n if date.weekday() in {5, 6}:\n date += timedelta(days=-date.weekday() + 7)\n return str(date.date())\n\n\ndef get_next_rrp_treasury_date(date):\n return to_week_day(date)\n\n\ndef get_holidays():\n \"\"\"\n Get holidays in US when stock market is closed\n \"\"\"\n holidays_df = pd.read_html(r\"https://www.sec.gov/edgar/filer-information/calendar\")[0]\n holidays_df[\"Date\"] = pd.to_datetime(holidays_df[\"Date\"])\n print(holidays_df)\n return holidays_df\n\n\nif __name__ == '__main__':\n db.execute(\"SELECT record_date from reverse_repo ORDER BY record_date DESC LIMIT 1\")\n record_date = db.fetchone()\n rrp_treasury_date = get_next_rrp_treasury_date(datetime.strptime(record_date[0], \"%Y-%m-%d\") + timedelta(days=1))\n retail_df = get_next_retail_sales_date()\n cpi_df = get_next_cpi_date()\n\n with open(r\"database/economic_date.json\", \"w\") as r:\n information = {\n \"Retail Sales\": {\"Ref Month\": retail_df[\"Data Month\"], \"Release Date\": retail_df[\"Release Date\"]},\n \"Inflation\": {\"Ref Month\": cpi_df[\"Reference Month\"], \"Release Date\": cpi_df[\"Release Date\"]},\n \"Daily Treasury\": {\"Release Date\": rrp_treasury_date},\n \"Reverse Repo\": {\"Release Date\": rrp_treasury_date},\n }\n json.dump(information, r, indent=4)\n" ]
[ [ "pandas.to_datetime", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Shumpei-Kikuta/BentoML
[ "4fe508934ab431ea5c414ee9d8b84c2104688381", "4fe508934ab431ea5c414ee9d8b84c2104688381", "4fe508934ab431ea5c414ee9d8b84c2104688381", "4fe508934ab431ea5c414ee9d8b84c2104688381" ]
[ "tests/conftest.py", "tests/integration/test_keras_artifact.py", "tests/adapters/test_tf_tensor_input.py", "tests/integration/projects/fastai2/tests/test_service.py" ]
[ "import functools\nimport glob\nimport inspect\nimport os\n\nimport imageio\nimport numpy as np\nimport pytest\n\nfrom bentoml.yatai.client import YataiClient\nfrom tests.bento_service_examples.example_bento_service import ExampleBentoService\n\n\ndef pytest_configure():\n '''\n global constants for tests\n '''\n # async request client\n async def assert_request(\n method,\n url,\n headers=None,\n data=None,\n timeout=None,\n assert_status=None,\n assert_data=None,\n ):\n if assert_status is None:\n assert_status = 200\n\n import aiohttp\n\n try:\n async with aiohttp.ClientSession() as sess:\n async with sess.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as r:\n r_body = await r.read()\n except RuntimeError:\n # the event loop has been closed due to previous task failed, ignore\n return\n\n if callable(assert_status):\n assert assert_status(r.status), f\"{r.status} {r_body}\"\n else:\n assert r.status == assert_status, f\"{r.status} {r_body}\"\n\n if assert_data is not None:\n if callable(assert_data):\n assert assert_data(r_body), r_body\n else:\n assert r_body == assert_data\n\n pytest.assert_request = assert_request\n\n # dataframe json orients\n pytest.DF_ORIENTS = {\n 'split',\n 'records',\n 'index',\n 'columns',\n 'values',\n # 'table', # TODO(bojiang)\n }\n pytest.DF_AUTO_ORIENTS = {\n 'records',\n 'columns',\n }\n\n def _since_version(ver: str):\n def _wrapper(func):\n if not inspect.iscoroutinefunction(func):\n\n @functools.wraps(func)\n def _wrapped(*args, **kwargs):\n from packaging import version\n\n bundle_ver = os.environ.get(\"BUNDLE_BENTOML_VERSION\")\n if bundle_ver and version.parse(bundle_ver) < version.parse(ver):\n pytest.skip()\n return func(*args, **kwargs)\n\n else:\n\n @functools.wraps(func)\n async def _wrapped(*args, **kwargs):\n from packaging import version\n\n bundle_ver = os.environ.get(\"BUNDLE_BENTOML_VERSION\")\n if bundle_ver and version.parse(bundle_ver) < version.parse(ver):\n pytest.skip()\n return await func(*args, **kwargs)\n\n return _wrapped\n\n return _wrapper\n\n pytest.since_bentoml_version = _since_version\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--batch-request\", action=\"store_false\")\n\n\[email protected]()\ndef is_batch_request(pytestconfig):\n return pytestconfig.getoption(\"batch_request\")\n\n\[email protected]()\ndef bin_file(tmpdir):\n bin_file_ = tmpdir.join(\"bin_file.bin\")\n with open(bin_file_, \"wb\") as of:\n of.write(\"â\".encode('gb18030'))\n return str(bin_file_)\n\n\[email protected]()\ndef bin_files(tmpdir):\n for i in range(10):\n bin_file_ = tmpdir.join(f\"{i}.bin\")\n with open(bin_file_, \"wb\") as of:\n of.write(f\"â{i}\".encode('gb18030'))\n return sorted(glob.glob(str(tmpdir.join(\"*.bin\"))))\n\n\[email protected]()\ndef unicode_file(tmpdir):\n bin_file_ = tmpdir.join(\"bin_file.unicode\")\n with open(bin_file_, \"wb\") as of:\n of.write(\"â\".encode('utf-8'))\n return str(bin_file_)\n\n\[email protected]()\ndef unicode_files(tmpdir):\n for i in range(10):\n bin_file_ = tmpdir.join(f\"{i}.list.unicode\")\n with open(bin_file_, \"wb\") as of:\n of.write(f\"â{i}\".encode('utf-8'))\n return sorted(glob.glob(str(tmpdir.join(\"*.list.unicode\"))))\n\n\[email protected]()\ndef img_file(tmpdir):\n img_file_ = tmpdir.join(\"test_img.jpg\")\n imageio.imwrite(str(img_file_), np.zeros((10, 10)))\n return str(img_file_)\n\n\[email protected]()\ndef img_files(tmpdir):\n for i in range(10):\n img_file_ = tmpdir.join(f\"{i}.list.jpg\")\n imageio.imwrite(str(img_file_), np.zeros((10, 
10)))\n return sorted(glob.glob(str(tmpdir.join(\"*.list.jpg\"))))\n\n\[email protected]()\ndef json_file(tmpdir):\n json_file_ = tmpdir.join(\"test.json\")\n with open(json_file_, \"w\") as of:\n of.write('{\"name\": \"kaith\", \"game\": \"morrowind\"}')\n return str(json_file_)\n\n\[email protected]()\ndef json_files(tmpdir):\n for i in range(10):\n file_ = tmpdir.join(f\"{i}.list.json\")\n with open(file_, \"w\") as of:\n of.write('{\"i\": %d, \"name\": \"kaith\", \"game\": \"morrowind\"}' % i)\n return sorted(glob.glob(str(tmpdir.join(\"*.list.json\"))))\n\n\nclass TestModel(object):\n def predict_dataframe(self, df):\n return df[\"col1\"] * 2\n\n def predict_image(self, input_datas):\n for input_data in input_datas:\n assert input_data is not None\n return [input_data.shape for input_data in input_datas]\n\n def predict_multi_images(self, original, compared):\n return (original == compared).all()\n\n def predict_json(self, input_jsons):\n assert input_jsons\n return [{\"ok\": True}] * len(input_jsons)\n\n\[email protected]()\ndef example_bento_service_class():\n # When the ExampleBentoService got saved and loaded again in the test, the two class\n # attribute below got set to the loaded BentoService class. Resetting it here so it\n # does not effect other tests\n ExampleBentoService._bento_service_bundle_path = None\n ExampleBentoService._bento_service_bundle_version = None\n return ExampleBentoService\n\n\[email protected]()\ndef bento_service(example_bento_service_class): # pylint:disable=redefined-outer-name\n \"\"\"Create a new ExampleBentoService\n \"\"\"\n test_model = TestModel()\n test_svc = example_bento_service_class()\n test_svc.pack('model', test_model)\n return test_svc\n\n\[email protected]()\ndef bento_bundle_path(bento_service): # pylint:disable=redefined-outer-name\n \"\"\"Create a new ExampleBentoService, saved it to tmpdir, and return full saved_path\n \"\"\"\n saved_path = bento_service.save()\n yield saved_path\n delete_saved_bento_service(bento_service.name, bento_service.version)\n\n\ndef delete_saved_bento_service(name, version):\n yc = YataiClient()\n yc.repository.delete(f'{name}:{version}')\n", "# pylint: disable=redefined-outer-name\n\nimport json\n\nimport keras\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nimport bentoml\nfrom tests.integration.utils import (\n build_api_server_docker_image,\n export_service_bundle,\n run_api_server_docker_container,\n)\n\nTF2 = tf.__version__.startswith('2')\n\nif TF2:\n from tests.bento_service_examples.keras_classifier import KerasClassifier\nelse:\n from tests.bento_service_examples.keras_with_tf1_classifier import KerasClassifier\n\ntest_data = [1, 2, 3, 4, 5]\n\n\[email protected](params=[tf.keras, keras], scope=\"session\")\ndef keras_model(request):\n ke = request.param\n net = ke.Sequential(\n (\n ke.layers.Dense(\n units=1,\n input_shape=(5,),\n use_bias=False,\n kernel_initializer=ke.initializers.Ones(),\n ),\n )\n )\n net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return net\n\n\[email protected](scope=\"session\")\ndef svc(keras_model):\n \"\"\"Return a TensorFlow2 BentoService.\"\"\"\n # When the ExampleBentoService got saved and loaded again in the test, the\n # two class attribute below got set to the loaded BentoService class.\n # Resetting it here so it does not effect other tests\n\n KerasClassifier._bento_service_bundle_path = None\n KerasClassifier._bento_service_bundle_version = None\n\n svc = KerasClassifier()\n 
keras_model.predict(np.array([test_data]))\n svc.pack('model', keras_model)\n svc.pack('model2', keras_model)\n return svc\n\n\[email protected](scope=\"session\")\ndef image(svc, clean_context):\n with export_service_bundle(svc) as saved_path:\n yield clean_context.enter_context(build_api_server_docker_image(saved_path))\n\n\[email protected](scope=\"module\")\ndef host(image, enable_microbatch):\n with run_api_server_docker_container(\n image, enable_microbatch=enable_microbatch, timeout=500\n ) as host:\n yield host\n\n\ndef test_keras_artifact(svc):\n assert svc.predict([test_data]) == [\n 15.0\n ], 'Inference on unsaved Keras artifact does not match expected'\n assert svc.predict2([test_data]) == [\n 15.0\n ], 'Inference on unsaved Keras artifact does not match expected'\n\n\ndef test_keras_artifact_loaded(svc):\n with export_service_bundle(svc) as saved_path:\n loaded = bentoml.load(saved_path)\n assert (\n loaded.predict([test_data]) == 15.0\n ), 'Inference on saved and loaded Keras artifact does not match expected'\n assert (\n loaded.predict2([test_data]) == 15.0\n ), 'Inference on saved and loaded Keras artifact does not match expected'\n\n\[email protected]\nasync def test_keras_artifact_with_docker(host):\n await pytest.assert_request(\n \"POST\",\n f\"http://{host}/predict\",\n headers=((\"Content-Type\", \"application/json\"),),\n data=json.dumps(test_data),\n assert_status=200,\n assert_data=b'[15.0]',\n )\n await pytest.assert_request(\n \"POST\",\n f\"http://{host}/predict2\",\n headers=((\"Content-Type\", \"application/json\"),),\n data=json.dumps(test_data),\n assert_status=200,\n assert_data=b'[15.0]',\n )\n", "# pylint: disable=redefined-outer-name\nimport sys\nimport json\nimport base64\nimport math\nimport numbers\n\nimport pytest\nimport numpy as np\n\ntry:\n from unittest.mock import MagicMock\nexcept ImportError:\n from mock import MagicMock\n\nfrom bentoml.types import HTTPRequest\nfrom bentoml.marshal.utils import BATCH_REQUEST_HEADER\n\n\ndef mock_tensorflow_module():\n class MockTensor:\n def __init__(self, _input):\n self.input = _input\n\n def numpy(self):\n if isinstance(self.input, (list, tuple)):\n return np.array(self.input, dtype=object)\n return self.input\n\n def __eq__(self, dst):\n return self.input == dst.input\n\n class MockConstant(MockTensor):\n pass\n\n sys.modules['tensorflow'] = MagicMock()\n\n import tensorflow as tf\n\n tf.__version__ = \"2.0\"\n tf.Tensor = tf.compat.v2.Tensor = MockTensor\n tf.constant = tf.compat.v2.constant = MockConstant\n\n\nmock_tensorflow_module()\n\n\nSTR_BYTES = b\"hello world\"\nSTR = STR_BYTES.decode(\"utf-8\")\nSTR_B64 = base64.b64encode(STR_BYTES).decode()\n\nBIN_BYTES = b\"\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\"\nBIN_B64 = base64.b64encode(BIN_BYTES).decode()\n\nTEST_INPUTS = [\n {'instances': [[[1, 2]], [[3, 4]]]},\n {\"instances\": [[1.0, -float('inf'), float('inf')]]},\n {\"instances\": float('nan')},\n {\"instances\": {\"b64\": STR_B64}},\n {\"instances\": [{\"b64\": STR_B64}]},\n {\"instances\": {\"b64\": BIN_B64}},\n {\"instances\": [{\"b64\": BIN_B64}]},\n]\n\n\nTEST_HEADERS = [\n ((BATCH_REQUEST_HEADER, 'true'),),\n ((BATCH_REQUEST_HEADER, 'true'),),\n ((BATCH_REQUEST_HEADER, 'false'),),\n ((BATCH_REQUEST_HEADER, 'false'),),\n ((BATCH_REQUEST_HEADER, 'true'),),\n ((BATCH_REQUEST_HEADER, 'false'),),\n ((BATCH_REQUEST_HEADER, 'true'),),\n]\n\n\nEXPECTED_RESULTS = [\n [[[1, 2]], [[3, 4]]],\n [[1.0, -float('inf'), float('inf')]],\n float('nan'),\n STR,\n [STR],\n {\"b64\": BIN_B64},\n [{\"b64\": 
BIN_B64}],\n]\n\n\[email protected](params=zip(TEST_INPUTS, TEST_HEADERS, EXPECTED_RESULTS))\ndef test_cases(request):\n return request.param\n\n\ndef assert_eq_or_both_nan(x, y):\n if isinstance(x, numbers.Number) and isinstance(y, numbers.Number):\n assert math.isnan(x) and math.isnan(y) or math.isclose(x, y)\n else:\n assert x == y\n\n\ndef test_tf_tensor_handle_request(make_api, test_cases):\n '''\n ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2\n '''\n from bentoml.adapters import TfTensorInput\n\n api = make_api(input_adapter=TfTensorInput(), user_func=lambda i: i)\n\n input_data, headers, except_result = test_cases\n body = json.dumps(input_data).encode('utf-8')\n request = HTTPRequest(headers=headers, body=body)\n\n response = tuple(api.handle_batch_request([request]))[0]\n\n prediction = json.loads(response.body)\n assert_eq_or_both_nan(except_result, prediction)\n\n\ndef test_tf_tensor_handle_batch_request(make_api, test_cases):\n '''\n ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2\n '''\n from bentoml.adapters import TfTensorInput\n\n api = make_api(input_adapter=TfTensorInput(), user_func=lambda i: i)\n\n input_data, headers, except_result = test_cases\n body = json.dumps(input_data).encode('utf-8')\n request = HTTPRequest(headers=headers, body=body)\n responses = api.handle_batch_request([request] * 3)\n\n for response in responses:\n prediction = json.loads(response.body)\n assert_eq_or_both_nan(except_result, prediction)\n", "import pandas\n\ntest_df = pandas.DataFrame([[1] * 5])\n\n\ndef test_fastai2_artifact_pack(service):\n assert service.predict(test_df) == 5.0, 'Run inference before saving'\n" ]
[ [ "numpy.zeros" ], [ "numpy.array", "tensorflow.__version__.startswith" ], [ "numpy.array" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
brucearctor/datacatalog-connectors-rdbms
[ "7ff5dc858ea7aa21486343304fc281692480cdb8" ]
[ "google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/prepare/sql_objects/sql_objects_datacatalog_entry_factory.py" ]
[ "#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nfrom google.cloud import datacatalog\nfrom google.protobuf import timestamp_pb2\nfrom google.datacatalog_connectors.commons.prepare.base_entry_factory import \\\n BaseEntryFactory\n\nfrom google.datacatalog_connectors.rdbms.scrape import constants\n\n\nclass SQLObjectsDataCatalogEntryFactory(BaseEntryFactory):\n\n def __init__(self, project_id, location_id, entry_resource_url_prefix,\n entry_group_id, sql_objects_config):\n self.__project_id = project_id\n self.__location_id = location_id\n self.__entry_resource_url_prefix = entry_resource_url_prefix\n self.__entry_group_id = entry_group_id\n self.__sql_objects_config = sql_objects_config\n\n def make_entry_for_sql_object(self, sql_object_key, sql_object_type,\n sql_object_item):\n sql_object_config = self.__sql_objects_config[sql_object_key]\n\n metadata_def = sql_object_config[\n constants.SQL_OBJECT_ITEM_METADATA_DEF_KEY]\n\n name = sql_object_item[constants.SQL_OBJECT_ITEM_NAME]\n\n entry_id = self._format_id(name)\n entry = datacatalog.Entry()\n\n entry.user_specified_type = sql_object_type\n entry.user_specified_system = self.__entry_group_id\n\n entry.display_name = self._format_display_name(name)\n\n sql_object_fields = metadata_def[constants.SQL_OBJECT_FIELDS]\n\n sql_object_fields = self.__filter_entry_model_fields(sql_object_fields)\n\n self.__set_entry_system_timestamps(entry, sql_object_fields,\n sql_object_item)\n\n self.__set_entry_description(entry, sql_object_fields, sql_object_item)\n\n entry.name = datacatalog.DataCatalogClient.entry_path(\n self.__project_id, self.__location_id, self.__entry_group_id,\n entry_id)\n\n entry.linked_resource = '{}/{}'.format(\n self.__entry_resource_url_prefix, entry_id)\n\n return entry_id, entry\n\n @classmethod\n def __filter_entry_model_fields(cls, sql_object_fields):\n sql_object_fields = [\n field for field in sql_object_fields\n if field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_MODEL] ==\n constants.SQL_OBJECT_ENTRY_MODEL\n ]\n return sql_object_fields\n\n @classmethod\n def __set_entry_system_timestamps(cls, entry, sql_object_fields,\n sql_object_item):\n\n created_time_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_CREATE_TIME)\n\n if created_time_field:\n created_time = cls.__get_sql_object_field_value(\n sql_object_item, created_time_field)\n\n update_time_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_UPDATE_TIME)\n\n update_time = None\n if update_time_field:\n update_time = cls.__get_sql_object_field_value(\n sql_object_item, update_time_field)\n\n create_time, update_time = \\\n cls.__convert_source_system_timestamp_fields(\n created_time,\n update_time)\n\n if create_time and update_time:\n created_timestamp = timestamp_pb2.Timestamp()\n created_timestamp.FromSeconds(create_time)\n entry.source_system_timestamps.create_time = 
created_timestamp\n\n updated_timestamp = timestamp_pb2.Timestamp()\n updated_timestamp.FromSeconds(update_time)\n entry.source_system_timestamps.update_time = updated_timestamp\n\n @classmethod\n def __set_entry_description(cls, entry, sql_object_fields,\n sql_object_item):\n description_field = cls.__find_sql_object_field(\n sql_object_fields, constants.SQL_OBJECT_ENTRY_DESCRIPTION)\n\n if description_field:\n description = sql_object_item.get(\n description_field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME])\n\n if pd.isna(description):\n description = ''\n\n entry.description = description\n\n @classmethod\n def __find_sql_object_field(cls, sql_object_fields, field_name):\n return next(\n iter([\n field for field in sql_object_fields\n if field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME] == field_name\n ]), None)\n\n @classmethod\n def __get_sql_object_field_value(cls, sql_object_item, field):\n return sql_object_item.get(field[constants.SQL_OBJECT_FIELD_TARGET][\n constants.SQL_OBJECT_FIELD_TARGET_NAME])\n\n @classmethod\n def __convert_timestamp_value_to_epoch(cls, timestamp_value):\n # In case it is not a valid timestamp field, we ignore it.\n if pd.notnull(timestamp_value) and isinstance(timestamp_value,\n pd.Timestamp):\n return int(timestamp_value.timestamp())\n\n @classmethod\n def __convert_source_system_timestamp_fields(cls, raw_create_time,\n raw_update_time):\n create_time = cls.__convert_timestamp_value_to_epoch(raw_create_time)\n if not pd.isnull(raw_update_time):\n update_time = cls.__convert_timestamp_value_to_epoch(\n raw_update_time)\n else:\n update_time = create_time\n return create_time, update_time\n" ]
[ [ "pandas.isna", "pandas.notnull", "pandas.isnull" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
carseven/color-blind-test-hack
[ "debac7cc1c8176ff722e1e7fb5f5eae12a92d3a0" ]
[ "src/model.py" ]
[ "import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python.keras import models, layers\r\nfrom tensorflow.python.keras.datasets import mnist\r\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\r\nimport random\r\nimport json\r\n\r\n\r\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\r\n\r\n# Las imagenes se convierten en tensores de 3 dimnsiones para poder ser\r\n# con las conv2d de keras.\r\ntrain_images = train_images.reshape((60000, 28, 28, 1))\r\n\r\n# Se normalizan las imagenes en un factor 1/255 y se convierten en tipo float\r\ntrain_images = train_images.astype('float32') / 255\r\n\r\n# Las imagenes se convierten en tensores de 3 dimnsiones para poder ser\r\n# con las conv2d de keras.\r\ntest_images = test_images.reshape((10000, 28, 28, 1))\r\n\r\n# Se normalizan las imagenes en un factor 1/255 y se convierten en tipo float\r\ntest_images = test_images.astype('float32') / 255\r\n\r\n# Se codifican las etiquetas como one-hot enconding\r\ntrain_labels = tf.keras.utils.to_categorical(train_labels)\r\ntest_labels = tf.keras.utils.to_categorical(test_labels)\r\n\r\n\"\"\"### Aumentación de datos\"\"\"\r\n\r\n\r\n# Función propia, ruido gaussiano\r\n\r\ndef ruido(imagen):\r\n varianza = 0.1\r\n desviacion = varianza * random.random()\r\n ruido = np.random.normal(0, desviacion, imagen.shape)\r\n imagen += ruido\r\n np.clip(imagen, 0., 255.)\r\n return imagen\r\n\r\n\r\n# Configuración del generador de imagenes.\r\ndatagen = ImageDataGenerator(zoom_range=0.1,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n preprocessing_function=ruido)\r\n\r\n# Solo utilizamos aumentación en el conjunto de entrenamiento. Se indica al\r\n# al generador que imagenes tiene que procesar\r\ndatagen.fit(train_images)\r\n\r\n\r\n# Se indica que es un modelo secuencial\r\nmodel = models.Sequential()\r\n\r\n# Se añaden las capas al modelo\r\n\r\n# Bloque 1 CNN\r\nmodel.add(layers.Conv2D(32, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True,\r\n input_shape=(28, 28, 1)))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 2 CNN\r\nmodel.add(layers.Conv2D(64, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 3 CNN\r\nmodel.add(layers.Conv2D(64, (3, 3),\r\n activation='relu',\r\n padding='same',\r\n use_bias=True))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.Dropout(0.25))\r\n\r\n# Bloque 4 FC\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dense(64, activation='relu'))\r\nmodel.add(layers.Dropout(0.5))\r\nmodel.add(layers.Dense(10, activation='softmax'))\r\n\r\n# Se configura la función de perdidas y el algoritmo de apredizaje.\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n# Visualización de los bloques y parametros del modelo implementado.\r\nmodel.summary()\r\n\r\n# Se indica que datos alimentan al modelo en la fase de entrenamiento y en la\r\n# de validación. 
En este caso los datos de entrenamiento viene generador tras\r\n# procesar el conjunto de entrenamiento.\r\nhistory = model.fit(datagen.flow(train_images, train_labels,\r\n batch_size=256),\r\n steps_per_epoch=int(train_images.shape[0] / 256) + 1,\r\n epochs=20,\r\n validation_data=(test_images, test_labels))\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\r\nprint('Test accuracy:', test_acc)\r\n\r\npwd = '/Users/carseven/dev/color-blind-test-hack/'\r\n\r\nmodel.save_weights(pwd + 'src/model-data/mnist.tf', save_format='tf')\r\n\r\nmodel_config = model.to_json()\r\nwith open(pwd + 'src/model-data/model-config.json',\r\n 'w',\r\n encoding='utf-8') as f:\r\n json.dump(model_config, f, ensure_ascii=False, indent=4)\r\n" ]
[ [ "tensorflow.python.keras.preprocessing.image.ImageDataGenerator", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.MaxPooling2D", "numpy.clip", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Sequential", "numpy.random.normal", "tensorflow.python.keras.datasets.mnist.load_data", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.Dropout", "tensorflow.keras.utils.to_categorical" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SocratesNFR/evodynamic
[ "682b610096182bde2298cdca352e7b319a0e4c41" ]
[ "examples/reservoir/test_mnist_esn_with_memory.py" ]
[ "\"\"\"\nTesting features and method for\nEcho State Network - Reservoir for MNIST digit classification with memory\n\"\"\"\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport numpy as np\nimport evodynamic.experiment as experiment\nimport evodynamic.connection.random as conn_random\nimport evodynamic.connection as connection\nimport evodynamic.connection.custom as conn_custom\nimport evodynamic.cells.activation as act\nimport evodynamic.utils as utils\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train_num_images = x_train.shape[0]\nx_train_image_shape = x_train.shape[1:3]\nx_test_num_images = x_test.shape[0]\nx_test_image_shape = x_test.shape[1:3]\n\nx_train = ((x_train / 255.0) > 0.5).astype(np.float64)\nx_train = x_train.reshape(x_train.shape[0],-1)\nx_train = np.transpose(x_train)\n\nx_test = ((x_test / 255.0) > 0.5).astype(np.float64)\nx_test = x_test.reshape(x_test.shape[0],-1)\nx_test = np.transpose(x_test)\n\ny_train_one_hot = np.zeros((y_train.max()+1, y_train.size))\ny_train_one_hot[y_train,np.arange(y_train.size)] = 1\ny_train = y_train_one_hot\n\ny_test_one_hot = np.zeros((y_test.max()+1, y_test.size))\ny_test_one_hot[y_test,np.arange(y_test.size)] = 1\ny_test = y_test_one_hot\n\nepochs = 1\nbatch_size = 100\nnum_batches = int(np.ceil(x_train_num_images / batch_size))\nnum_batches_test = int(np.ceil(x_test_num_images / batch_size))\nwidth = 28*28\ninput_size = 28*28\noutput_layer_size = 10\nimage_num_pixels = x_train_image_shape[0] * x_train_image_shape[1]\nmemory_size = 2\n\nexp = experiment.Experiment(input_start=0,input_delay=0,training_start=1,\n training_delay=1,reset_cells_after_train=True,\n batch_size=batch_size)\n\n\ninput_esn = exp.add_input(tf.float64, [input_size], \"input_esn\")\ndesired_output = exp.add_desired_output(tf.float64, [output_layer_size], \"desired_output\")\n\ng_esn = exp.add_group_cells(name=\"g_esn\", amount=width)\ng_esn_real = g_esn.add_real_state(state_name='g_esn_real')\n\nexp.add_connection(\"input_conn\", connection.IndexConnection(input_esn,g_esn_real,\n np.arange(width)))\n\n\nindices = [[i,i] for i in range(width)]\nvalues = [1]*width\ndense_shape = [width, width]\n\ng_esn_real_conn = conn_custom.create_custom_sparse_matrix('g_esn_real_conn',\n indices,\n values,\n dense_shape)\n\nexp.add_connection(\"g_esn_conn\",\n connection.WeightedConnection(g_esn_real,\n g_esn_real,act.relu,\n g_esn_real_conn))\n\ng_esn_memory = exp.add_state_memory(g_esn_real,memory_size)\n\noutput_layer = exp.add_group_cells(name=\"output_layer\", amount=output_layer_size)\noutput_layer_real_state = output_layer.add_real_state(state_name='output_layer_real_state')\n\nesn_output_conn = conn_random.create_xavier_connection(\"esn_output_conn\", memory_size*width, output_layer_size)\nexp.add_trainable_connection(\"output_conn\",\n connection.WeightedConnection(g_esn_memory,\n output_layer_real_state,\n act.sigmoid,\n esn_output_conn))\n\nc_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=exp.trainable_connections[\"output_conn\"].output,\n labels=desired_output,\n axis=0))\n\nexp.set_training(c_loss,0.03)\n\n# Monitors are needed because \"reset_cells_after_train=True\"\nexp.add_monitor(\"output_layer\", \"output_layer_real_state\", timesteps=1)\nexp.add_monitor(\"g_esn\", \"g_esn_real\", timesteps=1)\n\nexp.initialize_cells()\n\nfor epoch in range(epochs):\n print(\"Epoch:\", epoch)\n shuffled_indices = np.random.permutation(x_train_num_images)\n batch_indices = 
np.split(shuffled_indices,\\\n np.arange(batch_size,x_train_num_images,batch_size))\n for step, batch_idx in enumerate(batch_indices):\n input_esn_batch = x_train[:,batch_idx]\n\n desired_output_batch = y_train[:,batch_idx]\n\n input_esn_batch_1 = np.array(input_esn_batch)\n input_esn_batch_2 = np.array(input_esn_batch)\n\n split_img_idx = width//2\n input_esn_batch_1[:split_img_idx,:] = 0\n input_esn_batch_2[split_img_idx:,:] = 0\n\n feed_dict = {input_esn: input_esn_batch_2, desired_output: desired_output_batch}\n # Double run step\n exp.run_step(feed_dict=feed_dict)\n\n feed_dict = {input_esn: input_esn_batch_1, desired_output: desired_output_batch}\n exp.run_step(feed_dict=feed_dict)\n res_ca = exp.get_monitor(\"g_esn\", \"g_esn_real\")[:,:,0]\n prediction_batch = exp.get_monitor(\"output_layer\", \"output_layer_real_state\")[0,:,:]\n accuracy_batch = np.sum(np.argmax(prediction_batch, axis=0) == np.argmax(desired_output_batch, axis=0)) / batch_size\n\n utils.progressbar_loss_accu(step+1, num_batches, exp.training_loss, accuracy_batch)\n\n print(\"Testing...\")\n\n # Testing!\n shuffled_indices_test = np.random.permutation(x_test_num_images)\n batch_indices_test = np.split(shuffled_indices_test,\\\n np.arange(batch_size,x_test_num_images,batch_size))\n for step_test, batch_idx in enumerate(batch_indices_test):\n input_esn_batch = x_test[:,batch_idx]\n\n desired_output_batch = y_test[:,batch_idx]\n\n input_esn_batch_1 = np.array(input_esn_batch)\n input_esn_batch_2 = np.array(input_esn_batch)\n\n split_img_idx = width//2\n input_esn_batch_1[:split_img_idx,:] = 0\n input_esn_batch_2[split_img_idx:,:] = 0\n\n feed_dict = {input_esn: input_esn_batch_2, desired_output: desired_output_batch}\n # Double run step\n exp.run_step(feed_dict=feed_dict, testing = True)\n\n feed_dict = {input_esn: input_esn_batch_1, desired_output: desired_output_batch}\n exp.run_step(feed_dict=feed_dict, testing = True)\n res_ca = exp.get_monitor(\"g_esn\", \"g_esn_real\")[:,:,0]\n prediction_batch = exp.get_monitor(\"output_layer\", \"output_layer_real_state\")[0,:,:]\n accuracy_batch = np.sum(np.argmax(prediction_batch, axis=0) == np.argmax(desired_output_batch, axis=0)) / batch_size\n\n utils.progressbar_loss_accu(step_test+1, num_batches_test, exp.training_loss, accuracy_batch)" ]
[ [ "tensorflow.compat.v1.disable_v2_behavior", "numpy.arange", "numpy.ceil", "numpy.random.permutation", "numpy.argmax", "numpy.transpose", "tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
banboooo044/natural-language-sentiment-anaysis
[ "e18d7c0373d9f0a00d5a3cc14abf671081bc940b", "e18d7c0373d9f0a00d5a3cc14abf671081bc940b" ]
[ "classifier/code-analysis/nb_gridCV.py", "classifier/src/model_lgb.py" ]
[ "# 詳しい説明は同様のプログラム logis_gradCV.py を参照\nimport sys\nsys.path.append('../')\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import sparse\n\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom src.runner import Runner\nfrom src.util import Logger\nfrom src.model_NB import ModelMultinomialNB\n\n\nlogger = Logger()\n\ndef makefig(result):\n sns.set_style(\"whitegrid\")\n ax = sns.boxenplot(data = result, width=0.4)\n ax.set_ylabel('Accuracy', size=14)\n ax.tick_params(labelsize=14)\n plt.savefig(f'../model/tuning/{NAME}-NB.png',dpi=300)\n\n\nif __name__ == '__main__':\n base_params = {\n 'alpha' : 1.0,\n 'fit_prior' : True,\n 'class_prior' : None\n }\n params_NB = dict(base_params)\n param_grid_ = {'alpha': [0.001, 0.01, 0.1, 1, 10, 100]}\n\n features = [\n \"bow\", \"n-gram\",\"tf-idf\", \"n-gram-tf-idf\"\n ]\n\n results = [ ]\n NAME = \":\".join(features)\n for name in features:\n x = Runner.load_x_train(name)\n y = Runner.load_y_train()\n model = ModelMultinomialNB(name, **dict(params_NB))\n search = GridSearchCV( model, cv=6, param_grid=param_grid_ , return_train_score=True, verbose=10, refit=True )\n search.fit(x, y)\n results.append( (search, name) )\n logger.info(f'{name} - bestscore : {search.best_score_} - result :{search.cv_results_[\"mean_test_score\"]}')\n \n res = pd.DataFrame.from_dict(\n { name : search.cv_results_[\"mean_test_score\"] for search, name in results }, \n orient='index', \n columns=param_grid_['alpha']\n )\n\n for search, name in results:\n logger.info(f'{name} - bestscore : {search.best_score_}')\n \n res.to_csv(f'../model/tuning/{NAME}-NB.csv')\n\n makefig(res)\n", "import os,sys\nsys.path.append('../')\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\n\nfrom src.model import Model\nfrom src.util import Util\n\nfrom sklearn.metrics import log_loss, accuracy_score, f1_score, classification_report\n\nclass ModelLGB(Model):\n def __init__(self, run_fold_name, **params):\n super().__init__(run_fold_name, params)\n\n def train(self, tr_x, tr_y, va_x=None, va_y=None):\n validation = va_x is not None\n dtrain = lgb.Dataset(tr_x, label=tr_y)\n if validation:\n dvalid = lgb.Dataset(va_x, label=va_y)\n\n params = dict(self.params)\n num_round = params.pop('num_boost_round')\n\n if validation:\n # バリデーションデータが存在する場合, Eearly Stoppingを行う\n early_stopping_rounds = params.pop('early_stopping_rounds')\n watchlist = [dtrain, dvalid ]\n self.model = lgb.train(params, dtrain, num_round, valid_sets=watchlist,\n valid_names=['train','eval'],\n early_stopping_rounds=early_stopping_rounds)\n else:\n watchlist = [(dtrain, 'train')]\n self.model = lgb.train(params, dtrain, num_round, evals=watchlist)\n\n def predict(self, te_x):\n return self.model.predict(te_x, ntree_limit=self.model.best_iteration)\n\n def score(self, te_x, te_y):\n pred_prob = self.predict(te_x)\n y_pred = np.argmax(pred_prob, axis=1)\n # print(classification_report(te_y, y_pred))\n return f1_score(np.identity(5)[te_y], np.identity(5)[y_pred], average='samples')\n\n def save_model(self, feature):\n model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')\n os.makedirs(os.path.dirname(model_path), exist_ok=True)\n Util.dump(self.model, model_path)\n\n def load_model(self, feature):\n model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')\n self.model = Util.load(model_path)" ]
[ [ "sklearn.model_selection.GridSearchCV", "matplotlib.pyplot.savefig", "pandas.DataFrame.from_dict" ], [ "numpy.argmax", "numpy.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vkola-lab/multi-GPU
[ "d50cff0d587b640fb3af94329102d3eacdd70aa5", "d50cff0d587b640fb3af94329102d3eacdd70aa5", "d50cff0d587b640fb3af94329102d3eacdd70aa5" ]
[ "xfdlfw/metric/roc_auc.py", "xfdlfw/metric/recall.py", "xfdlfw/metric/matthews_corr_coef.py" ]
[ "\"\"\"\nCreated on Thu Oct 14 14:47:38 2021\n\n@author: cxue2\n\"\"\"\n\nfrom ._metric import Metric\nfrom ._misc import _numpy\nimport sklearn.metrics as M\n\n\nclass RocAuc(Metric):\n\n @_numpy\n def __call__(self, output, y_true):\n\n return M.roc_auc_score(y_true, output[:, 1], **self.kwargs)\n", "\"\"\"\nCreated on Thu Oct 14 14:47:38 2021\n\n@author: cxue2\n\"\"\"\n\nfrom ._metric import _Metric_00\nfrom ._misc import _fn_tpl_compare\nimport torch\n\n\nclass Recall(_Metric_00):\n\n def from_meta(self, hmp):\n\n _, _, fn, tp = torch.ravel(hmp['cnf'])\n \n return tp / (tp + fn)\n\n @_fn_tpl_compare(1)\n def compare(self, val_0, val_1): pass\n", "\"\"\"\nCreated on Thu Oct 14 14:47:38 2021\n\n@author: cxue2\n\"\"\"\n\nfrom ._metric import _Metric_00\nfrom ._misc import _fn_tpl_compare\nimport torch\n\n\nclass MatthewsCorrCoef(_Metric_00):\n\n def from_meta(self, hmp):\n\n tn, fp, fn, tp = torch.ravel(hmp['cnf'])\n n = tn + fp + fn + tp\n s = (tp + fn) / n\n p = (tp + fp) / n\n \n return (tp / n - s * p) / torch.sqrt(p * s * (1 - s) * (1 - p))\n\n @_fn_tpl_compare(1)\n def compare(self, val_0, val_1): pass" ]
[ [ "sklearn.metrics.roc_auc_score" ], [ "torch.ravel" ], [ "torch.sqrt", "torch.ravel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maljovec/ann-benchmarks
[ "03f9b3db562794787c936f9ea661ad3b08d5f062" ]
[ "ann_benchmarks/datasets.py" ]
[ "import h5py\nimport numpy\nimport os\nimport random\nimport sys\n\nimport subprocess\n# import samplers\n# import pyDOE\n# import ghalton\n\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve # Python 3\n\n\ndef download(src, dst):\n if not os.path.exists(dst):\n # TODO: should be atomic\n print('downloading %s -> %s...' % (src, dst))\n urlretrieve(src, dst)\n\n\ndef get_dataset_fn(dataset):\n if not os.path.exists('data'):\n os.mkdir('data')\n return os.path.join('data', '%s.hdf5' % dataset)\n\n\ndef get_dataset(which):\n hdf5_fn = get_dataset_fn(which)\n try:\n url = 'http://ann-benchmarks.com/%s.hdf5' % which\n download(url, hdf5_fn)\n except:\n print(\"Cannot download %s\" % url)\n if which in DATASETS:\n print(\"Creating dataset locally\")\n DATASETS[which](hdf5_fn)\n hdf5_f = h5py.File(hdf5_fn)\n return hdf5_f\n\n\n# Everything below this line is related to creating datasets\n# You probably never need to do this at home, just rely on the prepared datasets at http://ann-benchmarks.com\n\ndef write_output(train, test, fn, distance, point_type='float', count=100):\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n n = 0\n f = h5py.File(fn, 'w')\n f.attrs['distance'] = distance\n f.attrs['point_type'] = point_type\n print('train size: %9d * %4d' % train.shape)\n print('test size: %9d * %4d' % test.shape)\n f.create_dataset('train', (len(train), len(\n train[0])), dtype=train.dtype)[:] = train\n f.create_dataset('test', (len(test), len(\n test[0])), dtype=test.dtype)[:] = test\n neighbors = f.create_dataset('neighbors', (len(test), count), dtype='i')\n distances = f.create_dataset('distances', (len(test), count), dtype='f')\n bf = BruteForceBLAS(distance, precision=train.dtype)\n bf.fit(train)\n queries = []\n for i, x in enumerate(test):\n if i % 1000 == 0:\n print('%d/%d...' 
% (i, test.shape[0]))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[i] = [j for j, _ in res]\n distances[i] = [d for _, d in res]\n f.close()\n\n\ndef train_test_split(X, test_size=10000):\n import sklearn.model_selection\n print('Splitting %d*%d into train/test' % X.shape)\n return sklearn.model_selection.train_test_split(X, test_size=test_size, random_state=1)\n\n\ndef glove(out_fn, d):\n import zipfile\n\n url = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'\n fn = os.path.join('data', 'glove.twitter.27B.zip')\n download(url, fn)\n with zipfile.ZipFile(fn) as z:\n print('preparing %s' % out_fn)\n z_fn = 'glove.twitter.27B.%dd.txt' % d\n X = []\n for line in z.open(z_fn):\n v = [float(x) for x in line.strip().split()[1:]]\n X.append(numpy.array(v))\n X_train, X_test = train_test_split(X)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef _load_texmex_vectors(f, n, k):\n import struct\n\n v = numpy.zeros((n, k))\n for i in range(n):\n f.read(4) # ignore vec length\n v[i] = struct.unpack('f' * k, f.read(k*4))\n\n return v\n\n\ndef _get_irisa_matrix(t, fn):\n import struct\n m = t.getmember(fn)\n f = t.extractfile(m)\n k, = struct.unpack('i', f.read(4))\n n = m.size // (4 + 4*k)\n f.seek(0)\n return _load_texmex_vectors(f, n, k)\n\n\ndef sift(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz'\n fn = os.path.join('data', 'sift.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'sift/sift_base.fvecs')\n test = _get_irisa_matrix(t, 'sift/sift_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef gist(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/gist.tar.gz'\n fn = os.path.join('data', 'gist.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'gist/gist_base.fvecs')\n test = _get_irisa_matrix(t, 'gist/gist_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef _load_mnist_vectors(fn):\n import gzip\n import struct\n\n print('parsing vectors in %s...' 
% fn)\n f = gzip.open(fn)\n type_code_info = {\n 0x08: (1, \"!B\"),\n 0x09: (1, \"!b\"),\n 0x0B: (2, \"!H\"),\n 0x0C: (4, \"!I\"),\n 0x0D: (4, \"!f\"),\n 0x0E: (8, \"!d\")\n }\n magic, type_code, dim_count = struct.unpack(\"!hBB\", f.read(4))\n assert magic == 0\n assert type_code in type_code_info\n\n dimensions = [struct.unpack(\"!I\", f.read(4))[0] for i in range(dim_count)]\n\n entry_count = dimensions[0]\n entry_size = numpy.product(dimensions[1:])\n\n b, format_string = type_code_info[type_code]\n vectors = []\n for i in range(entry_count):\n vectors.append([struct.unpack(format_string, f.read(b))[0]\n for j in range(entry_size)])\n return numpy.array(vectors)\n\n\ndef mnist(out_fn):\n download(\n 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'mnist-train.gz')\n download(\n 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'mnist-test.gz')\n train = _load_mnist_vectors('mnist-train.gz')\n test = _load_mnist_vectors('mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef fashion_mnist(out_fn):\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'fashion-mnist-train.gz')\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'fashion-mnist-test.gz')\n train = _load_mnist_vectors('fashion-mnist-train.gz')\n test = _load_mnist_vectors('fashion-mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef transform_bag_of_words(filename, n_dimensions, out_fn):\n import gzip\n from scipy.sparse import lil_matrix\n from sklearn.feature_extraction.text import TfidfTransformer\n from sklearn import random_projection\n with gzip.open(filename, 'rb') as f:\n file_content = f.readlines()\n entries = int(file_content[0])\n words = int(file_content[1])\n file_content = file_content[3:] # strip first three entries\n print(\"building matrix...\")\n A = lil_matrix((entries, words))\n for e in file_content:\n doc, word, cnt = [int(v) for v in e.strip().split()]\n A[doc - 1, word - 1] = cnt\n print(\"normalizing matrix entries with tfidf...\")\n B = TfidfTransformer().fit_transform(A)\n print(\"reducing dimensionality...\")\n C = random_projection.GaussianRandomProjection(\n n_components=n_dimensions).fit_transform(B)\n X_train, X_test = train_test_split(C)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef nytimes(out_fn, n_dimensions):\n fn = 'nytimes_%s.txt.gz' % n_dimensions\n download('https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/docword.nytimes.txt.gz', fn)\n transform_bag_of_words(fn, n_dimensions, out_fn)\n\n\ndef random(out_fn, n_dims, n_samples, centers, distance):\n import sklearn.datasets\n\n X, _ = sklearn.datasets.make_blobs(\n n_samples=n_samples, n_features=n_dims, centers=centers, random_state=1)\n X_train, X_test = train_test_split(X, test_size=0.1)\n write_output(X_train, X_test, out_fn, distance)\n\ndef random_bitstring(out_fn, n_dims, n_samples, n_queries):\n import sklearn.datasets\n\n Y, _ = sklearn.datasets.make_blobs(n_samples=n_samples, n_features=n_dims, centers=n_queries, random_state=1)\n X = numpy.zeros((n_samples, n_dims), dtype=numpy.bool)\n for i, vec in enumerate(Y):\n X[i] = numpy.array([v > 0 for v in vec], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=n_queries)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\n\ndef word2bits(out_fn, path, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 
'http://web.stanford.edu/~maxlam/word_vectors/compressed/%s/%s.tar.gz' % (\n path, fn)\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n n_words, k = [int(z) for z in next(f).strip().split()]\n X = numpy.zeros((n_words, k), dtype=numpy.bool)\n for i in range(n_words):\n X[i] = numpy.array([float(z) > 0 for z in next(f).strip().split()[1:]], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef sift_hamming(out_fn, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 'http://sss.projects.itu.dk/ann-benchmarks/datasets/%s.tar.gz' % fn\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n lines = f.readlines()\n X = numpy.zeros((len(lines), 256), dtype=numpy.bool)\n for i, line in enumerate(lines):\n X[i] = numpy.array([int(x) > 0 for x in line.decode().strip()], dtype=numpy.bool)\n X_train, X_test = train_test_split(X, test_size = 1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef lastfm(out_fn, n_dimensions, test_size=50000):\n # This tests out ANN methods for retrieval on simple matrix factorization based\n # recommendation algorithms. The idea being that the query/test vectors are user factors\n # and the train set are item factors from the matrix factorization model.\n\n # Since the predictor is a dot product, we transform the factors first as described in this\n # paper: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf\n # This hopefully replicates the experiments done in this post:\n # http://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/\n\n # The dataset is from \"Last.fm Dataset - 360K users\":\n # http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-360K.html\n\n # this requires the implicit package to generate the factors (on my desktop/gpu this only\n # takes 4-5 seconds to train - but could take 1-2 minutes on a laptop)\n from implicit.datasets.lastfm import get_lastfm\n from implicit.approximate_als import augment_inner_product_matrix\n import implicit\n\n # train an als model on the lastfm data\n _, _, play_counts = get_lastfm()\n model = implicit.als.AlternatingLeastSquares(factors=n_dimensions)\n model.fit(implicit.nearest_neighbours.bm25_weight(play_counts, K1=100, B=0.8))\n\n # transform item factors so that each one has the same norm, and transform the user\n # factors such by appending a 0 column\n _, item_factors = augment_inner_product_matrix(model.item_factors)\n user_factors = numpy.append(model.user_factors,\n numpy.zeros((model.user_factors.shape[0], 1)),\n axis=1)\n\n # only query the first 50k users (speeds things up signficantly without changing results)\n user_factors = user_factors[:test_size]\n\n # after that transformation a cosine lookup will return the same results as the inner product\n # on the untransformed data\n write_output(item_factors, user_factors, out_fn, 'angular')\n\n\n# Writing my own custom samplers to know how these algorithms perform on\n# denser dimensionalities (up to 10D and 1 billion points) and\n# I need to test the time it takes to build a full graph\n\n\ndef uniform(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = numpy.random.uniform(size=(n_samples, n_dims))\n write_output(X, X, out_fn, distance)\n\n\ndef normal(out_fn, seed, 
n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = numpy.clip(numpy.random.normal(\n loc=0.5, scale=0.15, size=(n_samples, n_dims)), 0, 1)\n write_output(X, X, out_fn, distance)\n\n\ndef cvt(out_fn, seed, n_dims, n_samples, distance):\n result = subprocess.run(['samplers/cvt/createCVT', '-N', str(n_samples),\n '-D', str(n_dims), '-seed',\n str(seed), '-ann', '1',\n '-iterations', '1000000'],\n stdout=subprocess.PIPE)\n lines = result.stdout.decode('utf-8').strip().split('\\n')\n X = numpy.zeros((n_samples, n_dims))\n for i, line in enumerate(lines):\n X[i, :] = list(map(float, line.strip().split(' ')))\n write_output(X, X, out_fn, distance)\n\n\ndef shell(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n r = numpy.atleast_2d(numpy.random.uniform(low=0.5, high=1, size=n_samples)).T\n sampler = samplers.DirectionalSampler(n_dims)\n X = ((r * sampler.generate_samples(n_samples)) + 1) / 2.\n write_output(X, X, out_fn, distance)\n\n\ndef lhs(out_fn, seed, n_dims, n_samples, distance):\n numpy.random.seed(seed)\n X = pyDOE.lhs(n_dims, n_samples)\n write_output(X, X, out_fn, distance)\n\n\ndef halton(out_fn, seed, n_dims, n_samples, distance):\n sequencer = ghalton.GeneralizedHalton(n_dims, seed)\n X = numpy.array(sequencer.get(n_samples))\n write_output(X, X, out_fn, distance)\n\n\nDATASETS = {\n 'fashion-mnist-784-euclidean': fashion_mnist,\n 'gist-960-euclidean': gist,\n 'glove-25-angular': lambda out_fn: glove(out_fn, 25),\n 'glove-50-angular': lambda out_fn: glove(out_fn, 50),\n 'glove-100-angular': lambda out_fn: glove(out_fn, 100),\n 'glove-200-angular': lambda out_fn: glove(out_fn, 200),\n 'mnist-784-euclidean': mnist,\n 'random-xs-20-euclidean': lambda out_fn: random(out_fn, 20, 10000, 100, 'euclidean'),\n 'random-s-100-euclidean': lambda out_fn: random(out_fn, 100, 100000, 1000, 'euclidean'),\n 'random-xs-20-angular': lambda out_fn: random(out_fn, 20, 10000, 100, 'angular'),\n 'random-s-100-angular': lambda out_fn: random(out_fn, 100, 100000, 1000, 'angular'),\n 'random-xs-16-hamming': lambda out_fn: random_bitstring(out_fn, 16, 10000, 100),\n 'random-s-128-hamming': lambda out_fn: random_bitstring(out_fn, 128, 50000, 1000),\n 'random-l-256-hamming': lambda out_fn: random_bitstring(out_fn, 256, 100000, 1000),\n 'sift-128-euclidean': sift,\n 'nytimes-256-angular': lambda out_fn: nytimes(out_fn, 256),\n 'nytimes-16-angular': lambda out_fn: nytimes(out_fn, 16),\n 'word2bits-800-hamming': lambda out_fn: word2bits(out_fn, '400K', 'w2b_bitlevel1_size800_vocab400K'),\n 'uniform-5-euclidean': lambda out_fn: uniform(out_fn, 0, 5, 10000000, 'euclidean'),\n 'normal-5-euclidean': lambda out_fn: normal(out_fn, 0, 5, 10000000, 'euclidean'),\n 'cvt-5-euclidean': lambda out_fn: cvt(out_fn, 0, 5, 10000000, 'euclidean'),\n 'shell-5-euclidean': lambda out_fn: shell(out_fn, 0, 5, 10000000, 'euclidean'),\n 'lhs-5-euclidean': lambda out_fn: lhs(out_fn, 0, 5, 10000000, 'euclidean'),\n 'halton-5-euclidean': lambda out_fn: halton(out_fn, 0, 5, 10000000, 'euclidean'),\n 'uniform-3-euclidean': lambda out_fn: uniform(out_fn, 0, 3, 1000000, 'euclidean'),\n 'normal-3-euclidean': lambda out_fn: normal(out_fn, 0, 3, 1000000, 'euclidean'),\n 'cvt-3-euclidean': lambda out_fn: cvt(out_fn, 0, 3, 1000000, 'euclidean'),\n 'shell-3-euclidean': lambda out_fn: shell(out_fn, 0, 3, 1000000, 'euclidean'),\n 'lhs-3-euclidean': lambda out_fn: lhs(out_fn, 0, 3, 1000000, 'euclidean'),\n 'halton-3-euclidean': lambda out_fn: halton(out_fn, 0, 3, 1000000, 'euclidean'),\n 'lastfm-64-dot': lambda out_fn: 
lastfm(out_fn, 64),\n 'sift-256-hamming': lambda out_fn: sift_hamming(out_fn, 'sift.hamming.256'),\n}\n" ]
[ [ "numpy.product", "numpy.random.seed", "sklearn.random_projection.GaussianRandomProjection", "sklearn.feature_extraction.text.TfidfTransformer", "numpy.random.normal", "numpy.random.uniform", "numpy.array", "numpy.zeros", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
anikeshkamath/IITGn_SURF_2016
[ "396ce97e4ed8980a8d650a9e486a97b427bfc731" ]
[ "Graph_Algorithms/indegree_CountMin.py" ]
[ "'''\nThis module is a streaming algorithm developed to compute the (in)degree centrality of vertices using CountMin sketch.\nCountMin provides approximate frequencies for each distinct element in the input stream. Accuracy of the approximation based on the dimensions of the 2D array used to store these frequencies. Exact value of the gurantee is derived in the CountMin paper.\nIndegree centrality can be seen as the frequency of the destination vertex. In the case of undirected graphs, the edge aslso increases the source vertex frequency. In this way, CountMin is used for finite space streaming algorithm for computation of indegree centrality of the vertices, which otherwise would have required maintaining an entire graph structure.\n'''\n\nfrom streampy_classes import Stream\nfrom streampy_classes import Agent\nfrom streampy_classes import Operators\nimport networkx as nx\nimport numpy as np\nimport hashlib\n\n'''\nmy_hash_value is the hash function used. This makes use of md5 hash library, which gives less collisions than the hashing done by a python dictionary\nouter_update updates the 2D array. As discussed in Count-Min algorithm, multiple copies of the array is kept so as to get a better guarantee on the aaproximate frequency provided.\nagent_update is the function that is fed to the corresponding agent eventually.\n'''\n\ndef my_hash_value(input_element, array_no, size_of_array):\n m = hashlib.md5()\n m.update(str(input_element) + str(array_no))\n hash_hex = int(m.hexdigest()[-8:],16)\n return (hash_hex)%(size_of_array)\n\ndef outer_update(directedness):\n def agent_update(ip_lst, c_struct):\n lst=ip_lst[0]\n for i in lst:\n source = i[0]\n sink = i[1]\n for j in range(c_struct.shape[0]):\n ind_sink = my_hash_value(sink, j, c_struct.shape[1])\n c_struct[j][ind_sink] += 1\n if not directedness:\n ind_source = my_hash_value(source, j, c_struct.shape[1])\n c_struct[j][ind_source] += 1\n return [], c_struct\n return agent_update\n\n\nclass indegree_CountMin(object):\n\n '''\n is_directed is the boolean for directedness of graph\n iStream is the input stream of edges\n count_structure is the 2D array that maintains the frequencies\n no_array being number of arrays and size_array being size of each array\n \n '''\n\n def __init__(self, iStream, is_directed, no_arrays, size_array, w_s = 15, step_size = 15, oStream= []):\n self.input_stream = iStream\n self.count_struct = np.zeros([no_arrays, size_array], 'float')\n self.is_directed = is_directed\n self.window_size = w_s\n self.step_size = step_size\n update = outer_update(self.is_directed)\n self.count_Agent = Operators.window_agent(update, [self.input_stream], [oStream], self.count_struct, None, self.window_size, self.step_size)\n\n def query(self, queried_vertice):\n lst_of_freqs = []\n nu_rows = self.count_struct.shape[0] \n for j in range(nu_rows):\n ind = my_hash_value(queried_vertice, j, self.count_struct.shape[1])\n lst_of_freqs.append(self.count_struct[j][ind])\n return min(lst_of_freqs)\n\n\n\n\n \n\n \n\n \n\n \n \n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brain-link/cmne
[ "0d87e0bcd0041b250ee484f39341a0bf83f72949", "0d87e0bcd0041b250ee484f39341a0bf83f72949" ]
[ "archive/I_implementation/II_run_crossvalidation.py", "archive/I_implementation/I_cmne/I_hyperparameter_evaluation/eval_topo_multi_hidden.py" ]
[ "#**\n# @file Option_4_bio_mne_comparison.py\n# @author Christoph Dinh <[email protected]>;\n# Matti Hamalainen <[email protected]>\n# @version 1.0\n# @date May, 2017\n#\n# @section LICENSE\n#\n# Copyright (C) 2017, Christoph Dinh. All rights reserved.\n#\n# @brief Model inverse operator with Deep Learning Model\n# to estimate a MNE-dSPM inverse solution on single epochs\n#\n#**\n\n#==================================================================================\n#%%\nimport os\nimport sys\n\nimport config as cfg\n\nsys.path.append(cfg.repo_path + 'I_implementation/I_cmne/II_training') #Add relative path to include modules\nsys.path.append(cfg.repo_path + 'I_implementation/helpers')\nsys.path.append(cfg.repo_path + 'I_implementation/I_cmne/I_hyperparameter_evaluation')\n\nimport numpy as np\nimport random\nimport matplotlib\nmatplotlib.use('Agg')# remove for plt.show()\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nimport datetime\n\nfrom mne.minimum_norm import apply_inverse\n\nimport mne\nfrom mne.minimum_norm import apply_inverse_epochs, read_inverse_operator\n\nfrom keras.models import load_model\n\nfrom helpers.cmnesettings import CMNESettings\nfrom helpers.cmnedata import CMNEData, standardize, reshape_future_data\n\n\n###################################################################################################\n# The Script\n###################################################################################################\n## assr_270LP_fs900 fs_1_nu_10_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-26_062129.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_0_fs_1_nu_10_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_001638.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_1_fs_1_nu_10_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_051144.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_2_fs_1_nu_10_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_095246.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_3_fs_1_nu_10_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_144903.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_4.txt'\n# fname_cross_validation_mse = 
'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_4_fs_1_nu_10_lb_80.txt'\n\n# # 5\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-29_195717.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_5_fs_1_nu_10_lb_80.txt'\n\n# # 6\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_10_lb_80_2020-07-30_010414.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job4_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job4_it_6_fs_1_nu_10_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_160_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-24_211559.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_0_fs_1_nu_160_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_023115.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_1_fs_1_nu_160_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_074010.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_2_fs_1_nu_160_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_125038.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_3_fs_1_nu_160_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_180344.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_4.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_4_fs_1_nu_160_lb_80.txt'\n\n# # 5 <<<\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-25_235531.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_5_fs_1_nu_160_lb_80.txt'\n\n# # 6\n# 
fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_160_lb_80_2020-07-26_061135.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job5_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job5_it_6_fs_1_nu_160_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_80\n# look_back = 80\n\n# # 0\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-24_211925.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_0_fs_1_nu_1280_lb_80.txt'\n\n# # 1\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_023622.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_1.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_1_fs_1_nu_1280_lb_80.txt'\n\n# # 2\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_074933.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_2.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_2_fs_1_nu_1280_lb_80.txt'\n\n# # 3\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_130259.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_3.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_3_fs_1_nu_1280_lb_80.txt'\n\n# # 4\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-25_182002.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_4.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_4_fs_1_nu_1280_lb_80.txt'\n\n# # 5\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-26_001853.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_5.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_5_fs_1_nu_1280_lb_80.txt'\n\n# # 6\n# fname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb80/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_80_2020-07-26_063512.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_idcs_job6_it_6.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb80/assr_270LP_fs900_cross_mse_job6_it_6_fs_1_nu_1280_lb_80.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_10\n# look_back = 10\n\n# # 0\n# fname_model = 
'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb10/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_10_2020-07-24_160109.h5'\n# fname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb10/assr_270LP_fs900_cross_idcs_job3_it_0.txt'\n# fname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb10/assr_270LP_fs900_cross_mse_job3_it_0_fs_1_nu_1280_lb_10.txt'\n\n## assr_270LP_fs900 fs_1_nu_1280_lb_160\nlook_back = 160\n\n# 0\nfname_model = 'C:/Users/chris/Dropbox/CMNE/Results/I_models/lb160/eval_hyper_model_meg-eeg_fs_1_nu_1280_lb_160_2020-07-25_101246.h5'\nfname_cross_validation_idcs = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb160/assr_270LP_fs900_cross_idcs_job9_it_0.txt'\nfname_cross_validation_mse = 'C:/Users/chris/Dropbox/CMNE/Results/III_training/lb160/assr_270LP_fs900_cross_mse_job9_it_0_fs_1_nu_1280_lb_160.txt'\n\n#%% Data Settings\ndata_settings = CMNESettings( repo_path=cfg.repo_path, data_path=cfg.data_path,\n fname_raw=cfg.fname_raw,\n fname_inv=cfg.fname_inv,\n fname_eve=cfg.fname_eve,\n fname_test_idcs=cfg.fname_test_idcs,\n meg_and_eeg=cfg.meg_and_eeg\n )\n\nevent_id, tmin, tmax = 1, -0.2, 0.5\ntrain_percentage = 0.85\ncross_validation_percentage = 0.85\n\ndata = CMNEData(cmne_settings=data_settings)\ndata.load_data(event_id=event_id, tmin=tmin, tmax=tmax, train_percentage=train_percentage)\n\n#################################\n#%%\nnum_train_idcs = len(data.train_idcs())\n\nwhole_list = list(range(num_train_idcs))\n\nif os.path.isfile(fname_cross_validation_idcs):\n cross_validation_train_idcs = []\n with open(fname_cross_validation_idcs, \"r\") as f:\n for line in f:\n cross_validation_train_idcs.append(int(line.strip()))\n cross_validation_test_idcs = [item for item in whole_list if item not in cross_validation_train_idcs]\n\n\n sel_epochs = data.train_epochs(cross_validation_test_idcs)\n\n nave = 2 #len(epochs)\n \n # Compute inverse solution and stcs for each epoch\n # Use the same inverse operator as with evoked data (i.e., set nave)\n # If you use a different nave, dSPM just scales by a factor sqrt(nave)\n #sel_epochs = mne.set_eeg_reference(sel_epochs, ref_channels=None, copy=True)[0]\n sel_epochs.apply_proj()\n\n # Compute inverse solution and stcs for each epoch\n # Use the same inverse operator as with evoked data (i.e., set nave)\n # If you use a different nave, dSPM just scales by a factor sqrt(nave)\n\n stcs = apply_inverse_epochs(sel_epochs, inverse_operator=data.inv_op(), lambda2=data.lambda2(), method=data.method(), pick_ori=\"normal\", nave=nave)\n\n # Attention - just an approximation, since not all stc are considered for the mean and the std\n stc_data = np.hstack([stc.data for stc in stcs])\n stc_mean = np.mean(stc_data, axis=1)\n stc_std = np.std(stc_data, axis=1)\n stc_data = None\n #Attention end\n\n # load model\n lstm_model = load_model(fname_model)\n\n future_steps = 1\n\n count_stcs = 1;\n #################################\n # %%\n with open(fname_cross_validation_mse, \"w\") as f:\n for stc in stcs:\n print('STC %d'%(count_stcs))\n stc_normalized = standardize(stc.data,mean=stc_mean,std=stc_std)\n stc_normalized_T = stc_normalized.transpose()\n \n feature_list, label_list = reshape_future_data(stc=stc_normalized_T, look_back=look_back, future_steps=future_steps)\n\n features = np.array(feature_list)\n labels = np.array(label_list)\n \n #%% LSTM estimation\n step = 1;\n for feature, label in (zip(features, labels)):\n stc_prior = np.expand_dims(feature, axis=0)\n stc_predict = 
lstm_model.predict(stc_prior)\n stc_mse = ((stc_predict - label)**2).mean(axis=1)\n \n #print('STC %d, Step %d, Error %f'%(count_stcs, step, stc_mse))\n\n f.write(str(stc_mse) +\"\\n\")\n step = step + 1;\n \n count_stcs = count_stcs + 1;\n\n if count_stcs == 11:\n break # break here", "#**\n# @file Option_3b_bio_mne_LSTM_single_estimation_mh_eval.py\n# @author Christoph Dinh <[email protected]>;\n# Matti Hamalainen <[email protected]>\n# @version 1.0\n# @date May, 2017\n#\n# @section LICENSE\n#\n# Copyright (C) 2017, Christoph Dinh. All rights reserved.\n#\n# @brief Model inverse operator with Deep Learning Model\n# to estimate a MNE-dSPM inverse solution on single epochs\n#\n#**\n\n#==================================================================================\n#%%\nimport sys\nsys.path.append(\"../..\") #Add relative path to include modules\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport datetime\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\n#from keras.callbacks import TensorBoard\n\nfrom cmnesettings import CMNESettings\nfrom cmnedata import CMNEData\nfrom cmnedata import generate_lstm_batches\n\ndef eval_topo_multi_hidden(data_settings, data, training_settings):\n ###################################################################################################\n # Configuration\n ###################################################################################################\n\n lstm_look_back = 80 #40 #100 #40\n \n num_units = [[320,320,320],[640,640,640],[1280,1280,1280]]\n\n\n ###################################################################################################\n # The Script\n ###################################################################################################\n \n num_features_in = data.inv_op()['nsource']\n num_labels_out = num_features_in\n \n # TensorBoard Callback\n # tbCallBack = TensorBoard(log_dir=data_settings.tb_log_dir(), histogram_freq=1, write_graph=True, write_images=True)\n \n history_losses = []\n for num_unit in num_units:\n print('>>>> Starting next iteration (Number of Units = [%s]) <<<<\\n'%', '.join(map(str, num_unit)))\n \n #time_steps_in = lstm_look_back\n # create the Data Generator\n data_generator = generate_lstm_batches(epochs=data.epochs(), inverse_operator=data.inv_op(), lambda2=data.lambda2(), method=data.method(), look_back=lstm_look_back, batch_size=training_settings['minibatch_size'])\n \n # create LSTM model\n model = None\n model = Sequential()\n model.add(LSTM(num_unit[0], activation='tanh', return_sequences=True, input_shape=(lstm_look_back,num_features_in)))\n if len(num_unit) > 2:\n model.add(LSTM(num_unit[1], activation='tanh', return_sequences=True))\n model.add(LSTM(num_unit[2], activation='tanh'))\n elif len(num_unit) > 1:\n model.add(LSTM(num_unit[1], activation='tanh'))\n model.add(Dense(num_labels_out, activation='linear'))\n \n # compile the model\n model.compile(loss = 'mean_squared_error', optimizer = 'adam')\n \n # Train - fit the model :D\n fitting_result = model.fit_generator(data_generator, steps_per_epoch=training_settings['steps_per_ep'], epochs=training_settings['num_epochs'], verbose=1, validation_data=None, class_weight=None, workers=1)# callbacks=[tbCallBack], validation_data=None, class_weight=None, workers=1)\n \n # # let's get some predictions\n # test_predict = model.predict(test_features)\n \n 
###################################################################################################\n # Save Results\n ###################################################################################################\n date_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')\n \n fname_model = data_settings.repo_path() + 'Results/Models/Model_Opt_3b_' + data_settings.modality() + '_mh_%s'%'_'.join(map(str, num_unit)) + '_lb_' + str(lstm_look_back) + '_' + date_stamp + '.h5'\n fname_training_loss = data_settings.repo_path() + 'Results/Training/Loss_Opt_3b_' + data_settings.modality() + '_mh_%s'%'_'.join(map(str, num_unit)) + '_lb_' + str(lstm_look_back) + '_' + date_stamp + '.txt'\n fname_resultfig = data_settings.repo_path() + 'Results/img/Loss_Opt_3b_' + data_settings.modality() + '_mh_%s'%'_'.join(map(str, num_unit)) + '_lb_' + str(lstm_look_back) + '_' + date_stamp + '.png'\n \n history_losses.append(fitting_result.history['loss'])\n \n # save model\n model.save(fname_model)\n \n # # plot the data\n # print('Testing Prediction',test_predict)\n # print('Testing Reference',test_labels)\n \n # save loss\n np.savetxt(fname_training_loss, fitting_result.history['loss'])\n \n # save plot the data\n plt.figure()\n plt.plot(fitting_result.history['loss'])\n plt.xlabel('Minibatch number')\n plt.ylabel('Loss')\n plt.title('Minibatch run vs. Training loss')\n #axes = plt.gca()\n #axes.set_xlim([xmin,xmax])\n #axes.set_ylim([0,1.2])\n fig = plt.gcf()\n fig.set_size_inches(8, 6)\n plt.savefig(fname_resultfig, dpi=300)\n #plt.show()\n \n \n # save overall plot\n date_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')\n fname_overall_fig = data_settings.repo_path() + 'Results/img/Loss_Opt_3b_' + data_settings.modality() + '_overall_mh_' + date_stamp + '.png'\n plt.figure()\n plt.xlabel('Minibatch number')\n plt.ylabel('Loss')\n plt.title('Minibatch run vs. Training loss')\n for i in range(len(history_losses)):\n plt.plot(history_losses[i], label='NU [%s]'%', '.join(map(str, num_units[i])))\n \n plt.legend()\n #axes = plt.gca()\n #axes.set_xlim([xmin,xmax])\n #axes.set_ylim([0,1.2])\n fig = plt.gcf()\n fig.set_size_inches(8, 6)\n plt.savefig(fname_overall_fig, dpi=300)\n #plt.show()\n" ]
[ [ "numpy.hstack", "numpy.expand_dims", "matplotlib.use", "numpy.std", "numpy.mean", "numpy.array" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "numpy.savetxt", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
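The evaluation script in the record above standardizes each source estimate, slices it into look-back windows with reshape_future_data (defined elsewhere in that repository), and accumulates a per-step prediction MSE. Below is a minimal, self-contained sketch of that windowing and error computation; the exact semantics of reshape_future_data are an assumption inferred from how the script consumes its output.

import numpy as np

def make_lookback_windows(stc_T, look_back, future_steps=1):
    # stc_T: (n_times, n_sources). Each feature is a block of look_back
    # consecutive samples; the label is the sample future_steps ahead
    # (assumed to mirror reshape_future_data).
    features, labels = [], []
    for t in range(stc_T.shape[0] - look_back - future_steps + 1):
        features.append(stc_T[t:t + look_back])
        labels.append(stc_T[t + look_back + future_steps - 1])
    return np.array(features), np.array(labels)

rng = np.random.default_rng(0)
stc_T = rng.standard_normal((900, 40))  # toy stand-in for stc_normalized_T
features, labels = make_lookback_windows(stc_T, look_back=160)
print(features.shape, labels.shape)  # (740, 160, 40) (740, 40)

# One evaluation step, mirroring the loop above with a dummy prediction:
stc_prior = np.expand_dims(features[0], axis=0)  # shape (1, 160, 40)
stc_predict = np.zeros((1, labels.shape[1]))  # stand-in for lstm_model.predict(stc_prior)
stc_mse = ((stc_predict - labels[0]) ** 2).mean(axis=1)
print(stc_mse)  # one MSE value per evaluation step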
mo-vic/HanZiGan
[ "dd8dc4b1eeffb01f928d7a4e5931cf6af1d7c1a4" ]
[ "utils/utils.py" ]
[ "import math\n\nimport torch\nfrom torch._six import inf\nfrom torchvision.utils import make_grid\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef _grad_norm(parameters, norm_type=2):\n r\"\"\"Compute gradient norm of an iterable of parameters.\n\n The norm is computed over all gradients together, as if they were\n concatenated into a single vector.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor.\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n total_norm = total_norm ** (1. / norm_type)\n\n return total_norm\n\n\ndef train(model, dataloader, criterion, optimizer, use_gpu, writer, epoch, scheduler, num_fakes, flip_rate, show_freq):\n all_acc = []\n all_d_loss = []\n all_g_loss = []\n\n d_optimizer, g_optimizer = optimizer\n d_scheduler, g_scheduler = scheduler\n\n for idx, data in tqdm(enumerate(dataloader), desc=\"Training Epoch {}\".format(epoch)):\n # train discriminator\n d_optimizer.zero_grad()\n g_optimizer.zero_grad()\n labels = torch.cat([torch.bernoulli(torch.ones((data.size(0), 1)) * flip_rate), torch.zeros((num_fakes, 1))],\n dim=0)\n if use_gpu:\n data, labels = data.cuda(), labels.cuda()\n outputs = model(data, mode='D')\n d_loss = criterion(outputs, labels)\n d_loss.backward()\n d_optimizer.step()\n\n all_d_loss.append(d_loss.item())\n acc = (torch.ge(outputs, 0.5).long().data == labels.long().data).double().mean()\n all_acc.append(acc.item())\n\n writer.add_scalar(\"train_d_grad_norm\", _grad_norm(model.parameters()),\n global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_d_loss\", d_loss.item(), global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_acc\", acc.item(), global_step=epoch * len(dataloader) + idx)\n\n # train generator\n d_optimizer.zero_grad()\n g_optimizer.zero_grad()\n fake_images, outputs = model(mode='G')\n labels = torch.ones((num_fakes, 1))\n if use_gpu:\n labels = labels.cuda()\n g_loss = criterion(outputs, labels)\n g_loss.backward()\n g_optimizer.step()\n\n all_g_loss.append(g_loss.item())\n\n if idx % show_freq == 0:\n fake_images = make_grid(fake_images, nrow=round(math.sqrt(num_fakes)))\n writer.add_image(\"fake_images\", fake_images, global_step=epoch * len(dataloader) + idx)\n real_images = make_grid(data, nrow=round(math.sqrt(data.size(0))))\n writer.add_image(\"real_images\", real_images, global_step=epoch * len(dataloader) + idx)\n\n writer.add_scalar(\"train_g_grad_norm\", _grad_norm(model.parameters()),\n global_step=epoch * len(dataloader) + idx)\n writer.add_scalar(\"train_g_loss\", g_loss.item(), global_step=epoch * len(dataloader) + idx)\n\n writer.add_scalar(\"acc\", np.mean(all_acc).item(), global_step=epoch)\n writer.add_scalar(\"d_loss\", np.mean(all_d_loss).item(), global_step=epoch)\n writer.add_scalar(\"g_loss\", np.mean(all_g_loss).item(), global_step=epoch)\n\n d_scheduler.step(np.mean(all_d_loss).item())\n g_scheduler.step(np.mean(all_g_loss).item())\n\n print(\"Epoch {}: total discriminator loss: {}\".format(epoch, 
np.mean(all_d_loss).item()), end=',')\n print(\"total generator loss: {}, global accuracy:{}.\".format(np.mean(all_g_loss), np.mean(all_acc)))\n" ]
[ [ "torch.ge", "torch.ones", "numpy.mean", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
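The _grad_norm helper in the record above reimplements the total-parameter-norm computation that torch.nn.utils.clip_grad_norm_ performs before clipping. A small sketch checking the two agree (passing an infinite max_norm turns the built-in into a pure measurement):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()

# Manual total norm over all parameter gradients, as _grad_norm does.
total = sum(p.grad.data.norm(2).item() ** 2
            for p in model.parameters() if p.grad is not None) ** 0.5

# Built-in: with max_norm=inf the gradients are left untouched and the
# returned value is just the total norm.
builtin = nn.utils.clip_grad_norm_(model.parameters(), max_norm=float("inf"))
print(abs(total - float(builtin)) < 1e-6)  # True

The Bernoulli draw on the real labels in train(), torch.bernoulli(torch.ones(...) * flip_rate), is one-sided label flipping: each real sample keeps label 1 with probability flip_rate and is flipped to 0 otherwise, a common trick to keep the discriminator from saturating.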
lacie-life/MasterStudy
[ "16bb79a41555693c7e8cbb3c248c4670e0097073" ]
[ "ComputerVision/Bag-of-Visual-Words/BoVW.py" ]
[ "import argparse\nimport cv2\nimport numpy as np\nimport os\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom matplotlib import pyplot as plt\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics.pairwise import chi2_kernel\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\nclass_names = []\n\ndef labelLoader(path):\n global class_names\n class_names = os.listdir(path)\n\n\ndef getFiles(train, path):\n images = []\n for folder in os.listdir(path):\n for file in os.listdir(os.path.join(path, folder)):\n images.append(os.path.join(path, os.path.join(folder, file)))\n\n if (train is True):\n np.random.shuffle(images)\n return images\n\n\ndef getDescriptors(sift, img):\n kp, des = sift.detectAndCompute(img, None)\n return des\n\n\ndef readImage(img_path):\n # print(img_path)\n img = cv2.imread(img_path, 0)\n return cv2.resize(img, (150, 150))\n\n\ndef vstackDescriptors(descriptor_list):\n descriptors = np.array(descriptor_list[0])\n for descriptor in descriptor_list[1:]:\n descriptors = np.vstack((descriptors, descriptor))\n\n return descriptors\n\n\ndef clusterDescriptors(descriptors, no_clusters, batch):\n kmeans = KMeans(n_clusters=no_clusters).fit(descriptors)\n # kmeans = MiniBatchKMeans(n_clusters=no_clusters, batch_size=batch, verbose=1).fit(descriptors)\n return kmeans\n\n\ndef extractFeatures(kmeans, descriptor_list, image_count, no_clusters):\n im_features = np.array([np.zeros(no_clusters) for i in range(image_count)])\n for i in range(image_count):\n for j in range(len(descriptor_list[i])):\n feature = descriptor_list[i][j]\n feature = feature.reshape(1, 128)\n idx = kmeans.predict(feature)\n im_features[i][idx] += 1\n\n return im_features\n\n\ndef normalizeFeatures(scale, features):\n return scale.transform(features)\n\n\ndef plotHistogram(im_features, no_clusters):\n x_scalar = np.arange(no_clusters)\n y_scalar = np.array([abs(np.sum(im_features[:, h], dtype=np.int32)) for h in range(no_clusters)])\n\n plt.bar(x_scalar, y_scalar)\n plt.xlabel(\"Visual Word Index\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Complete Vocabulary Generated\")\n plt.xticks(x_scalar + 0.4, x_scalar)\n plt.savefig(\"histogram.png\")\n\n\ndef svcParamSelection(X, y, kernel, nfolds):\n Cs = [0.5, 0.1, 0.15, 0.2, 0.3]\n gammas = [0.1, 0.11, 0.095, 0.105]\n param_grid = {'C': Cs, 'gamma': gammas}\n grid_search = GridSearchCV(SVC(kernel=kernel), param_grid, cv=nfolds)\n grid_search.fit(X, y)\n grid_search.best_params_\n return grid_search.best_params_\n\n\ndef findSVM(im_features, train_labels, kernel):\n features = im_features\n if (kernel == \"precomputed\"):\n features = np.dot(im_features, im_features.T)\n\n params = svcParamSelection(features, train_labels, kernel, 5)\n C_param, gamma_param = params.get(\"C\"), params.get(\"gamma\")\n print(C_param, gamma_param)\n\n svm = SVC(kernel=kernel, C=C_param, gamma=gamma_param)\n svm.fit(features, train_labels)\n return svm\n\n\ndef plotConfusionMatrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n cm = confusion_matrix(y_true, y_pred)\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n\n\ndef plotConfusions(true, predictions):\n np.set_printoptions(precision=2)\n\n plotConfusionMatrix(true, predictions, classes=class_names,\n title='Confusion matrix, without normalization')\n\n plotConfusionMatrix(true, predictions, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\n plt.savefig(\"confusions.png\")\n\n\ndef findAccuracy(true, predictions):\n print('accuracy score: %0.3f' % accuracy_score(true, predictions))\n\n\ndef trainModel(path, no_clusters, kernel, batch):\n images = getFiles(True, path)\n print(\"Train images path detected.\")\n sift = cv2.xfeatures2d.SIFT_create()\n descriptor_list = []\n train_labels = np.array([])\n image_count = len(images)\n\n for img_path in images:\n for i in range(0, len(class_names)):\n if class_names[i] in img_path:\n class_index = i\n\n train_labels = np.append(train_labels, class_index)\n img = readImage(img_path)\n des = getDescriptors(sift, img)\n descriptor_list.append(des)\n\n\n # descriptors = vstackDescriptors(descriptor_list)\n # print(\"Descriptors vstacked.\")\n\n kmeans = clusterDescriptors(descriptor_list, no_clusters, batch)\n print(\"Descriptors clustered.\")\n\n im_features = extractFeatures(kmeans, descriptor_list, image_count, no_clusters)\n print(\"Images features extracted.\")\n\n scale = StandardScaler().fit(im_features)\n im_features = scale.transform(im_features)\n print(\"Train images normalized.\")\n\n plotHistogram(im_features, no_clusters)\n print(\"Features histogram plotted.\")\n\n svm = findSVM(im_features, train_labels, kernel)\n print(\"SVM fitted.\")\n print(\"Training completed.\")\n\n return kmeans, scale, svm, im_features\n\n\ndef testModel(path, kmeans, scale, svm, im_features, no_clusters, kernel):\n test_images = getFiles(False, path)\n print(\"Test images path detected.\")\n\n count = 0\n true = []\n descriptor_list = []\n\n index = list(range(0, len(class_names)))\n\n # name_dict = dict(zip(index, class_names.copy()))\n\n name_dict = {\n \"0\": class_names[0],\n \"1\": class_names[1],\n \"2\": class_names[2],\n \"3\": class_names[3],\n \"4\": class_names[4],\n \"5\": class_names[5],\n \"6\": class_names[6],\n \"7\": class_names[7]\n }\n print(name_dict)\n\n sift = cv2.xfeatures2d.SIFT_create()\n\n for img_path in test_images:\n img = readImage(img_path)\n des = getDescriptors(sift, img)\n\n if (des is not None):\n count += 1\n descriptor_list.append(des)\n\n for i in range(0, len(class_names)):\n if class_names[i] in img_path:\n print(img_path)\n print(class_names[i])\n true.append(class_names[i])\n\n test_features = extractFeatures(kmeans, descriptor_list, count, no_clusters)\n\n test_features = scale.transform(test_features)\n\n kernel_test = test_features\n if 
(kernel == \"precomputed\"):\n kernel_test = np.dot(test_features, im_features.T)\n\n predictions = [name_dict[str(int(i))] for i in svm.predict(kernel_test)]\n print(\"Test images classified.\")\n\n plotConfusions(true, predictions)\n print(\"Confusion matrixes plotted.\")\n\n findAccuracy(true, predictions)\n print(\"Accuracy calculated.\")\n print(\"Execution done.\")\n\n\ndef execute(train_path, test_path, no_clusters, kernel, batch):\n kmeans, scale, svm, im_features = trainModel(train_path, no_clusters, kernel, batch)\n testModel(test_path, kmeans, scale, svm, im_features, no_clusters, kernel)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--train_path', action=\"store\", dest=\"train_path\", default=\"/home/jun/Github/BoVW/random-2/train\")\n parser.add_argument('--test_path', action=\"store\", dest=\"test_path\", default=\"/home/jun/Github/BoVW/random-2/test\")\n parser.add_argument('--word', action=\"store\", dest=\"word\", default=500)\n parser.add_argument('--batch', action=\"store\", dest=\"batch\", default=3000)\n parser.add_argument('--kernel_type', action=\"store\", dest=\"kernel_type\", default=\"linear\")\n\n args = vars(parser.parse_args())\n if (not (args['kernel_type'] == \"linear\" or args['kernel_type'] == \"precomputed\")):\n print(\"Kernel type must be either linear or precomputed\")\n exit(0)\n\n labelLoader(args['train_path'])\n print(class_names)\n execute(args['train_path'], args['test_path'], int(args['word']), args['kernel_type'], args['batch'])\n" ]
[ [ "numpy.dot", "sklearn.cluster.KMeans", "sklearn.metrics.confusion_matrix", "numpy.arange", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.append", "sklearn.preprocessing.StandardScaler", "sklearn.svm.SVC", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.set_printoptions", "matplotlib.pyplot.subplots", "numpy.random.shuffle", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.vstack", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
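The clusterDescriptors/extractFeatures pair in the record above implements the core bag-of-visual-words step: cluster all local descriptors into a vocabulary, then histogram each image's descriptors over the predicted cluster indices. A compact equivalent using random 128-d vectors in place of SIFT output; the per-descriptor predict loop in extractFeatures is replaced by one vectorized predict plus bincount, which computes the same histogram:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(42)
descriptor_list = [rng.standard_normal((60, 128)) for _ in range(5)]  # 5 "images"

no_clusters = 10
kmeans = KMeans(n_clusters=no_clusters, n_init=10).fit(np.vstack(descriptor_list))

im_features = np.zeros((len(descriptor_list), no_clusters))
for i, des in enumerate(descriptor_list):
    idx = kmeans.predict(des)  # visual-word index per descriptor
    im_features[i] = np.bincount(idx, minlength=no_clusters)

print(im_features.shape)  # (5, 10); each row sums to 60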
RomanShen/radial-bnn
[ "7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c" ]
[ "src/models/distributions.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\" \n@author: romanshen \n@file: distributions.py \n@time: 2021/05/07\n@contact: [email protected]\n\"\"\"\n\n\nimport torch\n\n# Priors\n\n\ndef gaussian_prior(name, log2pi, mu, sigma, device):\n \"\"\"\n Args:\n *args: {\"mu\": , \"sigma\":, \"log2pi\"}\n Returns: log_gaussian_pdf that takes a weight of arbitrary shape\n \"\"\"\n if mu == 0 and sigma == 1:\n # We handle this case slightly differently as it is common and can be made more efficient\n def log_gaussian_pdf(x):\n x = x.view(x.shape[0], -1)\n return -log2pi * x.shape[1] / 2 - torch.sum(x ** 2) / 2.0\n\n return log_gaussian_pdf\n else:\n mu_tensor = torch.tensor(\n mu, requires_grad=False, dtype=torch.float32, device=device\n )\n sigma_tensor = torch.tensor(\n sigma, requires_grad=False, dtype=torch.float32, device=device\n )\n two_sigma_squared = 2 * (sigma_tensor ** 2)\n log_sigma = torch.log(sigma_tensor)\n\n def log_gaussian_pdf(x):\n x = x.view(x.shape[0], -1)\n log_pd = -log2pi * x.shape[1] / 2\n log_pd = log_pd - torch.sum((x - mu_tensor) ** 2, dim=1) / two_sigma_squared\n log_pd = log_pd - log_sigma * x.shape[1] / 2\n return log_pd\n\n return log_gaussian_pdf\n\n\n# Sampling noise distributions\n\n\ndef radial(size):\n \"\"\"\n Creates a distribution that is unit Gaussian along r and uniform over \\theta.\n :param size: The size of the weight distribution to be generated.\n Zeroth dimension is variational samples.\n 1+ dimensions are the weight for each sample from the variational distribution.\n The same weight is applied to each example in a batch.\n :return: noise distribution\n \"\"\"\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n # First we find a random direction (\\epsilon_{\\text{MFVI}} in equation (3) on page 4)\n epsilon_mfvi = torch.randn(size, device=device)\n\n # Then we pick a distance (r in equation (3) on page 4)\n distance = torch.randn((size[0]), device=device)\n\n # Then we normalize each variational sample independently\n if len(size) == 2:\n normalizing_factor = torch.norm(\n epsilon_mfvi.view(size[0], -1), p=2, dim=1\n ).unsqueeze(1)\n distance = distance.unsqueeze(1)\n elif len(size) == 3:\n normalizing_factor = (\n torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)\n .unsqueeze(1)\n .unsqueeze(1)\n )\n distance = distance.unsqueeze(1).unsqueeze(1)\n elif len(size) == 5:\n # Here we have a CNN with dimensions (var samples, out_channels, in_channels, kernel, kernel)\n normalizing_factor = (\n torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)\n .unsqueeze(1)\n .unsqueeze(1)\n .unsqueeze(1)\n .unsqueeze(1)\n )\n distance = distance.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1)\n else:\n raise ValueError(\n \"Number of dimensions for epsilon not expected. Are you sure you wanted size {}\".format(\n size\n )\n )\n\n direction = epsilon_mfvi / normalizing_factor\n epsilon_radial = direction * distance\n return epsilon_radial\n\n\ndef gaussian(size):\n \"\"\"\n Returns a tensor of random epsilon using the default gaussian unit distribution\n :param size: shape of tensor to return (tuple)\n :return: FloatTensor of Size\n \"\"\"\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n epsilon_mfvi = torch.randn(size, device=device)\n return epsilon_mfvi\n" ]
[ [ "torch.randn", "torch.sum", "torch.tensor", "torch.log", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
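The radial sampler in the record above factorizes each variational draw into a random direction (a normalized MFVI Gaussian sample) and a scalar Gaussian radius, so the density is Gaussian in r and uniform over direction. A CPU-only sketch of the three-dimensional branch that checks each sample's radius equals |r|:

import torch

size = (16, 32, 64)  # (variational samples, out_features, in_features)
epsilon_mfvi = torch.randn(size)
distance = torch.randn(size[0])

# Normalize each variational sample independently, then scale by the radius.
normalizing_factor = epsilon_mfvi.view(size[0], -1).norm(p=2, dim=1).view(-1, 1, 1)
epsilon_radial = (epsilon_mfvi / normalizing_factor) * distance.view(-1, 1, 1)

radii = epsilon_radial.view(size[0], -1).norm(p=2, dim=1)
print(torch.allclose(radii, distance.abs(), atol=1e-5))  # True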
lkilcher/dolfyn-light
[ "416bf6aa8a3455cebf973f416c9e4ba89a801a71" ]
[ "dolfyn/adv/motion.py" ]
[ "import numpy as np\nimport scipy.signal as sig\nfrom scipy.integrate import cumtrapz\nfrom .rotate import inst2earth, _rotate_vel2body\nimport warnings\n\n\nclass CalcMotion(object):\n\n \"\"\"\n A 'calculator' for computing the velocity of points that are\n rigidly connected to an ADV-body with an IMU.\n\n Parameters\n ----------\n\n advo : `adv_raw<dolfyn.adv.base.adv_raw>`\n The IMU-adv object that will be used to compute motion.\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. (default: 1/3 of accel_filtfreq)\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> from dolfyn.adv import motion as avmot\n\n >>> dat = avm.read_nortek('my_data_file.vec')\n\n >>> mcalc = avmot.CalcMotion(dat)\n\n # Calculate the motion of a point that is (.3, .1, .06) meters\n # from the adv-body origin:\n >>> mot = mcalc([.3, .1, .06])\n\n \"\"\"\n\n def __init__(self, advo,\n accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n to_earth=True):\n\n self.advo = advo\n self.accel_filtfreq = accel_filtfreq\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n self.accelvel_filtfreq = vel_filtfreq\n self.to_earth = to_earth\n\n self._set_Accel()\n self._set_AccelStable()\n self.AngRt = advo.AngRt # No copy because not modified.\n\n def _set_Accel(self, ):\n advo = self.advo\n if advo.props['coord_sys'] == 'inst':\n self.Accel = np.einsum('ijk,ik->jk',\n advo.orientmat,\n advo.Accel)\n elif self.advo.props['coord_sys'] == 'earth':\n self.Accel = advo.Accel.copy()\n else:\n raise Exception((\"Invalid coordinate system '%s'. The coordinate \"\n \"system must either be 'earth' or 'inst' to \"\n \"perform motion correction.\")\n % (self.advo.props['coord_sys'], ))\n\n def _set_AccelStable(self, ):\n \"\"\"\n \"\"\"\n self.AccelStable = acc = self.Accel.copy()\n if self.accel_filtfreq == 0:\n acc[:] = acc.mean(-1)[..., None]\n else:\n flt = sig.butter(1, self.accel_filtfreq / (self.advo.fs / 2))\n for idx in range(3):\n acc[idx] = sig.filtfilt(flt[0], flt[1], acc[idx])\n\n def __call__(self, vec):\n \"\"\"\n Calculate the motion of the point specified by vec (in meters,\n in the adv-body coordinate system).\n\n Parameters\n ----------\n\n vec : |np.ndarray| (len(3) or 3 x M)\n The vector in meters (or set of vectors) from the\n body-origin (center of head end-cap) to the point of\n interest (in the body coord-sys).\n\n Returns\n -------\n umot : |np.ndarray| (3 x M x N_time)\n The motion (velocity) array (3, n_time).\n\n \"\"\"\n return self.calc_uacc() + self.calc_urot(np.array(vec), )\n\n def calc_uacc(self, ):\n \"\"\"\n Calculates the translational velocity from the acceleration\n signal.\n\n Returns\n -------\n uacc : |np.ndarray| (3 x n_time)\n The acceleration-induced velocity array (3, n_time).\n \"\"\"\n samp_freq = self.advo.fs\n\n hp = self.Accel - self.AccelStable\n\n dat = np.concatenate((np.zeros(list(hp.shape[:-1]) + [1]),\n cumtrapz(hp, dx=1. 
/ samp_freq)), axis=-1)\n if self.accelvel_filtfreq > 0:\n filt_freq = self.accelvel_filtfreq\n # 2nd-order butterworth filter; filtfilt below applies it twice.\n filt = sig.butter(2, float(filt_freq) / (samp_freq / 2))\n for idx in range(hp.shape[0]):\n dat[idx] = dat[idx] - sig.filtfilt(filt[0], filt[1], dat[idx])\n return dat\n\n def calc_urot(self, vec, to_earth=None):\n\n \"\"\"\n Calculate the induced velocity due to rotations of the instrument\n about the IMU center.\n\n Parameters\n ----------\n\n vec : |np.ndarray| (len(3) or 3 x M)\n The vector in meters (or vectors) from the body-origin\n (center of head end-cap) to the point of interest (in the\n body coord-sys).\n\n Returns\n -------\n urot : |np.ndarray| (3 x M x N_time)\n The rotation-induced velocity array (3, n_time).\n\n \"\"\"\n\n if to_earth is None:\n to_earth = self.to_earth\n\n dimflag = False\n if vec.ndim == 1:\n vec = vec.copy().reshape((3, 1))\n dimflag = True\n\n # Correct for the body->imu distance.\n # The nortek_body2imu vector is subtracted because of\n # vector addition:\n # body2head = body2imu + imu2head\n # Thus:\n # imu2head = body2head - body2imu\n vec = vec - self.advo.body2imu_vec[:, None]\n\n # This motion of the point *vec* due to rotations should be the\n # cross-product of omega (rotation vector) and the vector.\n # u=dz*omegaY-dy*omegaZ,v=dx*omegaZ-dz*omegaX,w=dy*omegaX-dx*omegaY\n # where vec=[dx,dy,dz], and AngRt=[omegaX,omegaY,omegaZ]\n urot = np.array([(vec[2][:, None] * self.AngRt[1] -\n vec[1][:, None] * self.AngRt[2]),\n (vec[0][:, None] * self.AngRt[2] -\n vec[2][:, None] * self.AngRt[0]),\n (vec[1][:, None] * self.AngRt[0] -\n vec[0][:, None] * self.AngRt[1]),\n ])\n\n if to_earth:\n urot = np.einsum('jik,jlk->ilk', self.advo['orientmat'], urot)\n\n if dimflag:\n return urot[:, 0, :]\n\n return urot\n\n\ndef _calc_probe_pos(advo, separate_probes=False):\n \"\"\"\n !!!Currently this only works for Nortek Vectors!\n\n In the future, we could use the transformation matrix (and a\n probe-length lookup-table?)\n \"\"\"\n # According to the ADV_DataSheet, the probe-length radius is\n # 8.6cm @ 120deg from probe-stem axis. If I subtract 1cm\n # (!!!checkthis) to get acoustic receiver center, this is\n # 7.6cm. In the coordinate sys of the center of the probe\n # then, the positions of the centers of the receivers is:\n if advo.make_model == 'Nortek VECTOR' and separate_probes:\n r = 0.076\n # The angle between the x-y plane and the probes\n phi = -30. * np.pi / 180.\n theta = np.array([0., 120., 240.]) * np.pi / \\\n 180. # The angles of the probes from the x-axis.\n return (np.dot(advo.props['body2head_rotmat'].T,\n np.array([r * np.cos(theta),\n r * np.sin(theta),\n r * np.tan(phi) * np.ones(3)])) +\n advo.props['body2head_vec'][:, None]\n )\n else:\n return advo.props['body2head_vec']\n\n\ndef correct_motion(advo,\n accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n to_earth=True,\n separate_probes=False, ):\n \"\"\"\n This function performs motion correction on an IMU-ADV data\n object. The IMU and ADV data should be tightly synchronized and\n contained in a single data object.\n\n Parameters\n ----------\n\n advo : dolfyn.adv.adv class\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. 
(default: 1/3 of accel_filtfreq)\n\n to_earth : bool (optional, default: True)\n All variables in the advo.props['rotate_vars'] list will be\n rotated into either the earth frame (to_earth=True) or the\n instrument frame (to_earth=False).\n\n separate_probes : bool (optional, default: False)\n a flag to perform motion-correction at the probe tips, and\n perform motion correction in beam-coordinates, then transform\n back into XYZ/earth coordinates. This correction seems to be\n lower than the noise levels of the ADV, so the default is to not\n use it (False).\n\n Returns\n -------\n This function returns None, it operates on the input data object,\n ``advo``. The following attributes are added to `advo`:\n\n ``uraw`` is the uncorrected velocity\n\n ``urot`` is the rotational component of the head motion (from\n AngRt)\n\n ``uacc`` is the translational component of the head motion (from\n Accel)\n\n ``AccelStable`` is the low-pass filtered Accel signal\n\n The primary velocity vector attribute, ``_u``, is motion corrected\n such that:\n\n _u = uraw + urot + uacc\n\n The signs are correct in this equation. The measured velocity\n induced by head-motion is *in the opposite direction* of the head\n motion. i.e. when the head moves one way in stationary flow, it\n measures a velocity in the opposite direction. Therefore, to\n remove the motion from the raw signal we *add* the head motion.\n\n Notes\n -----\n\n Acceleration signals from inertial sensors are notorious for\n having a small bias that can drift slowly in time. When\n integrating these signals to estimate velocity the bias is\n amplified and leads to large errors in the estimated\n velocity. There are two methods for removing these errors,\n\n 1) high-pass filter the acceleration signal prior and/or after\n integrating. This implicitly assumes that the low-frequency\n translational velocity is zero.\n 2) provide a slowly-varying reference position (often from a GPS)\n to an IMU that can use the signal (usually using Kalman\n filters) to debias the acceleration signal.\n\n Because method (1) removes `real` low-frequency acceleration,\n method (2) is more accurate. However, providing reference position\n estimates to undersea instruments is practically challenging and\n expensive. Therefore, lacking the ability to use method (2), this\n function utilizes method (1).\n\n For deployments in which the ADV is mounted on a mooring, or other\n semi-fixed structure, the assumption of zero low-frequency\n translational velocity is a reasonable one. However, for\n deployments on ships, gliders, or other moving objects it is\n not. The measured velocity, after motion-correction, will still\n hold some of this contamination and will be a sum of the ADV\n motion and the measured velocity on long time scales. If\n low-frequency motion is known separate from the ADV (e.g. from a\n bottom-tracking ADP, or from a ship's GPS), it may be possible to\n remove that signal from the ADV signal in post-processing. 
The\n accuracy of this approach has not, to my knowledge, been tested\n yet.\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> dat = avm.read_nortek('my_data_file.vec')\n >>> avm.motion.correct_motion(dat)\n\n ``dat`` will now be motion corrected.\n\n \"\"\"\n\n if hasattr(advo, 'urot'):\n raise Exception('The data object already appears to have been motion corrected.')\n\n if advo.props['coord_sys'] != 'inst':\n raise Exception('The data object must be in the instrument frame to be motion corrected.')\n\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n\n # Be sure the velocity data has been rotated to the body frame.\n _rotate_vel2body(advo)\n\n # Create the motion 'calculator':\n calcobj = CalcMotion(advo,\n accel_filtfreq=accel_filtfreq,\n vel_filtfreq=vel_filtfreq,\n to_earth=to_earth)\n\n ##########\n # Calculate the translational velocity (from the Accel):\n advo.groups['orient'].add('uacc')\n advo.uacc = calcobj.calc_uacc()\n # Copy AccelStable to the adv-object.\n advo.groups['orient'].add('AccelStable')\n advo.AccelStable = calcobj.AccelStable\n\n ##########\n # Calculate rotational velocity (from AngRt):\n pos = _calc_probe_pos(advo, separate_probes)\n # Calculate the velocity of the head (or probes).\n urot = calcobj.calc_urot(pos, to_earth=False)\n if separate_probes:\n # The head->beam transformation matrix\n transMat = advo.config.head.get('TransMatrix', None)\n # The body->head transformation matrix\n rmat = advo.props['body2head_rotmat']\n\n # 1) Rotate body-coordinate velocities to head-coord.\n urot = np.dot(rmat, urot)\n # 2) Rotate body-coord to beam-coord (einsum),\n # 3) Take along beam-component (diagonal),\n # 4) Rotate back to head-coord (einsum),\n urot = np.einsum('ij,kj->ik',\n transMat,\n np.diagonal(np.einsum('ij,jkl->ikl',\n np.linalg.inv(transMat),\n urot)\n ))\n # 5) Rotate back to body-coord.\n urot = np.dot(rmat.T, urot)\n advo.urot = urot\n advo.groups['orient'].add('urot')\n\n ##########\n # Rotate the data into the correct coordinate system.\n # inst2earth expects a 'rotate_vars' property.\n # Add urot, uacc, AccelStable, to it.\n if 'rotate_vars' not in advo.props.keys():\n advo.props['rotate_vars'] = {'_u', 'urot', 'uacc',\n 'Accel', 'AccelStable',\n 'AngRt', 'Mag'}\n else:\n advo.props['rotate_vars'].update({'urot', 'uacc', 'AccelStable'})\n\n # NOTE: Accel, AccelStable, and uacc are in the earth-frame after\n # calc_uacc() call.\n if to_earth:\n advo.Accel = calcobj.Accel\n inst2earth(advo, rotate_vars=advo.props['rotate_vars'] -\n {'Accel', 'AccelStable', 'uacc', })\n else:\n # rotate these variables back to the instrument frame.\n inst2earth(advo, reverse=True,\n rotate_vars={'AccelStable', 'uacc', },\n force=True,\n )\n\n ##########\n # Copy _u -> uraw prior to motion correction:\n advo.add_data('uraw', advo._u.copy(), 'main')\n # Add it to rotate_vars:\n advo.props['rotate_vars'].update({'uraw', })\n\n ##########\n # Remove motion from measured velocity!\n # NOTE: The plus sign is because the measured-induced velocities\n # are in the opposite direction of the head motion.\n # i.e. when the head moves one way in stationary flow, it\n # measures a velocity in the opposite direction.\n advo._u += (advo.urot + advo.uacc)\n\n\nclass CorrectMotion(object):\n\n \"\"\"\n This object performs motion correction on an IMU-ADV data\n object. 
The IMU and ADV data should be tightly synchronized and\n contained in a single data object.\n\n Parameters\n ----------\n\n accel_filtfreq : float\n the frequency at which to high-pass filter the acceleration\n signal to remove low-frequency drift.\n\n vel_filtfreq : float (optional)\n a second frequency to high-pass filter the integrated\n acceleration. (default: 1/3 of accel_filtfreq)\n\n separate_probes : bool (optional: False)\n a flag to perform motion-correction at the probe tips, and\n perform motion correction in beam-coordinates, then transform\n back into XYZ/earth coordinates. This correction seems to be\n lower than the noise levels of the ADV, so the default is to not\n use it (False).\n\n Notes\n -----\n\n Acceleration signals from inertial sensors are notorious for\n having a small bias that can drift slowly in time. When\n integrating these signals to estimate velocity the bias is\n amplified and leads to large errors in the estimated\n velocity. There are two methods for removing these errors,\n\n 1) high-pass filter the acceleration signal prior and/or after\n integrating. This implicitly assumes that the low-frequency\n translational velocity is zero.\n 2) provide a slowly-varying reference position (often from a GPS)\n to an IMU that can use the signal (usually using Kalman\n filters) to debias the acceleration signal.\n\n Because method (1) removes `real` low-frequency acceleration,\n method (2) is more accurate. However, providing reference position\n estimates to undersea instruments is practically challenging and\n expensive. Therefore, lacking the ability to use method (2), this\n function utilizes method (1).\n\n For deployments in which the ADV is mounted on a mooring, or other\n semi-fixed structure, the assumption of zero low-frequency\n translational velocity is a reasonable one. However, for\n deployments on ships, gliders, or other moving objects it is\n not. The measured velocity, after motion-correction, will still\n hold some of this contamination and will be a sum of the ADV\n motion and the measured velocity on long time scales. If\n low-frequency motion is known separate from the ADV (e.g. from a\n bottom-tracking ADP, or from a ship's GPS), it may be possible to\n remove that signal from the ADV signal in post-processing. The\n accuracy of this approach has not, to my knowledge, been tested\n yet.\n\n Examples\n --------\n\n >>> from dolfyn.adv import api as avm\n >>> dat = avm.read_nortek('my_data_file.vec')\n >>> mc = avm.CorrectMotion(0.1)\n >>> corrected_data = mc(dat)\n\n \"\"\"\n\n def __init__(self, accel_filtfreq=1. / 30,\n vel_filtfreq=None,\n separate_probes=False):\n\n self.accel_filtfreq = accel_filtfreq\n if vel_filtfreq is None:\n vel_filtfreq = accel_filtfreq / 3\n self.accelvel_filtfreq = vel_filtfreq\n self.separate_probes = separate_probes\n warnings.warn(\"The 'CorrectMotion' class is being deprecated \"\n \"and will be removed in a future DOLfYN release. 
\"\n \"Use the 'correct_motion' function instead.\",\n DeprecationWarning)\n\n def _rotate_vel2body(self, advo):\n # The transpose should do head to body.\n advo._u = np.dot(advo.props['body2head_rotmat'].T, advo._u)\n\n def _calc_rot_vel(self, calcobj):\n \"\"\"\n Calculate the 'rotational' velocity as measured by the IMU\n rate sensor.\n \"\"\"\n advo = calcobj.advo\n\n # This returns a 3x3 array of probe positions if\n # separate_probes is True.\n pos = self._calc_probe_pos(advo)\n\n # Calculate the velocity of the head (or probes).\n urot = calcobj.calc_urot(pos, to_earth=False)\n\n if self.separate_probes:\n # The head->beam transformation matrix\n transMat = advo.config.head.get('TransMatrix', None)\n # The body->head transformation matrix\n rmat = advo.props['body2head_rotmat']\n\n # 1) Rotate body-coordinate velocities to head-coord.\n urot = np.dot(rmat, urot)\n # 2) Rotate body-coord to beam-coord (einsum),\n # 3) Take along beam-component (diagonal),\n # 4) Rotate back to head-coord (einsum),\n urot = np.einsum('ij,kj->ik',\n transMat,\n np.diagonal(np.einsum('ij,jkl->ikl',\n np.linalg.inv(transMat),\n urot)\n ))\n # 5) Rotate back to body-coord.\n urot = np.dot(rmat.T, urot)\n\n advo.urot = urot\n advo.groups['orient'].add('urot')\n\n def _calc_probe_pos(self, advo):\n \"\"\"\n !!!Currently this only works for Nortek Vectors!\n\n In the future, we could use the transformation matrix (and a\n probe-length lookup-table?)\n \"\"\"\n # According to the ADV_DataSheet, the probe-length radius is\n # 8.6cm @ 120deg from probe-stem axis. If I subtract 1cm\n # (!!!checkthis) to get acoustic receiver center, this is\n # 7.6cm. In the coordinate sys of the center of the probe\n # then, the positions of the centers of the receivers is:\n if advo.make_model == 'Nortek VECTOR' and self.separate_probes:\n r = 0.076\n # The angle between the x-y plane and the probes\n phi = -30. * np.pi / 180.\n theta = np.array([0., 120., 240.]) * np.pi / \\\n 180. # The angles of the probes from the x-axis.\n return (np.dot(advo.props['body2head_rotmat'].T,\n np.array([r * np.cos(theta),\n r * np.sin(theta),\n r * np.tan(phi) * np.ones(3)])) +\n advo.props['body2head_vec'][:, None]\n )\n else:\n return advo.props['body2head_vec']\n\n def _calc_accel_vel(self, calcobj):\n advo = calcobj.advo\n advo.groups['orient'].add('uacc')\n advo.uacc = calcobj.calc_uacc()\n\n def __call__(self, advo, to_earth=True):\n \"\"\"\n Perform motion correction on an IMU-equipped ADV object.\n\n Parameters\n ----------\n advo : :class:`ADVraw <base.ADVraw>`\n The adv object on which to perform motion correction.\n It must contain the following data attributes:\n\n - _u : The velocity array.\n - Accel : The translational acceleration array.\n - AngRt : The rotation-rate array.\n - orientmat : The orientation matrix.\n - props : a dictionary that has 'body2head_vec',\n 'body2head_rotmat' and 'coord_sys'.\n\n to_earth : bool (optional, default: True)\n A boolean that specifies whether the data should be\n rotated into the earth frame.\n\n Notes\n -----\n\n After calling this function, `advo` will have *urot* and\n *uacc* data attributes. 
The velocity vector attribute ``_u``\n will be motion corrected according to:\n\n u_corr = u_raw + uacc + urot\n\n Therefore, to recover the 'raw' velocity, subtract uacc and\n urot from ``_u``.\n\n This method does not return a data object, it operates on\n (motion corrects) the input `advo`.\n\n \"\"\"\n\n calcobj = CalcMotion(advo,\n accel_filtfreq=self.accel_filtfreq,\n vel_filtfreq=self.accelvel_filtfreq,\n to_earth=to_earth)\n\n if 'rotate_vars' not in advo.props.keys():\n advo.props['rotate_vars'] = {'_u', 'urot', 'uacc', 'uraw',\n 'Accel', 'AccelStable',\n 'AngRt', 'Mag'}\n else:\n advo.props['rotate_vars'].update({'urot', 'uacc', 'AccelStable', 'uraw'})\n\n self._rotate_vel2body(advo)\n self._calc_rot_vel(calcobj)\n self._calc_accel_vel(calcobj)\n\n # calcobj.Accel, calcobj.AccelStable, and uacc are already in\n # the earth frame.\n advo.groups['orient'].add('AccelStable')\n advo.AccelStable = calcobj.AccelStable\n advo.add_data('uraw', advo._u.copy(), 'main')\n if to_earth:\n advo.Accel = calcobj.Accel\n inst2earth(advo, rotate_vars=advo.props['rotate_vars'] -\n {'Accel', 'AccelStable', 'uacc', })\n else:\n # rotate these variables back to the instrument frame.\n inst2earth(advo, reverse=True,\n rotate_vars={'AccelStable', 'uacc', },\n force=True,\n )\n # NOTE: The plus sign is because the measured-induced velocities\n # are in the opposite direction of the head motion.\n # i.e. when the head moves one way in stationary flow, it\n # measures a velocity in the opposite direction.\n advo._u += (advo.urot + advo.uacc)\n" ]
[ [ "numpy.dot", "scipy.signal.filtfilt", "numpy.einsum", "scipy.integrate.cumtrapz", "numpy.linalg.inv", "numpy.cos", "numpy.sin", "numpy.ones", "numpy.tan", "scipy.signal.butter", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
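The correction strategy the docstrings above call method (1), high-pass the acceleration, integrate, then high-pass the velocity, can be reproduced standalone with the same scipy calls the module imports. The cutoff frequencies and synthetic signal below are illustrative assumptions, not dolfyn's internals:

import numpy as np
from scipy.integrate import cumtrapz
from scipy.signal import butter, filtfilt

fs = 32.0  # sample rate [Hz]
t = np.arange(0, 600, 1 / fs)
accel = np.sin(2 * np.pi * 0.5 * t) + 1e-3  # wave motion plus a small bias

# 1) High-pass the acceleration to remove the slow bias.
b, a = butter(1, (1 / 30) / (fs / 2), btype="high")
accel_hp = filtfilt(b, a, accel)

# 2) Integrate to velocity (prepend a zero to keep the array length).
vel = np.concatenate(([0.0], cumtrapz(accel_hp, dx=1 / fs)))

# 3) High-pass again to suppress the drift that integration amplifies.
b2, a2 = butter(2, (1 / 90) / (fs / 2), btype="high")
vel_hp = filtfilt(b2, a2, vel)
print(vel_hp.std())  # motion-velocity estimate with the drift removed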
OSUPCVLab/PlantDiseaseCNN
[ "aeed18e9e30e53670d3e9a0bd7bd71cc73f01691" ]
[ "dcgan-generat images/model/Generator.py" ]
[ "import torch.nn as nn\n\nclass Generator(nn.Module):\n def __init__(self, nc, ngf, nz):\n super(Generator,self).__init__()\n self.layer1 = nn.Sequential(nn.ConvTranspose2d(nz,ngf*32,kernel_size=4),\n nn.BatchNorm2d(ngf*32),\n nn.ReLU())\n\n self.layer2 = nn.Sequential(nn.ConvTranspose2d(ngf*32,ngf*16,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*16),\n nn.ReLU())\n\n self.layer3 = nn.Sequential(nn.ConvTranspose2d(ngf*16,ngf*8,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*8),\n nn.ReLU())\n # 16 x 16 (spatial size after layer3, for a 1 x 1 latent input)\n self.layer4 = nn.Sequential(nn.ConvTranspose2d(ngf*8,ngf*4,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*4),\n nn.ReLU())\n # 32 x 32 after layer4\n self.layer5 = nn.Sequential(nn.ConvTranspose2d(ngf*4,ngf*2,kernel_size=4,stride=2,padding=1),\n nn.BatchNorm2d(ngf*2),\n nn.ReLU())\n\n self.layer6 = nn.Sequential(nn.ConvTranspose2d(ngf * 2, ngf, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(ngf),\n nn.ReLU())\n # 128 x 128 after layer6\n self.layer7 = nn.Sequential(nn.ConvTranspose2d(ngf,nc,kernel_size=4,stride=2,padding=1),\n nn.Tanh())\n\n def forward(self,x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = self.layer6(out)\n out = self.layer7(out)\n return out\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Tanh", "torch.nn.ConvTranspose2d", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
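A usage sketch for the Generator in the record above: the latent code enters as a (batch, nz, 1, 1) tensor, the first transposed convolution maps 1x1 to 4x4, and the six stride-2 layers double the spatial size up to 256x256. The class is assumed to be importable from the module above:

import torch

nc, ngf, nz = 3, 64, 100  # illustrative values; any positive ints work
netG = Generator(nc, ngf, nz)  # assumes the Generator class above is in scope

noise = torch.randn(8, nz, 1, 1)  # one latent vector per fake image
fake = netG(noise)
print(fake.shape)  # torch.Size([8, 3, 256, 256])
print(float(fake.min()) >= -1.0, float(fake.max()) <= 1.0)  # Tanh output range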
rcjackson/ACT
[ "c57fb55094b142bbbef63e7069d4024049996139", "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39", "c57fb55094b142bbbef63e7069d4024049996139" ]
[ "act/qc/qctests.py", "act/retrievals/radiation.py", "act/plotting/HistogramDisplay.py" ]
[ "\"\"\"\nact.qc.qctests\n------------------------------\n\nHere we define the methods for performing the tests and putting the\nresults in the ancillary quality control variable. If you add a test\nto this file you will need to add a method reference in the main\nqcfilter class definition to make it callable.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport warnings\nfrom act.utils import get_missing_value, convert_units\n\n\n# This is a Mixins class used to allow using qcfilter class that is already\n# registered to the xarray object. All the methods in this class will be added\n# to the qcfilter class. Doing this to make the code spread across more files\n# so it is more manageable and readable. Additional files of tests can be added\n# to qcfilter by creating a new class in the new file and adding to qcfilter\n# class declaration.\nclass QCTests:\n \"\"\"\n This is a Mixins class used to allow using qcfilter class that is already\n registered to the xarray object. All the methods in this class will be added\n to the qcfilter class. Doing this to make the code spread across more files\n so it is more manageable and readable. Additional files of tests can be added\n to qcfilter by creating a new class in the new file and adding to qcfilter\n class declaration.\n\n \"\"\"\n def __init__(self, obj, **kwargs):\n self._obj = obj\n\n def add_missing_value_test(self, var_name, missing_value=None,\n missing_value_att_name='missing_value',\n test_number=None, test_assessment='Bad',\n test_meaning=None, flag_value=False,\n prepend_text=None):\n \"\"\"\n Method to add indication in quality control variable\n where data value is set to missing value.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n missing_value : int or float\n Optional missing value to use. If not provided will attempt\n to get it from the variable attribute or use NaN.\n missing_value_att_name : str\n Optional attribute name to use.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use the next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if test_meaning is None:\n test_meaning = 'Value is set to missing_value.'\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n if missing_value is None:\n missing_value = get_missing_value(self._obj, var_name, nodefault=True)\n if (missing_value is None and\n self._obj[var_name].values.dtype.type in\n (type(0.0), np.float16, np.float32, np.float64)):\n missing_value = float('nan')\n else:\n missing_value = -9999\n\n # Ensure missing_value attribute is matching data type\n missing_value = np.array(missing_value, dtype=self._obj[var_name].values.dtype.type)\n\n # New method using straight numpy instead of masked array\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n if not np.isnan(missing_value):\n index = np.equal(self._obj[var_name].values, missing_value)\n else:\n index = np.isnan(self._obj[var_name].values)\n\n test_dict = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n try:\n self._obj[var_name].attrs[missing_value_att_name]\n except KeyError:\n self._obj[var_name].attrs[missing_value_att_name] = missing_value\n\n return test_dict\n\n def add_less_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than test (i.e. minimum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None, will return without adding test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use the next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is indicate what institution added the test.\n\n Returns\n -------\n test_info : tuple\n A tuple containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_min'\n else:\n attr_name = 'fail_min'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value less than {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n # New method with straight numpy\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.less(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_greater_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than test (i.e. maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_max'\n else:\n attr_name = 'fail_max'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value greater than {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.greater(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_less_equal_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than or equal to test\n (i.e. minimum value) and add result to ancillary quality control\n variable. If ancillary quality control variable does not exist it\n will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting the test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_min'\n else:\n attr_name = 'fail_min'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value less than '\n 'or equal to {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.less_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_greater_equal_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than or equal to test\n (i.e. maximum value) and add result to ancillary quality control\n variable. If ancillary quality control variable does not exist it\n will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting the test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_max'\n else:\n attr_name = 'fail_max'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = ('Data value greater than '\n 'or equal to {}.').format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.greater_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_equal_to_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform an equal test and add result to ancillary quality\n control variable. If ancillary quality control variable does not\n exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting the test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_equal_to'\n else:\n attr_name = 'fail_equal_to'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = 'Data value equal to {}.'.format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_not_equal_to_test(self, var_name, limit_value, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_name=None,\n prepend_text=None):\n \"\"\"\n Method to perform a not equal to test and add result to ancillary\n quality control variable. If ancillary quality control variable does\n not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value : int or float or None\n Limit value to use in test. The value will be written\n to the quality control variable as an attribute. If set\n to None will return without setting the test.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if limit_value is None:\n return\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_not_equal_to'\n else:\n attr_name = 'fail_not_equal_to'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = 'Data value not equal to {}.'.format(attr_name)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n index = np.not_equal(self._obj[var_name].values, limit_value)\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = limit_value\n\n return result\n\n def add_outside_test(self, var_name, limit_value_lower, limit_value_upper,\n test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, limit_attr_names=None,\n prepend_text=None):\n \"\"\"\n Method to perform a less than or greater than test\n (i.e. outside minimum and maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value_lower : int or float\n Lower limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n limit_value_upper : int or float\n Upper limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_names : list of str\n Optional attribute names to store the limit values under\n quality control ancillary variable. 
First value is\n lower limit attribute name and second value is\n upper limit attribute name.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_names is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name_lower = 'warn_lower_range'\n attr_name_upper = 'warn_upper_range'\n else:\n attr_name_lower = 'fail_lower_range'\n attr_name_upper = 'fail_upper_range'\n else:\n attr_name_lower = limit_attr_names[0]\n attr_name_upper = limit_attr_names[1]\n\n if test_meaning is None:\n test_meaning = ('Data value less than {} '\n 'or greater than {}.').format(attr_name_lower,\n attr_name_upper)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n data = np.ma.masked_outside(self._obj[var_name].values,\n limit_value_lower, limit_value_upper)\n if data.mask.size == 1:\n data.mask = np.full(data.data.shape, data.mask, dtype=bool)\n\n index = data.mask\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value_lower = np.array(limit_value_lower, dtype=self._obj[var_name].values.dtype.type)\n limit_value_upper = np.array(limit_value_upper, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name_lower] = limit_value_lower\n self._obj[qc_var_name].attrs[attr_name_upper] = limit_value_upper\n\n return result\n\n def add_inside_test(self, var_name, limit_value_lower, limit_value_upper,\n test_meaning=None, test_assessment='Bad',\n test_number=None, flag_value=False,\n limit_attr_names=None,\n prepend_text=None):\n \"\"\"\n Method to perform a greater than or less than test\n (i.e. between minimum and maximum value) and add\n result to ancillary quality control variable. If ancillary\n quality control variable does not exist it will be created.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n limit_value_lower : int or float\n Lower limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n limit_value_upper : int or float\n Upper limit value to use in test. The value will be written\n to the quality control variable as an attribute.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n limit_attr_names : list of str\n Optional attribute names to store the limit values under\n quality control ancillary variable. 
First value is\n lower limit attribute name and second value is\n upper limit attribute name.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_names is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name_lower = 'warn_lower_range_inner'\n attr_name_upper = 'warn_upper_range_inner'\n else:\n attr_name_lower = 'fail_lower_range_inner'\n attr_name_upper = 'fail_upper_range_inner'\n else:\n attr_name_lower = limit_attr_names[0]\n attr_name_upper = limit_attr_names[1]\n\n if test_meaning is None:\n test_meaning = ('Data value greater than {} '\n 'or less than {}.').format(attr_name_lower,\n attr_name_upper)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n data = np.ma.masked_inside(self._obj[var_name].values,\n limit_value_lower, limit_value_upper)\n if data.mask.size == 1:\n data.mask = np.full(data.data.shape, data.mask, dtype=bool)\n\n index = data.mask\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure limit_value attribute is matching data type\n limit_value_lower = np.array(limit_value_lower, dtype=self._obj[var_name].values.dtype.type)\n limit_value_upper = np.array(limit_value_upper, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name_lower] = limit_value_lower\n self._obj[qc_var_name].attrs[attr_name_upper] = limit_value_upper\n\n return result\n\n def add_persistence_test(self, var_name, window=10, test_limit=0.0001,\n min_periods=1, center=True, test_meaning=None,\n test_assessment='Bad', test_number=None,\n flag_value=False, prepend_text=None):\n \"\"\"\n Method to perform a persistence test over 1-D data.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n window : int\n Optional number of data samples to use in the calculation of\n standard deviation to test for consistent data.\n test_limit : float\n Optional test limit; a standard deviation less than this\n value will trigger the test.\n min_periods : int\n Optional number of minimum values to use in the moving window.\n Setting to 1 so this correctly handles NaNs.\n center : boolean\n Optional setting for where within the moving window to report the standard\n deviation values. Used in the .rolling.std() calculation with xarray.\n test_meaning : str\n The optional text description to add to flag_meanings\n describing the test. Will add a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will set a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next\n available test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n data = self._obj[var_name]\n if window > data.size:\n window = data.size\n\n if test_meaning is None:\n test_meaning = ('Data failing persistence test. '\n 'Standard Deviation over a window of {} values '\n 'less than {}.').format(window, test_limit)\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # Honor the center keyword argument instead of hard-coding center=True\n stddev = data.rolling(time=window, min_periods=min_periods, center=center).std()\n index = stddev < test_limit\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n return result\n\n def add_difference_test(self, var_name, dataset2_dict=None, ds2_var_name=None,\n diff_limit=None, tolerance=\"1m\",\n set_test_regardless=True,\n apply_assessment_to_dataset2=None,\n apply_tests_to_dataset2=None,\n test_meaning=None, test_assessment='Bad',\n test_number=None, flag_value=False,\n prepend_text=None):\n \"\"\"\n Method to perform a comparison test on time series data. Tested on 1-D\n data only. Will check if units and long_name indicate a direction and\n compensate for 0 to 360 degree transition.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n dataset2_dict : dict\n Dictionary with key equal to datastream name and value\n equal to xarray dataset containing variable to compare. If not provided\n will assume second dataset is the same as self dataset.\n ds2_var_name : str\n Comparison dataset variable name to compare.\n diff_limit : int or float\n Difference limit for comparison.\n apply_assessment_to_dataset2 : str or list of str\n Option to filter comparison dataset variable using corresponding\n quality control variable using assessments. Example would be\n ['Bad'], where all quality control data with assessment Bad will\n not be used in this test.\n apply_tests_to_dataset2 : int or list of int\n Option to filter comparison dataset variable using corresponding\n quality control variable using test numbers. Example would be\n [2,4], where all quality control data with test numbers 2 or 4 set\n will not be used in this test.\n tolerance : str\n Optional text indicating the time tolerance for aligning two\n DataArrays.\n set_test_regardless : boolean\n Option to set test description even if no data in comparison data\n set.\n test_meaning : str\n Optional text description to add to flag_meanings\n describing the test. Will use a default if not set.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will use a default if not set.\n test_number : int\n Optional test number to use. 
If not set will use next available\n test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n if dataset2_dict is None:\n dataset2_dict = {'second_dataset': self._obj}\n\n if not isinstance(dataset2_dict, dict):\n raise ValueError('You did not provide a dictionary containing the '\n 'datastream name as the key and xarray dataset as '\n 'the value for dataset2_dict for add_difference_test().')\n\n if diff_limit is None:\n raise ValueError('You did not provide a test limit for add_difference_test().')\n\n datastream2 = list(dataset2_dict.keys())[0]\n dataset2 = dataset2_dict[datastream2]\n\n if set_test_regardless is False and type(dataset2) != xr.core.dataset.Dataset:\n return\n\n if test_meaning is None:\n if dataset2 is self._obj:\n var_name2 = f'{ds2_var_name}'\n else:\n var_name2 = f'{datastream2}:{ds2_var_name}'\n\n test_meaning = (f'Difference between {var_name} and {var_name2} '\n f'greater than {diff_limit} {self._obj[var_name].attrs[\"units\"]}')\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n if tolerance is not None:\n tolerance = pd.Timedelta(tolerance)\n\n index = []\n if type(dataset2) == xr.core.dataset.Dataset:\n if apply_assessment_to_dataset2 is not None or apply_tests_to_dataset2 is not None:\n dataset2[ds2_var_name].values = dataset2.qcfilter.get_masked_data(\n ds2_var_name, rm_assessments=apply_assessment_to_dataset2,\n rm_tests=apply_tests_to_dataset2, return_nan_array=True)\n\n df_a = pd.DataFrame({'time': self._obj['time'].values,\n var_name: self._obj[var_name].values})\n data_b = convert_units(dataset2[ds2_var_name].values,\n dataset2[ds2_var_name].attrs['units'],\n self._obj[var_name].attrs['units'])\n ds2_var_name = ds2_var_name + '_newname'\n df_b = pd.DataFrame({'time': dataset2['time'].values,\n ds2_var_name: data_b})\n\n pd_c = pd.merge_asof(df_a, df_b, on='time', tolerance=tolerance,\n direction=\"nearest\")\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # Check if variable is for wind direction comparisons. Fix\n # for 0 - 360 degrees transition. This is done by adding 360 degrees to\n # all wind values and using modulus to get the minimum difference number.\n # This is done for both a-b and b-a and then choosing the minimum number\n # to compensate for large differences.\n wdir_units = ['deg', 'degree', 'degrees', 'degs']\n if (self._obj[var_name].attrs['units'] in wdir_units and\n 'direction' in self._obj[var_name].attrs['long_name'].lower()):\n diff1 = np.mod(np.absolute((pd_c[var_name] + 360.) -\n (pd_c[ds2_var_name] + 360.)), 360)\n diff2 = np.mod(np.absolute((pd_c[ds2_var_name] + 360.) 
-\n (pd_c[var_name] + 360.)), 360)\n diff = np.array([diff1, diff2])\n diff = np.nanmin(diff, axis=0)\n\n else:\n diff = np.absolute(pd_c[var_name] - pd_c[ds2_var_name])\n\n index = diff > diff_limit\n\n result = self._obj.qcfilter.add_test(\n var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n return result\n\n def add_delta_test(self, var_name, diff_limit=1, test_meaning=None,\n limit_attr_name=None,\n test_assessment='Indeterminate', test_number=None,\n flag_value=False, prepend_text=None):\n \"\"\"\n Method to perform a difference test on adjacent values in time series.\n Will flag both values where a difference is greater\n than or equal to the difference limit. Tested with 1-D data only. Not\n sure what will happen with higher dimensional data.\n\n Parameters\n ----------\n var_name : str\n Data variable name.\n diff_limit : int or float\n Difference limit.\n test_meaning : str\n Optional text description to add to flag_meanings\n describing the test. Will use a default if not set.\n limit_attr_name : str\n Optional attribute name to store the limit_value under\n quality control ancillary variable.\n test_assessment : str\n Optional single word describing the assessment of the test.\n Will use a default if not set.\n test_number : int\n Optional test number to use. If not set will use next available\n test number.\n flag_value : boolean\n Indicates that the tests are stored as integers\n not bit packed values in quality control variable.\n prepend_text : str\n Optional text to prepend to the test meaning.\n Example is to indicate what institution added the test.\n\n Returns\n -------\n test_info : dict\n A dictionary containing test information including var_name, qc variable name,\n test_number, test_meaning, test_assessment\n\n \"\"\"\n\n if limit_attr_name is None:\n if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':\n attr_name = 'warn_delta'\n else:\n attr_name = 'fail_delta'\n else:\n attr_name = limit_attr_name\n\n if test_meaning is None:\n test_meaning = f'Difference between current and previous values exceeds {attr_name}.'\n\n if prepend_text is not None:\n test_meaning = ': '.join((prepend_text, test_meaning))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # Check if variable is for wind direction comparisons by units. Fix\n # for 0 - 360 degrees transition. 
This is done by adding 360 degrees to\n # all wind values and using modulus to get the minimum difference number.\n wdir_units = ['deg', 'degree', 'degrees', 'degs']\n if (self._obj[var_name].attrs['units'] in wdir_units and\n 'direction' in self._obj[var_name].attrs['long_name'].lower()):\n abs_diff = np.mod(np.abs(np.diff(self._obj[var_name].values)), 360)\n else:\n abs_diff = np.abs(np.diff(self._obj[var_name].values))\n\n index = np.where(abs_diff >= diff_limit)[0]\n if index.size > 0:\n index = np.append(index, index + 1)\n index = np.unique(index)\n\n result = self._obj.qcfilter.add_test(var_name, index=index,\n test_number=test_number,\n test_meaning=test_meaning,\n test_assessment=test_assessment,\n flag_value=flag_value)\n\n # Ensure min value attribute is matching data type\n diff_limit = np.array(diff_limit, dtype=self._obj[var_name].values.dtype.type)\n\n qc_var_name = result['qc_variable_name']\n self._obj[qc_var_name].attrs[attr_name] = diff_limit\n\n return result\n", "\"\"\"\nact.retrievals.radiation\n------------------------\n\nModule for solar radiation related calculations and retrievals\n\n\"\"\"\n\nimport numpy as np\nimport xarray as xr\nfrom scipy.constants import Stefan_Boltzmann\nfrom act.utils.datetime_utils import datetime64_to_datetime\nfrom act.utils.geo_utils import get_solar_azimuth_elevation\n\n\ndef calculate_dsh_from_dsdh_sdn(obj, dsdh='down_short_diffuse_hemisp',\n sdn='short_direct_normal', lat='lat',\n lon='lon'):\n \"\"\"\n\n Function to derive the downwelling shortwave hemispheric irradiance from the\n downwelling shortwave diffuse hemispheric irradiance (dsdh) and the shortwave\n direct normal irradiance (sdn) at a given location (lat,lon)\n\n Parameters\n ----------\n obj : Xarray dataset\n Object where variables for these calculations are stored\n dsdh : str\n Name of the downwelling shortwave diffuse hemispheric irradiance field to use.\n Defaults to 'down_short_diffuse_hemisp'.\n sdn : str\n Name of shortwave direct normal irradiance field to use.\n Defaults to 'short_direct_normal'.\n lat : str\n Name of latitude field in dataset to use. Defaults to 'lat'.\n lon : str\n Name of longitude field in dataset to use. Defaults to 'lon'.\n\n Returns\n -------\n\n obj: Xarray dataset\n ACT Xarray dataset object with calculations included as new variables.\n\n \"\"\"\n\n # Calculating Derived Down Short Hemisp\n tt = datetime64_to_datetime(obj['time'].values)\n elevation, _, _ = get_solar_azimuth_elevation(obj[lat].values, obj[lon].values, tt)\n # Note: this is the cosine of the solar zenith angle, i.e. sin(elevation)\n solar_zenith = np.cos(np.radians(90. 
- elevation))\n dsh = (obj[dsdh].values + (solar_zenith * obj[sdn].values))\n\n # Add data back to object\n atts = {'long_name': 'Derived Downwelling Shortwave Hemispheric Irradiance', 'units': 'W/m^2'}\n da = xr.DataArray(dsh, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['derived_down_short_hemisp'] = da\n\n return obj\n\n\ndef calculate_irradiance_stats(obj, variable=None, variable2=None, diff_output_variable=None,\n ratio_output_variable=None, threshold=None):\n \"\"\"\n\n Function to calculate the difference and ratio between two irradiances.\n\n Parameters\n ----------\n obj : ACT object\n Object where variables for these calculations are stored\n variable : str\n Name of the first irradiance variable\n variable2 : str\n Name of the second irradiance variable\n diff_output_variable : str\n Variable name to store the difference results\n Defaults to 'diff_' + variable\n ratio_output_variable : str\n Variable name to store the ratio results\n Defaults to 'ratio_' + variable\n threshold : float\n Threshold below which both variables must fall for the ratio\n to be set to NaN\n\n Returns\n -------\n\n obj: ACT Object\n Object with calculations included as new variables.\n\n \"\"\"\n\n if variable is None or variable2 is None:\n return obj\n if diff_output_variable is None:\n diff_output_variable = 'diff_' + variable\n if ratio_output_variable is None:\n ratio_output_variable = 'ratio_' + variable\n\n # ---------------------------------\n # Calculating Difference\n # ---------------------------------\n diff = obj[variable] - obj[variable2]\n atts = {'long_name': ' '.join(['Difference between', variable, 'and', variable2]), 'units': 'W/m^2'}\n da = xr.DataArray(diff, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj[diff_output_variable] = da\n\n # ---------------------------------\n # Calculating Irradiance Ratio\n # ---------------------------------\n ratio = obj[variable].values / obj[variable2].values\n if threshold is not None:\n index = np.where((obj[variable].values < threshold) & (obj[variable2].values < threshold))\n ratio[index] = np.nan\n\n atts = {'long_name': ' '.join(['Ratio between', variable, 'and', variable2]), 'units': ''}\n da = xr.DataArray(ratio, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj[ratio_output_variable] = da\n\n return obj\n\n\ndef calculate_net_radiation(obj, ush='up_short_hemisp', ulh='up_long_hemisp', dsh='down_short_hemisp',\n dlhs='down_long_hemisp_shaded', smooth=None):\n\n \"\"\"\n\n Function to calculate the net radiation from upwelling short and long-wave irradiance and\n downwelling short and long-wave hemispheric irradiances\n\n Parameters\n ----------\n obj : ACT object\n Object where variables for these calculations are stored\n ush : str\n Name of the upwelling shortwave hemispheric variable\n ulh : str\n Name of the upwelling longwave hemispheric variable\n dsh : str\n Name of the downwelling shortwave hemispheric variable\n dlhs : str\n Name of the downwelling longwave hemispheric variable\n smooth : int\n Smoothing to apply to the net radiation. 
This will create an additional variable\n\n Returns\n -------\n\n obj: ACT Object\n Object with calculations included as new variables.\n\n \"\"\"\n\n # Calculate Net Radiation\n ush_da = obj[ush]\n ulh_da = obj[ulh]\n dsh_da = obj[dsh]\n dlhs_da = obj[dlhs]\n\n net = -ush_da + dsh_da - ulh_da + dlhs_da\n\n atts = {'long_name': 'Calculated Net Radiation', 'units': 'W/m^2'}\n da = xr.DataArray(net, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['net_radiation'] = da\n\n if smooth is not None:\n net_smoothed = net.rolling(time=smooth).mean()\n atts = {'long_name': 'Net Radiation Smoothed by ' + str(smooth), 'units': 'W/m^2'}\n da = xr.DataArray(net_smoothed, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['net_radiation_smoothed'] = da\n\n return obj\n\n\ndef calculate_longwave_radiation(obj, temperature_var=None, vapor_pressure_var=None, met_obj=None,\n emiss_a=0.61, emiss_b=0.06):\n\n \"\"\"\n\n Function to calculate longwave radiation during clear and cloudy sky conditions\n using equations from Monteith and Unsworth 2013, Prata 1996, as reported in\n Splitt and Bahrmann 1999.\n\n Parameters\n ----------\n obj : ACT object\n Object where variables for these calculations are stored\n temperature_var : str\n Name of the temperature variable to use\n vapor_pressure_var : str\n Name of the vapor pressure variable to use\n met_obj : ACT object\n Object where surface meteorological variables for these calculations are stored\n if not given, will assume they are in the main object passed in\n emiss_a : float\n a coefficient for the emissivity calculation of e = a + bT\n emiss_b : float\n a coefficient for the emissivity calculation of e = a + bT\n\n Returns\n -------\n obj : ACT object\n ACT object with 3 new variables; monteith_clear, monteith_cloudy, prata_clear\n\n References\n ---------\n Monteith, John L., and Mike H. Unsworth. 2013. Principles of Environmental Physics.\n Edited by John L. Monteith and Mike H. Unsworth. Boston: Academic Press.\n\n Prata, A. J. 1996. “A New Long-Wave Formula for Estimating Downward Clear-Sky Radiation at\n the Surface.” Quarterly Journal of the Royal Meteorological Society 122 (533): 1127–51.\n\n Splitt, M. E., and C. P. Bahrmann. 1999. Improvement in the Assessment of SIRS Broadband\n Longwave Radiation Data Quality. Ninth ARM Science Team Meeting Proceedings,\n San Antonio, Texas, March 22-26\n\n \"\"\"\n if met_obj is not None:\n\n T = met_obj[temperature_var] + 273.15 # C to K\n e = met_obj[vapor_pressure_var] * 10. # kpa to hpa\n else:\n T = obj[temperature_var] + 273.15 # C to K\n e = obj[vapor_pressure_var] * 10. 
# kpa to hpa\n\n if len(T) == 0 or len(e) == 0:\n raise ValueError('Temperature and Vapor Pressure are Needed')\n\n # Get Stefan Boltzmann Constant\n stefan = Stefan_Boltzmann\n\n # Calculate sky emissivity from Splitt and Bahrmann 1999\n esky = emiss_a + emiss_b * np.sqrt(e)\n\n # Base clear sky longwave calculation from Monteith 2013\n lw_calc_clear = esky * stefan * T**4\n\n # Prata 1996 Calculation\n xi = 46.5 * (e / T)\n lw_calc_clear_prata = (1.0 - (1.0 + xi) * np.exp(-(1.2 + 3.0 * xi)**.5)) * stefan * T**4\n\n # Monteith Cloudy Calculation as indicated by Splitt and Bahrmann 1999\n lw_calc_cldy = esky * (1.0 + (0.178 - 0.00957 * (T - 290.))) * stefan * T**4\n\n atts = {'long_name': 'Clear Sky Estimate-(Monteith, 1973)', 'units': 'W/m^2'}\n da = xr.DataArray(lw_calc_clear, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['monteith_clear'] = da\n\n atts = {'long_name': 'Overcast Sky Estimate-(Monteith, 1973)', 'units': 'W/m^2'}\n da = xr.DataArray(lw_calc_cldy, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['monteith_cloudy'] = da\n\n atts = {'long_name': 'Clear Sky Estimate-(Prata, 1996)', 'units': 'W/m^2'}\n da = xr.DataArray(lw_calc_clear_prata, coords={'time': obj['time'].values}, dims=['time'], attrs=atts)\n obj['prata_clear'] = da\n\n return obj\n", "\"\"\" Module for Histogram Plotting. \"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\n\nfrom .plot import Display\nfrom ..utils import datetime_utils as dt_utils\n\n\nclass HistogramDisplay(Display):\n \"\"\"\n This class is used to make histogram plots. It is inherited from Display\n and therefore contains all of Display's attributes and methods.\n\n Examples\n --------\n To create a HistogramDisplay with 3 rows, simply do:\n\n .. code-block:: python\n\n ds = act.read_netcdf(the_file)\n disp = act.plotting.HistogramDisplay(\n ds, subplot_shape=(3,), figsize=(15,5))\n\n The HistogramDisplay constructor takes in the same keyword arguments as\n plt.subplots. 
For more information on the plt.subplots keyword arguments,\n see the `matplotlib documentation\n <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.\n If no subplot_shape is provided, then no figure or axis will be created\n until add_subplots or plots is called.\n\n \"\"\"\n def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):\n super().__init__(obj, subplot_shape, ds_name, **kwargs)\n\n def set_xrng(self, xrng, subplot_index=(0,)):\n \"\"\"\n Sets the x range of the plot.\n\n Parameters\n ----------\n xrng : 2 number array\n The x limits of the plot.\n subplot_index : 1 or 2D tuple, list, or array\n The index of the subplot to set the x range of.\n\n \"\"\"\n if self.axes is None:\n raise RuntimeError(\"set_xrng requires the plot to be displayed.\")\n\n if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:\n self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2),\n dtype='datetime64[D]')\n elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:\n self.xrng = np.zeros((self.axes.shape[0], 2),\n dtype='datetime64[D]')\n\n self.axes[subplot_index].set_xlim(xrng)\n self.xrng[subplot_index, :] = np.array(xrng)\n\n def set_yrng(self, yrng, subplot_index=(0,)):\n \"\"\"\n Sets the y range of the plot.\n\n Parameters\n ----------\n yrng : 2 number array\n The y limits of the plot.\n subplot_index : 1 or 2D tuple, list, or array\n The index of the subplot to set the y range of.\n\n \"\"\"\n if self.axes is None:\n raise RuntimeError(\"set_yrng requires the plot to be displayed.\")\n\n if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:\n self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))\n elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:\n self.yrng = np.zeros((self.axes.shape[0], 2))\n\n if yrng[0] == yrng[1]:\n yrng[1] = yrng[1] + 1\n\n self.axes[subplot_index].set_ylim(yrng)\n self.yrng[subplot_index, :] = yrng\n\n def plot_stacked_bar_graph(self, field, dsname=None, bins=None,\n sortby_field=None, sortby_bins=None,\n subplot_index=(0, ), set_title=None,\n density=False, **kwargs):\n \"\"\"\n This procedure will plot a stacked bar graph of a histogram.\n\n Parameters\n ----------\n field : str\n The name of the field to take the histogram of.\n dsname : str or None\n The name of the datastream the field is contained in. Set\n to None to let ACT automatically determine this.\n bins : array-like or None\n The histogram bin boundaries to use. Set to None to use\n numpy's default boundaries.\n sortby_field : str or None\n Set this option to a field name in order to sort the histograms\n by a given field parameter. For example, one can sort histograms of CO2\n concentration by temperature.\n sortby_bins : array-like or None\n The bins to sort the histograms by.\n subplot_index : tuple\n The subplot index to place the plot in\n set_title : str\n The title of the plot.\n density : bool\n Set to True to plot a p.d.f. 
instead of a frequency histogram.\n\n Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.\n\n Returns\n -------\n return_dict : dict\n A dictionary containing the plot axis handle, bin boundaries, and\n generated histogram.\n\n \"\"\"\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \" +\n \"or more datasets in the HistogramDisplay \" +\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n xdata = self._arm[dsname][field]\n\n if 'units' in xdata.attrs:\n xtitle = ''.join(['(', xdata.attrs['units'], ')'])\n else:\n xtitle = field\n\n if sortby_field is not None:\n ydata = self._arm[dsname][sortby_field]\n\n if bins is not None and sortby_bins is None and sortby_field is not None:\n # We will default the y direction to have the same # of bins as x\n sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))\n\n # Get the current plotting axis, add day/night background and plot data\n if self.fig is None:\n self.fig = plt.figure()\n\n if self.axes is None:\n self.axes = np.array([plt.axes()])\n self.fig.add_axes(self.axes[0])\n\n if sortby_field is not None:\n if 'units' in ydata.attrs:\n ytitle = ''.join(['(', ydata.attrs['units'], ')'])\n else:\n ytitle = field\n if bins is None:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(), density=density)\n else:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(),\n density=density, bins=[bins, sortby_bins])\n x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0\n self.axes[subplot_index].bar(\n x_inds, my_hist[:, 0].flatten(),\n label=(str(y_bins[0]) + \" to \" + str(y_bins[1])), **kwargs)\n for i in range(1, len(y_bins) - 1):\n self.axes[subplot_index].bar(\n x_inds, my_hist[:, i].flatten(),\n bottom=my_hist[:, i - 1],\n label=(str(y_bins[i]) + \" to \" + str(y_bins[i + 1])), **kwargs)\n self.axes[subplot_index].legend()\n else:\n if bins is None:\n bmin = np.nanmin(xdata)\n bmax = np.nanmax(xdata)\n bins = np.arange(bmin, bmax, (bmax - bmin) / 10.)\n my_hist, bins = np.histogram(\n xdata.values.flatten(), bins=bins, density=density)\n x_inds = (bins[:-1] + bins[1:]) / 2.0\n self.axes[subplot_index].bar(x_inds, my_hist, **kwargs)\n\n # Set Title\n if set_title is None:\n set_title = ' '.join([dsname, field, 'on',\n dt_utils.numpy_to_arm_date(\n self._arm[dsname].time.values[0])])\n self.axes[subplot_index].set_title(set_title)\n self.axes[subplot_index].set_ylabel(\"count\")\n self.axes[subplot_index].set_xlabel(xtitle)\n\n return_dict = {}\n return_dict[\"plot_handle\"] = self.axes[subplot_index]\n if 'x_bins' in locals():\n return_dict[\"x_bins\"] = x_bins\n return_dict[\"y_bins\"] = y_bins\n else:\n return_dict[\"bins\"] = bins\n return_dict[\"histogram\"] = my_hist\n\n return return_dict\n\n def plot_size_distribution(self, field, bins, time=None, dsname=None,\n subplot_index=(0, ), set_title=None,\n **kwargs):\n \"\"\"\n This procedure plots a stairstep plot of a size distribution. This is\n useful for plotting size distributions and waveforms.\n\n Parameters\n ----------\n field : str\n The name of the field to plot the spectrum from.\n bins : str or array-like\n The name of the field that stores the bins for the spectra.\n time : none or datetime\n If None, spectra to plot will be automatically determined.\n Otherwise, specify this field for the time period to plot.\n dsname : str\n The name of the Dataset to plot. 
Set to None to have\n ACT automatically determine this.\n subplot_index : tuple\n The subplot index to place the plot in.\n set_title : str or None\n Use this to set the title.\n\n Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`\n\n Returns\n -------\n ax : matplotlib axis handle\n The matplotlib axis handle referring to the plot.\n\n \"\"\"\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \" +\n \"or more datasets in the HistogramDisplay \" +\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n xdata = self._arm[dsname][field]\n\n if isinstance(bins, str):\n bins = self._arm[dsname][bins]\n else:\n bins = xr.DataArray(bins)\n\n if 'units' in bins.attrs:\n xtitle = ''.join(['(', bins.attrs['units'], ')'])\n else:\n xtitle = 'Bin #'\n\n if 'units' in xdata.attrs:\n ytitle = ''.join(['(', xdata.attrs['units'], ')'])\n else:\n ytitle = field\n\n if len(xdata.dims) > 1 and time is None:\n raise ValueError((\"Input data has more than one dimension, \" +\n \"you must specify a time to plot!\"))\n elif len(xdata.dims) > 1:\n xdata = xdata.sel(time=time, method='nearest')\n\n if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):\n raise ValueError(\"Bins must be a one dimensional field whose \" +\n \"length is equal to the field length!\")\n\n # Get the current plotting axis, add day/night background and plot data\n if self.fig is None:\n self.fig = plt.figure()\n\n if self.axes is None:\n self.axes = np.array([plt.axes()])\n self.fig.add_axes(self.axes[0])\n\n # Set Title\n if set_title is None:\n set_title = ' '.join([dsname, field, 'on',\n dt_utils.numpy_to_arm_date(\n self._arm[dsname].time.values[0])])\n\n self.axes[subplot_index].set_title(set_title)\n # Pass keyword arguments through to the step plot as documented above\n self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)\n self.axes[subplot_index].set_xlabel(xtitle)\n self.axes[subplot_index].set_ylabel(ytitle)\n\n return self.axes[subplot_index]\n\n def plot_stairstep_graph(self, field, dsname=None, bins=None,\n sortby_field=None, sortby_bins=None,\n subplot_index=(0, ),\n set_title=None,\n density=False, **kwargs):\n \"\"\"\n This procedure will plot a stairstep plot of a histogram.\n\n Parameters\n ----------\n field : str\n The name of the field to take the histogram of.\n dsname : str or None\n The name of the datastream the field is contained in. Set\n to None to let ACT automatically determine this.\n bins : array-like or None\n The histogram bin boundaries to use. Set to None to use\n numpy's default boundaries.\n sortby_field : str or None\n Set this option to a field name in order to sort the histograms\n by a given field parameter. For example, one can sort histograms of CO2\n concentration by temperature.\n sortby_bins : array-like or None\n The bins to sort the histograms by.\n subplot_index : tuple\n The subplot index to place the plot in.\n set_title : str\n The title of the plot.\n density : bool\n Set to True to plot a p.d.f. 
instead of a frequency histogram.\n\n Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.\n\n Returns\n -------\n return_dict : dict\n A dictionary containing the plot axis handle, bin boundaries, and\n generated histogram.\n\n \"\"\"\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \" +\n \"or more datasets in the HistogramDisplay \" +\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n xdata = self._arm[dsname][field]\n\n if 'units' in xdata.attrs:\n xtitle = ''.join(['(', xdata.attrs['units'], ')'])\n else:\n xtitle = field\n\n if sortby_field is not None:\n ydata = self._arm[dsname][sortby_field]\n\n if bins is not None and sortby_bins is None and sortby_field is not None:\n # We will default the y direction to have the same # of bins as x\n sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))\n\n # Get the current plotting axis, add day/night background and plot data\n if self.fig is None:\n self.fig = plt.figure()\n\n if self.axes is None:\n self.axes = np.array([plt.axes()])\n self.fig.add_axes(self.axes[0])\n\n if sortby_field is not None:\n if 'units' in ydata.attrs:\n ytitle = ''.join(['(', ydata.attrs['units'], ')'])\n else:\n ytitle = field\n if bins is None:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(), density=density)\n else:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(),\n density=density, bins=[bins, sortby_bins])\n x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0\n self.axes[subplot_index].step(\n x_inds, my_hist[:, 0].flatten(),\n label=(str(y_bins[0]) + \" to \" + str(y_bins[1])), **kwargs)\n for i in range(1, len(y_bins) - 1):\n self.axes[subplot_index].step(\n x_inds, my_hist[:, i].flatten(),\n label=(str(y_bins[i]) + \" to \" + str(y_bins[i + 1])), **kwargs)\n self.axes[subplot_index].legend()\n else:\n # Guard against bins=None, mirroring plot_stacked_bar_graph, since\n # np.histogram does not accept bins=None\n if bins is None:\n bmin = np.nanmin(xdata)\n bmax = np.nanmax(xdata)\n bins = np.arange(bmin, bmax, (bmax - bmin) / 10.)\n my_hist, bins = np.histogram(\n xdata.values.flatten(), bins=bins, density=density)\n x_inds = (bins[:-1] + bins[1:]) / 2.0\n self.axes[subplot_index].step(x_inds, my_hist, **kwargs)\n\n # Set Title\n if set_title is None:\n set_title = ' '.join([dsname, field, 'on',\n dt_utils.numpy_to_arm_date(\n self._arm[dsname].time.values[0])])\n self.axes[subplot_index].set_title(set_title)\n self.axes[subplot_index].set_ylabel(\"count\")\n self.axes[subplot_index].set_xlabel(xtitle)\n\n return_dict = {}\n return_dict[\"plot_handle\"] = self.axes[subplot_index]\n if 'x_bins' in locals():\n return_dict[\"x_bins\"] = x_bins\n return_dict[\"y_bins\"] = y_bins\n else:\n return_dict[\"bins\"] = bins\n return_dict[\"histogram\"] = my_hist\n\n return return_dict\n\n def plot_heatmap(self, x_field, y_field, dsname=None, x_bins=None, y_bins=None,\n subplot_index=(0, ), set_title=None,\n density=False, **kwargs):\n \"\"\"\n This procedure will plot a heatmap of a histogram from 2 variables.\n\n Parameters\n ----------\n x_field : str\n The name of the field to take the histogram of on the X axis.\n y_field : str\n The name of the field to take the histogram of on the Y axis.\n dsname : str or None\n The name of the datastream the field is contained in. 
Set\n to None to let ACT automatically determine this.\n x_bins : array-like or None\n The histogram bin boundaries to use for the variable on the X axis.\n Set to None to use numpy's default boundaries.\n y_bins : array-like or None\n The histogram bin boundaries to use for the variable on the Y axis.\n Set to None to use numpy's default boundaries.\n subplot_index : tuple\n The subplot index to place the plot in\n set_title : str\n The title of the plot.\n density : bool\n Set to True to plot a p.d.f. instead of a frequency histogram.\n\n Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.\n\n Returns\n -------\n return_dict : dict\n A dictionary containing the plot axis handle, bin boundaries, and\n generated histogram.\n\n \"\"\"\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \"\n \"or more datasets in the HistogramDisplay \"\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n xdata = self._arm[dsname][x_field]\n\n if 'units' in xdata.attrs:\n xtitle = ''.join(['(', xdata.attrs['units'], ')'])\n else:\n xtitle = x_field\n ydata = self._arm[dsname][y_field]\n\n if x_bins is not None and y_bins is None:\n # We will default the y direction to have the same # of bins as x\n y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))\n\n # Get the current plotting axis, add day/night background and plot data\n if self.fig is None:\n self.fig = plt.figure()\n\n if self.axes is None:\n self.axes = np.array([plt.axes()])\n self.fig.add_axes(self.axes[0])\n\n if 'units' in ydata.attrs:\n ytitle = ''.join(['(', ydata.attrs['units'], ')'])\n else:\n ytitle = y_field\n\n if x_bins is None:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(), density=density)\n else:\n my_hist, x_bins, y_bins = np.histogram2d(\n xdata.values.flatten(), ydata.values.flatten(),\n density=density, bins=[x_bins, y_bins])\n x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0\n y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0\n xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')\n mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, **kwargs)\n\n # Set Title\n if set_title is None:\n set_title = ' '.join([dsname, 'on',\n dt_utils.numpy_to_arm_date(\n self._arm[dsname].time.values[0])])\n self.axes[subplot_index].set_title(set_title)\n self.axes[subplot_index].set_ylabel(ytitle)\n self.axes[subplot_index].set_xlabel(xtitle)\n self.add_colorbar(mesh, title=\"count\", subplot_index=subplot_index)\n\n return_dict = {}\n return_dict[\"plot_handle\"] = self.axes[subplot_index]\n return_dict[\"x_bins\"] = x_bins\n return_dict[\"y_bins\"] = y_bins\n return_dict[\"histogram\"] = my_hist\n\n return return_dict\n" ]
[ [ "numpy.ma.masked_outside", "numpy.nanmin", "pandas.DataFrame", "numpy.where", "numpy.greater", "numpy.unique", "numpy.less", "numpy.full", "numpy.greater_equal", "numpy.diff", "numpy.less_equal", "numpy.isnan", "pandas.Timedelta", "numpy.append", "numpy.equal", "numpy.not_equal", "numpy.array", "pandas.merge_asof", "numpy.ma.masked_inside", "numpy.absolute" ], [ "numpy.exp", "numpy.radians", "numpy.where", "numpy.sqrt" ], [ "numpy.nanmax", "numpy.meshgrid", "numpy.arange", "numpy.nanmin", "matplotlib.pyplot.axes", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aishraghavan/introtodeeplearning
[ "2cd42e4279cea670ee45b95e1ca92ae899eaf721" ]
[ "mitdeeplearning/lab3.py" ]
[ "import io\nimport base64\nfrom IPython.display import HTML\nimport gym\nimport numpy as np\n\ndef play_video(filename):\n encoded = base64.b64encode(io.open(filename, 'r+b').read())\n embedded = HTML(data='''\n <video controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" />\n </video>'''.format(encoded.decode('ascii')))\n\n return embedded\n\ndef preprocess_pong(image):\n I = image[35:195] # Crop\n I = I[::2, ::2, 0] # Downsample width and height by a factor of 2\n I[I == 144] = 0 # Remove background type 1\n I[I == 109] = 0 # Remove background type 2\n I[I != 0] = 1 # Set remaining elements (paddles, ball, etc.) to 1\n return I.astype(np.float).reshape(80, 80, 1)\n\n\ndef save_video_of_model(model, env_name, obs_diff=False, pp_fn=None):\n import skvideo.io\n from pyvirtualdisplay import Display\n display = Display(visible=0, size=(400, 300))\n display.start()\n\n if pp_fn is None:\n pp_fn = lambda x: x\n\n env = gym.make(env_name)\n obs = env.reset()\n obs = pp_fn(obs)\n prev_obs = obs\n\n filename = env_name + \".mp4\"\n output_video = skvideo.io.FFmpegWriter(filename)\n\n counter = 0\n done = False\n while not done:\n frame = env.render(mode='rgb_array')\n output_video.writeFrame(frame)\n\n if obs_diff:\n input_obs = obs - prev_obs\n else:\n input_obs = obs\n action = model(np.expand_dims(input_obs, 0)).numpy().argmax()\n\n prev_obs = obs\n obs, reward, done, info = env.step(action)\n obs = pp_fn(obs)\n counter += 1\n\n output_video.close()\n print(\"Successfully saved {} frames into {}!\".format(counter, filename))\n return filename\n" ]
[ [ "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zubairahmed-ai/keras-yolo3
[ "5ba9ab3c99ee1b5e71b614a5464cb316d35cc9b3" ]
[ "yolo.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRun a YOLO_v3 style detection model on test images.\n\"\"\"\n\nimport colorsys\nimport os\nimport random\nimport time\nimport cv2\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom PIL import Image, ImageDraw, ImageFont\nfrom timeit import time\nfrom timeit import default_timer as timer ### to calculate FPS\n\nfrom yolo3.model import yolo_eval\n\nclass YOLO(object):\n def __init__(self):\n self.model_path = 'model_data/yolo.h5'\n self.anchors_path = 'model_data/yolo_anchors.txt'\n self.classes_path = 'model_data/coco_classes.txt'\n self.score = 0.3\n self.iou = 0.5\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n return anchors\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'\n\n self.yolo_model = load_model(model_path)\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n self.model_image_size = self.yolo_model.layers[0].input_shape[1:3]\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n random.seed(10101) # Fixed seed for consistent colors across runs.\n random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n # TODO: Wrap these backend operations with Keras layers.\n self.input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = time.time()\n resized_image = image.resize(tuple(reversed(self.model_image_size)), Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf', size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = 
ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = time.time()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\n\ndef detect_video(yolo,video_path):\n vid = cv2.VideoCapture(video_path) ### TODO: will video path other than 0 be used?\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam\")\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50,\n color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\",result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\n\ndef detect_img(yolo):\n while True:\n img = input('Input image filename:')\n try:\n image = Image.open(img)\n except:\n print('Open Error! Try again!')\n continue\n else:\n r_image = yolo.detect_image(image)\n r_image.show()\n yolo.close_session()\n\n\n\nif __name__ == '__main__':\n detect_img(YOLO())\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.expand_dims", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DavidAdamczyk/tensorflow-cmake
[ "8d848fae993a791dd44bc9cdcf9ad91f5795bf52" ]
[ "custom_op/example.py" ]
[ "#!/usr/bin/env python\n# 2018, Patrick Wieschollek <[email protected]>\n\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom user_ops import matrix_add\n\nnp.random.seed(42)\ntf.set_random_seed(42)\n\nmatA = np.random.randn(1, 2, 3, 4).astype(np.float32) * 10\nmatB = np.random.randn(1, 2, 3, 4).astype(np.float32) * 10\n\n\nA = tf.placeholder(tf.float32, shape=[None, 2, 3, 4])\nB = tf.placeholder(tf.float32, shape=[None, 2, 3, 4])\n\nbias = 42.\n\nactual_op = matrix_add(A, B, bias)\n\n\nwith tf.Session() as sess:\n print (sess.run(actual_op, {A: matA, B: matB}))\n" ]
[ [ "numpy.random.seed", "tensorflow.placeholder", "numpy.random.randn", "tensorflow.Session", "tensorflow.set_random_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
62theories/tf-flask
[ "c6954f0f3c4082165c92c77bb06d2fec6e75a8c4", "10ef6bbe39bb5ac3d0e2755dc60b6843d39d395c", "c6954f0f3c4082165c92c77bb06d2fec6e75a8c4" ]
[ "official/projects/edgetpu/nlp/utils/utils_test.py", "official/nlp/modeling/layers/transformer_encoder_block.py", "official/nlp/modeling/networks/bert_encoder.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for utils.py.\"\"\"\n\nfrom absl import flags\nimport tensorflow as tf\nimport yaml\n\nfrom official.projects.edgetpu.nlp.configs import params\nfrom official.projects.edgetpu.nlp.modeling import model_builder\nfrom official.projects.edgetpu.nlp.utils import utils\n\nFLAGS = flags.FLAGS\n\n\n# Helper function to compare two nested Dicts.\n# Note that this function only ensures all the fields in dict_a have definition\n# and same value in dict_b. This function does not guarantee that\n# dict_a == dict_b.\ndef nested_dict_compare(dict_a, dict_b):\n for k, v in sorted(dict_a.items()):\n if k not in dict_b:\n return False\n if isinstance(v, dict) and isinstance(dict_b[k], dict):\n if not nested_dict_compare(dict_a[k], dict_b[k]):\n return False\n else:\n # A caveat: When dict_a[k] = 1, dict_b[k] = True, the return is True.\n if dict_a[k] != dict_b[k]:\n return False\n return True\n\n\nclass UtilsTest(tf.test.TestCase):\n\n def test_config_override(self):\n # Define several dummy flags which are call by the utils.config_override\n # function.\n file_path = 'third_party/tensorflow_models/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_m.yaml'\n flags.DEFINE_string('tpu', None, 'tpu_address.')\n flags.DEFINE_list('config_file', [file_path],\n 'A list of config files path.')\n flags.DEFINE_string('params_override', None, 'Override params.')\n flags.DEFINE_string('model_dir', '/tmp/', 'Model saving directory.')\n flags.DEFINE_list('mode', ['train'], 'Job mode.')\n flags.DEFINE_bool('use_vizier', False,\n 'Whether to enable vizier based hyperparameter search.')\n experiment_params = params.EdgeTPUBERTCustomParams()\n experiment_params = utils.config_override(experiment_params, FLAGS)\n experiment_params_dict = experiment_params.as_dict()\n\n with tf.io.gfile.GFile(file_path, 'r') as f:\n loaded_dict = yaml.load(f, Loader=yaml.FullLoader)\n\n # experiment_params contains all the configs but the loaded_dict might\n # only contains partial of the configs.\n self.assertTrue(nested_dict_compare(loaded_dict, experiment_params_dict))\n\n def test_load_checkpoint(self):\n \"\"\"Test the pretrained model can be successfully loaded.\"\"\"\n experiment_params = params.EdgeTPUBERTCustomParams()\n student_pretrainer = experiment_params.student_model\n student_pretrainer.encoder.type = 'mobilebert'\n pretrainer = model_builder.build_bert_pretrainer(\n pretrainer_cfg=student_pretrainer,\n name='test_model')\n # Makes sure the pretrainer variables are created.\n checkpoint_path = self.create_tempfile().full_path\n _ = pretrainer(pretrainer.inputs)\n pretrainer.save_weights(checkpoint_path)\n\n utils.load_checkpoint(pretrainer, checkpoint_path)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras-based TransformerEncoder block layer.\"\"\"\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass TransformerEncoderBlock(tf.keras.layers.Layer):\n \"\"\"TransformerEncoderBlock layer.\n\n This layer implements the Transformer Encoder from\n \"Attention Is All You Need\". (https://arxiv.org/abs/1706.03762),\n which combines a `tf.keras.layers.MultiHeadAttention` layer with a\n two-layer feedforward network.\n\n References:\n [Attention Is All You Need](https://arxiv.org/abs/1706.03762)\n [BERT: Pre-training of Deep Bidirectional Transformers for Language\n Understanding](https://arxiv.org/abs/1810.04805)\n \"\"\"\n\n def __init__(self,\n num_attention_heads,\n inner_dim,\n inner_activation,\n output_range=None,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_bias=True,\n norm_first=False,\n norm_epsilon=1e-12,\n output_dropout=0.0,\n attention_dropout=0.0,\n inner_dropout=0.0,\n attention_initializer=None,\n attention_axes=None,\n **kwargs):\n \"\"\"Initializes `TransformerEncoderBlock`.\n\n Args:\n num_attention_heads: Number of attention heads.\n inner_dim: The output dimension of the first Dense layer in a two-layer\n feedforward network.\n inner_activation: The activation for the first Dense layer in a two-layer\n feedforward network.\n output_range: the sequence output range, [0, output_range) for slicing the\n target sequence. `None` means the target sequence is not sliced.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer biases.\n use_bias: Whether to enable use_bias in attention layer. If set False,\n use_bias in attention layer is disabled.\n norm_first: Whether to normalize inputs to attention and intermediate\n dense layers. If set False, output of attention and intermediate dense\n layers is normalized.\n norm_epsilon: Epsilon value to initialize normalization layers.\n output_dropout: Dropout probability for the post-attention and output\n dropout.\n attention_dropout: Dropout probability within the attention layer.\n inner_dropout: Dropout probability for the first Dense layer in a\n two-layer feedforward network.\n attention_initializer: Initializer for kernels of attention layers. If set\n `None`, attention layers use kernel_initializer as initializer for\n kernel.\n attention_axes: axes over which the attention is applied. 
`None` means\n attention over all axes, but batch, heads, and features.\n **kwargs: keyword arguments/\n \"\"\"\n super().__init__(**kwargs)\n\n self._num_heads = num_attention_heads\n self._inner_dim = inner_dim\n self._inner_activation = inner_activation\n self._attention_dropout = attention_dropout\n self._attention_dropout_rate = attention_dropout\n self._output_dropout = output_dropout\n self._output_dropout_rate = output_dropout\n self._output_range = output_range\n self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self._bias_initializer = tf.keras.initializers.get(bias_initializer)\n self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)\n self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self._bias_constraint = tf.keras.constraints.get(bias_constraint)\n self._use_bias = use_bias\n self._norm_first = norm_first\n self._norm_epsilon = norm_epsilon\n self._inner_dropout = inner_dropout\n if attention_initializer:\n self._attention_initializer = tf.keras.initializers.get(\n attention_initializer)\n else:\n self._attention_initializer = self._kernel_initializer\n self._attention_axes = attention_axes\n\n def build(self, input_shape):\n if isinstance(input_shape, tf.TensorShape):\n input_tensor_shape = input_shape\n elif isinstance(input_shape, (list, tuple)):\n input_tensor_shape = tf.TensorShape(input_shape[0])\n else:\n raise ValueError(\n \"The type of input shape argument is not supported, got: %s\" %\n type(input_shape))\n einsum_equation = \"abc,cd->abd\"\n if len(input_tensor_shape.as_list()) > 3:\n einsum_equation = \"...bc,cd->...bd\"\n hidden_size = input_tensor_shape[-1]\n if hidden_size % self._num_heads != 0:\n raise ValueError(\n \"The input size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, self._num_heads))\n self._attention_head_size = int(hidden_size // self._num_heads)\n common_kwargs = dict(\n bias_initializer=self._bias_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activity_regularizer=self._activity_regularizer,\n kernel_constraint=self._kernel_constraint,\n bias_constraint=self._bias_constraint)\n self._attention_layer = tf.keras.layers.MultiHeadAttention(\n num_heads=self._num_heads,\n key_dim=self._attention_head_size,\n dropout=self._attention_dropout,\n use_bias=self._use_bias,\n kernel_initializer=self._attention_initializer,\n attention_axes=self._attention_axes,\n name=\"self_attention\",\n **common_kwargs)\n self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)\n # Use float32 in layernorm for numeric stability.\n # It is probably safe in mixed_float16, but we haven't validated this yet.\n self._attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=\"self_attention_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=tf.float32))\n self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(\n einsum_equation,\n output_shape=(None, self._inner_dim),\n bias_axes=\"d\",\n kernel_initializer=self._kernel_initializer,\n name=\"intermediate\",\n **common_kwargs)\n policy = tf.keras.mixed_precision.global_policy()\n if policy.name == \"mixed_bfloat16\":\n # bfloat16 causes BERT with the LAMB optimizer to not converge\n # as well, so we use float32.\n # TODO(b/154538392): Investigate this.\n policy 
= tf.float32\n self._intermediate_activation_layer = tf.keras.layers.Activation(\n self._inner_activation, dtype=policy)\n self._inner_dropout_layer = tf.keras.layers.Dropout(\n rate=self._inner_dropout)\n self._output_dense = tf.keras.layers.experimental.EinsumDense(\n einsum_equation,\n output_shape=(None, hidden_size),\n bias_axes=\"d\",\n name=\"output\",\n kernel_initializer=self._kernel_initializer,\n **common_kwargs)\n self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)\n # Use float32 in layernorm for numeric stability.\n self._output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"output_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=tf.float32)\n\n super(TransformerEncoderBlock, self).build(input_shape)\n\n def get_config(self):\n config = {\n \"num_attention_heads\":\n self._num_heads,\n \"inner_dim\":\n self._inner_dim,\n \"inner_activation\":\n self._inner_activation,\n \"output_dropout\":\n self._output_dropout_rate,\n \"attention_dropout\":\n self._attention_dropout_rate,\n \"output_range\":\n self._output_range,\n \"kernel_initializer\":\n tf.keras.initializers.serialize(self._kernel_initializer),\n \"bias_initializer\":\n tf.keras.initializers.serialize(self._bias_initializer),\n \"kernel_regularizer\":\n tf.keras.regularizers.serialize(self._kernel_regularizer),\n \"bias_regularizer\":\n tf.keras.regularizers.serialize(self._bias_regularizer),\n \"activity_regularizer\":\n tf.keras.regularizers.serialize(self._activity_regularizer),\n \"kernel_constraint\":\n tf.keras.constraints.serialize(self._kernel_constraint),\n \"bias_constraint\":\n tf.keras.constraints.serialize(self._bias_constraint),\n \"use_bias\":\n self._use_bias,\n \"norm_first\":\n self._norm_first,\n \"norm_epsilon\":\n self._norm_epsilon,\n \"inner_dropout\":\n self._inner_dropout,\n \"attention_initializer\":\n tf.keras.initializers.serialize(self._attention_initializer),\n \"attention_axes\": self._attention_axes,\n }\n base_config = super(TransformerEncoderBlock, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, inputs):\n \"\"\"Transformer self-attention encoder block call.\n\n Args:\n inputs: a single tensor or a list of tensors.\n `input tensor` as the single sequence of embeddings.\n [`input tensor`, `attention mask`] to have the additional attention\n mask.\n [`query tensor`, `key value tensor`, `attention mask`] to have separate\n input streams for the query, and key/value to the multi-head\n attention.\n\n Returns:\n An output tensor with the same dimensions as input/query tensor.\n \"\"\"\n if isinstance(inputs, (list, tuple)):\n if len(inputs) == 2:\n input_tensor, attention_mask = inputs\n key_value = None\n elif len(inputs) == 3:\n input_tensor, key_value, attention_mask = inputs\n else:\n raise ValueError(\"Unexpected inputs to %s with length at %d\" %\n (self.__class__, len(inputs)))\n else:\n input_tensor, key_value, attention_mask = (inputs, None, None)\n\n if self._output_range:\n if self._norm_first:\n source_tensor = input_tensor[:, 0:self._output_range, :]\n input_tensor = self._attention_layer_norm(input_tensor)\n if key_value is not None:\n key_value = self._attention_layer_norm(key_value)\n target_tensor = input_tensor[:, 0:self._output_range, :]\n if attention_mask is not None:\n attention_mask = attention_mask[:, 0:self._output_range, :]\n else:\n if self._norm_first:\n source_tensor = input_tensor\n input_tensor = self._attention_layer_norm(input_tensor)\n if key_value is not 
None:\n key_value = self._attention_layer_norm(key_value)\n target_tensor = input_tensor\n\n if key_value is None:\n key_value = input_tensor\n attention_output = self._attention_layer(\n query=target_tensor, value=key_value, attention_mask=attention_mask)\n attention_output = self._attention_dropout(attention_output)\n if self._norm_first:\n attention_output = source_tensor + attention_output\n else:\n attention_output = self._attention_layer_norm(target_tensor +\n attention_output)\n if self._norm_first:\n source_attention_output = attention_output\n attention_output = self._output_layer_norm(attention_output)\n inner_output = self._intermediate_dense(attention_output)\n inner_output = self._intermediate_activation_layer(inner_output)\n inner_output = self._inner_dropout_layer(inner_output)\n layer_output = self._output_dense(inner_output)\n layer_output = self._output_dropout(layer_output)\n\n if self._norm_first:\n return source_attention_output + layer_output\n\n # During mixed precision training, layer norm output is always fp32 for now.\n # Casts fp32 for the subsequent add.\n layer_output = tf.cast(layer_output, tf.float32)\n return self._output_layer_norm(layer_output + attention_output)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer-based BERT encoder network.\"\"\"\n# pylint: disable=g-classes-have-attributes\n\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.nlp.modeling import layers\n\n\[email protected]_keras_serializable(package='Text')\nclass BertEncoder(tf.keras.Model):\n \"\"\"Bi-directional Transformer-based encoder network.\n\n This network implements a bi-directional Transformer-based encoder as\n described in \"BERT: Pre-training of Deep Bidirectional Transformers for\n Language Understanding\" (https://arxiv.org/abs/1810.04805). It includes the\n embedding lookups and transformer layers, but not the masked language model\n or classification task networks.\n\n The default values for this object are taken from the BERT-Base implementation\n in \"BERT: Pre-training of Deep Bidirectional Transformers for Language\n Understanding\".\n\n *Note* that the network is constructed by\n [Keras Functional API](https://keras.io/guides/functional_api/).\n\n Args:\n vocab_size: The size of the token vocabulary.\n hidden_size: The size of the transformer hidden layers.\n num_layers: The number of transformer layers.\n num_attention_heads: The number of attention heads for each transformer. The\n hidden size must be divisible by the number of attention heads.\n max_sequence_length: The maximum sequence length that this encoder can\n consume. 
If None, max_sequence_length uses the value from sequence length.\n This determines the variable shape for positional embeddings.\n type_vocab_size: The number of types that the 'type_ids' input can take.\n inner_dim: The output dimension of the first Dense layer in a two-layer\n feedforward network for each transformer.\n inner_activation: The activation for the first Dense layer in a two-layer\n feedforward network for each transformer.\n output_dropout: Dropout probability for the post-attention and output\n dropout.\n attention_dropout: The dropout rate to use for the attention layers\n within the transformer layers.\n initializer: The initializer to use for all weights in this encoder.\n output_range: The sequence output range, [0, output_range), by slicing the\n target sequence of the last transformer layer. `None` means the entire\n target sequence will attend to the source sequence, which yields the full\n output.\n embedding_width: The width of the word embeddings. If the embedding width is\n not equal to hidden size, embedding parameters will be factorized into two\n matrices in the shape of ['vocab_size', 'embedding_width'] and\n ['embedding_width', 'hidden_size'] ('embedding_width' is usually much\n smaller than 'hidden_size').\n embedding_layer: An optional Layer instance which will be called to\n generate embeddings for the input word IDs.\n norm_first: Whether to normalize inputs to attention and intermediate\n dense layers. If set False, output of attention and intermediate dense\n layers is normalized.\n dict_outputs: Whether to use a dictionary as the model outputs.\n return_all_encoder_outputs: Whether to output sequence embedding outputs of\n all encoder transformer layers. Note: when the following `dict_outputs`\n argument is True, all encoder outputs are always returned in the dict,\n keyed by `encoder_outputs`.\n \"\"\"\n\n def __init__(\n self,\n vocab_size,\n hidden_size=768,\n num_layers=12,\n num_attention_heads=12,\n max_sequence_length=512,\n type_vocab_size=16,\n inner_dim=3072,\n inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),\n output_dropout=0.1,\n attention_dropout=0.1,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n output_range=None,\n embedding_width=None,\n embedding_layer=None,\n norm_first=False,\n dict_outputs=False,\n return_all_encoder_outputs=False,\n **kwargs):\n if 'sequence_length' in kwargs:\n kwargs.pop('sequence_length')\n logging.warning('`sequence_length` is a deprecated argument to '\n '`BertEncoder`, which has had no effect for a while. 
Please '\n 'remove `sequence_length` argument.')\n\n # Handles backward compatible kwargs.\n if 'intermediate_size' in kwargs:\n inner_dim = kwargs.pop('intermediate_size')\n\n if 'activation' in kwargs:\n inner_activation = kwargs.pop('activation')\n\n if 'dropout_rate' in kwargs:\n output_dropout = kwargs.pop('dropout_rate')\n\n if 'attention_dropout_rate' in kwargs:\n attention_dropout = kwargs.pop('attention_dropout_rate')\n\n activation = tf.keras.activations.get(inner_activation)\n initializer = tf.keras.initializers.get(initializer)\n\n word_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_word_ids')\n mask = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_mask')\n type_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_type_ids')\n\n if embedding_width is None:\n embedding_width = hidden_size\n\n if embedding_layer is None:\n embedding_layer_inst = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n name='word_embeddings')\n else:\n embedding_layer_inst = embedding_layer\n word_embeddings = embedding_layer_inst(word_ids)\n\n # Always uses dynamic slicing for simplicity.\n position_embedding_layer = layers.PositionEmbedding(\n initializer=initializer,\n max_length=max_sequence_length,\n name='position_embedding')\n position_embeddings = position_embedding_layer(word_embeddings)\n type_embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=type_vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n use_one_hot=True,\n name='type_embeddings')\n type_embeddings = type_embedding_layer(type_ids)\n\n embeddings = tf.keras.layers.Add()(\n [word_embeddings, position_embeddings, type_embeddings])\n\n embedding_norm_layer = tf.keras.layers.LayerNormalization(\n name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)\n\n embeddings = embedding_norm_layer(embeddings)\n embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))\n\n # We project the 'embedding' output to 'hidden_size' if it is not already\n # 'hidden_size'.\n if embedding_width != hidden_size:\n embedding_projection = tf.keras.layers.experimental.EinsumDense(\n '...x,xy->...y',\n output_shape=hidden_size,\n bias_axes='y',\n kernel_initializer=initializer,\n name='embedding_projection')\n embeddings = embedding_projection(embeddings)\n else:\n embedding_projection = None\n\n transformer_layers = []\n data = embeddings\n attention_mask = layers.SelfAttentionMask()(data, mask)\n encoder_outputs = []\n for i in range(num_layers):\n if i == num_layers - 1 and output_range is not None:\n transformer_output_range = output_range\n else:\n transformer_output_range = None\n layer = layers.TransformerEncoderBlock(\n num_attention_heads=num_attention_heads,\n inner_dim=inner_dim,\n inner_activation=inner_activation,\n output_dropout=output_dropout,\n attention_dropout=attention_dropout,\n norm_first=norm_first,\n output_range=transformer_output_range,\n kernel_initializer=initializer,\n name='transformer/layer_%d' % i)\n transformer_layers.append(layer)\n data = layer([data, attention_mask])\n encoder_outputs.append(data)\n\n last_encoder_output = encoder_outputs[-1]\n # Applying a tf.slice op (through subscript notation) to a Keras tensor\n # like this will create a SliceOpLambda layer. 
This is better than a Lambda\n # layer with Python code, because that is fundamentally less portable.\n first_token_tensor = last_encoder_output[:, 0, :]\n pooler_layer = tf.keras.layers.Dense(\n units=hidden_size,\n activation='tanh',\n kernel_initializer=initializer,\n name='pooler_transform')\n cls_output = pooler_layer(first_token_tensor)\n\n outputs = dict(\n sequence_output=encoder_outputs[-1],\n pooled_output=cls_output,\n encoder_outputs=encoder_outputs,\n )\n\n if dict_outputs:\n super().__init__(\n inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)\n else:\n cls_output = outputs['pooled_output']\n if return_all_encoder_outputs:\n encoder_outputs = outputs['encoder_outputs']\n outputs = [encoder_outputs, cls_output]\n else:\n sequence_output = outputs['sequence_output']\n outputs = [sequence_output, cls_output]\n super().__init__( # pylint: disable=bad-super-call\n inputs=[word_ids, mask, type_ids],\n outputs=outputs,\n **kwargs)\n\n self._pooler_layer = pooler_layer\n self._transformer_layers = transformer_layers\n self._embedding_norm_layer = embedding_norm_layer\n self._embedding_layer = embedding_layer_inst\n self._position_embedding_layer = position_embedding_layer\n self._type_embedding_layer = type_embedding_layer\n if embedding_projection is not None:\n self._embedding_projection = embedding_projection\n\n config_dict = {\n 'vocab_size': vocab_size,\n 'hidden_size': hidden_size,\n 'num_layers': num_layers,\n 'num_attention_heads': num_attention_heads,\n 'max_sequence_length': max_sequence_length,\n 'type_vocab_size': type_vocab_size,\n 'inner_dim': inner_dim,\n 'inner_activation': tf.keras.activations.serialize(activation),\n 'output_dropout': output_dropout,\n 'attention_dropout': attention_dropout,\n 'initializer': tf.keras.initializers.serialize(initializer),\n 'output_range': output_range,\n 'embedding_width': embedding_width,\n 'embedding_layer': embedding_layer,\n 'norm_first': norm_first,\n 'dict_outputs': dict_outputs,\n }\n # pylint: disable=protected-access\n self._setattr_tracking = False\n self._config = config_dict\n self._setattr_tracking = True\n # pylint: enable=protected-access\n\n def get_embedding_table(self):\n return self._embedding_layer.embeddings\n\n def get_embedding_layer(self):\n return self._embedding_layer\n\n def get_config(self):\n return self._config\n\n @property\n def transformer_layers(self):\n \"\"\"List of Transformer layers in the encoder.\"\"\"\n return self._transformer_layers\n\n @property\n def pooler_layer(self):\n \"\"\"The pooler dense layer after the transformer layers.\"\"\"\n return self._pooler_layer\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n if 'embedding_layer' in config and config['embedding_layer'] is not None:\n warn_string = (\n 'You are reloading a model that was saved with a '\n 'potentially-shared embedding layer object. If you continue to '\n 'train this model, the embedding layer will no longer be shared. '\n 'To work around this, load the model outside of the Keras API.')\n print('WARNING: ' + warn_string)\n logging.warn(warn_string)\n\n return cls(**config)\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.test.main" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.TensorShape", "tensorflow.keras.layers.Activation", "tensorflow.keras.constraints.get", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.cast", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.initializers.serialize", "tensorflow.keras.layers.experimental.EinsumDense", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.layers.MultiHeadAttention", "tensorflow.keras.mixed_precision.global_policy", "tensorflow.keras.layers.Dropout", "tensorflow.keras.initializers.get" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.keras.activations.serialize", "tensorflow.keras.layers.Dense", "tensorflow.keras.activations.gelu", "tensorflow.keras.initializers.serialize", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.experimental.EinsumDense", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.keras.activations.get", "tensorflow.keras.initializers.get", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.4", "2.5", "2.6", "2.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.4", "2.5", "2.6", "2.7" ] } ]
kozodoi/BMS-Molecular-Translation
[ "881b252a3c30e5b0afce2ce2c5da73d02755394d", "7de42928e36e63964f9f3ed0b5920ec86b8fc27d" ]
[ "codes/decoder.py", "codes/data.py" ]
[ "####### RNN DECODER\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Attention(nn.Module):\n '''\n Attention network for calculate attention value\n '''\n def __init__(self, encoder_dim, decoder_dim, attention_dim):\n '''\n :param encoder_dim: input size of encoder network\n :param decoder_dim: input size of decoder network\n :param attention_dim: input size of attention network\n '''\n super(Attention, self).__init__()\n self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image\n self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output\n self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim = 1) # softmax layer to calculate weights\n\n def forward(self, encoder_out, decoder_hidden):\n att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)\n att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)\n att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) # (batch_size, num_pixels)\n alpha = self.softmax(att) # (batch_size, num_pixels)\n attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim = 1) # (batch_size, encoder_dim)\n return attention_weighted_encoding, alpha\n\n\nclass DecoderWithAttention(nn.Module):\n '''\n Decoder network with attention network used for training\n '''\n\n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, device, encoder_dim, dropout):\n '''\n :param attention_dim: input size of attention network\n :param embed_dim: input size of embedding network\n :param decoder_dim: input size of decoder network\n :param vocab_size: total number of characters used in training\n :param encoder_dim: input size of encoder network\n :param dropout: dropout rate\n '''\n super(DecoderWithAttention, self).__init__()\n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.decoder_dim = decoder_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n self.device = device\n self.attention = Attention(encoder_dim, decoder_dim, attention_dim) # attention network\n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.dropout = nn.Dropout(p = self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias = True) # decoding LSTMCell\n self.init_h = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n self.init_weights() # initialize some layers with the uniform distribution\n\n def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)\n\n def fine_tune_embeddings(self, fine_tune = True):\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n mean_encoder_out = encoder_out.mean(dim = 1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = 
self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths):\n '''\n :param encoder_out: output of encoder network\n :param encoded_captions: transformed sequence from character to integer\n :param caption_lengths: length of transformed sequence\n '''\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)\n encoder_out = encoder_out[sort_ind]\n encoded_captions = encoded_captions[sort_ind]\n \n # embedding transformed sequence for vector\n embeddings = self.embedding(encoded_captions) # (batch_size, max_caption_length, embed_dim)\n \n # initialize hidden state and cell state of LSTM cell\n h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)\n \n # set decode length by caption length - 1 because of omitting start token\n decode_lengths = (caption_lengths - 1).tolist()\n predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size, device = self.device)\n alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels, device = self.device)\n \n # predict sequence\n for t in range(max(decode_lengths)):\n batch_size_t = sum([l > t for l in decode_lengths])\n attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t], h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n return predictions, encoded_captions, decode_lengths, alphas, sort_ind\n \n def predict(self, encoder_out, decode_lengths, tokenizer):\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n # embed start tocken for LSTM input\n start_tockens = torch.ones(batch_size, dtype=torch.long, device = self.device) * tokenizer.stoi['<sos>']\n embeddings = self.embedding(start_tockens)\n # initialize hidden state and cell state of LSTM cell\n h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)\n predictions = torch.zeros(batch_size, decode_lengths, vocab_size, device = self.device)\n # predict sequence\n '''\n for t in range(decode_lengths):\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:, t, :] = preds\n #if np.argmax(preds.detach().cpu().numpy()) == tokenizer.stoi['<eos>']:\n # break\n embeddings = self.embedding(torch.argmax(preds, -1))\n '''\n \n # predict sequence\n end_condition = torch.zeros(batch_size, dtype=torch.long, device = 
self.device)\n for t in range(decode_lengths):\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:, t, :] = preds\n end_condition |= (torch.argmax(preds, -1) == tokenizer.stoi[\"<eos>\"])\n if end_condition.sum() == batch_size:\n break\n embeddings = self.embedding(torch.argmax(preds, -1))\n \n return predictions\n \n \n # beam search\n def forward_step(self, prev_tokens, hidden, encoder_out, function):\n\n h, c = hidden\n h, c = h.squeeze(0), c.squeeze(0)\n\n embeddings = self.embedding(prev_tokens)\n if embeddings.dim() == 3:\n embeddings = embeddings.squeeze(1)\n\n attention_weighted_encoding, alpha = self.attention(encoder_out, h)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings, attention_weighted_encoding], dim=1),\n (h, c)) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n\n hidden = (h.unsqueeze(0), c.unsqueeze(0))\n predicted_softmax = function(preds, dim=1)\n return predicted_softmax, hidden, None\n\n\n\n####### TOP-K DECODER\n\ndef _inflate(tensor, times, dim):\n # repeat_dims = [1] * tensor.dim()\n # repeat_dims[dim] = times\n # return tensor.repeat(*repeat_dims)\n return torch.repeat_interleave(tensor, times, dim)\n\n\nclass TopKDecoder(torch.nn.Module):\n r\"\"\"\n Top-K decoding with beam search.\n\n Args:\n decoder_rnn (DecoderRNN): An object of DecoderRNN used for decoding.\n k (int): Size of the beam.\n\n Inputs: inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio\n - **inputs** (seq_len, batch, input_size): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. It is used for teacher forcing when provided. (default is `None`)\n - **encoder_hidden** (num_layers * num_directions, batch_size, hidden_size): tensor containing the features\n in the hidden state `h` of encoder. Used as the initial hidden state of the decoder.\n - **encoder_outputs** (batch, seq_len, hidden_size): tensor with containing the outputs of the encoder.\n Used for attention mechanism (default is `None`).\n - **function** (torch.nn.Module): A function used to generate symbols from RNN hidden state\n (default is `torch.nn.functional.log_softmax`).\n - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. 
A random number is\n drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,\n teacher forcing would be used (default is 0).\n\n Outputs: decoder_outputs, decoder_hidden, ret_dict\n - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the\n outputs of the decoder.\n - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden\n state of the decoder.\n - **ret_dict**: dictionary containing additional information as follows {*length* : list of integers\n representing lengths of output sequences, *topk_length*: list of integers representing lengths of beam search\n sequences, *sequence* : list of sequences, where each sequence is a list of predicted token IDs,\n *topk_sequence* : list of beam search sequences, each beam is a list of token IDs, *inputs* : target\n outputs if provided for decoding}.\n \"\"\"\n\n def __init__(self, decoder_rnn, k, decoder_dim, max_length, tokenizer):\n super(TopKDecoder, self).__init__()\n self.rnn = decoder_rnn\n self.k = k\n self.hidden_size = decoder_dim # self.rnn.hidden_size\n self.V = len(tokenizer)\n self.SOS = tokenizer.stoi[\"<sos>\"]\n self.EOS = tokenizer.stoi[\"<eos>\"]\n self.max_length = max_length\n self.tokenizer = tokenizer\n\n def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None, function=F.log_softmax,\n teacher_forcing_ratio=0, retain_output_probs=True):\n \"\"\"\n Forward rnn for MAX_LENGTH steps. Look at :func:`seq2seq.models.DecoderRNN.DecoderRNN.forward_rnn` for details.\n \"\"\"\n\n # inputs, batch_size, max_length = self.rnn._validate_args(inputs, encoder_hidden, encoder_outputs,\n # function, teacher_forcing_ratio)\n\n batch_size = encoder_outputs.size(0)\n max_length = self.max_length\n\n self.pos_index = (torch.LongTensor(range(batch_size)) * self.k).view(-1, 1).to(device)\n\n # Inflate the initial hidden states to be of size: b*k x h\n # encoder_hidden = self.rnn._init_state(encoder_hidden)\n if encoder_hidden is None:\n hidden = None\n else:\n if isinstance(encoder_hidden, tuple):\n # hidden = tuple([_inflate(h, self.k, 1) for h in encoder_hidden])\n hidden = tuple([h.squeeze(0) for h in encoder_hidden])\n hidden = tuple([_inflate(h, self.k, 0) for h in hidden])\n hidden = tuple([h.unsqueeze(0) for h in hidden])\n else:\n # hidden = _inflate(encoder_hidden, self.k, 1)\n raise RuntimeError(\"Not supported\")\n\n # ... 
same idea for encoder_outputs and decoder_outputs\n if True: # self.rnn.use_attention:\n inflated_encoder_outputs = _inflate(encoder_outputs, self.k, 0)\n else:\n inflated_encoder_outputs = None\n\n # Initialize the scores; for the first step,\n # ignore the inflated copies to avoid duplicate entries in the top k\n sequence_scores = torch.Tensor(batch_size * self.k, 1)\n sequence_scores.fill_(-float('Inf'))\n sequence_scores.index_fill_(0, torch.LongTensor([i * self.k for i in range(0, batch_size)]), 0.0)\n sequence_scores = sequence_scores.to(device)\n\n # Initialize the input vector\n input_var = torch.transpose(torch.LongTensor([[self.SOS] * batch_size * self.k]), 0, 1).to(device)\n\n # Store decisions for backtracking\n stored_outputs = list()\n stored_scores = list()\n stored_predecessors = list()\n stored_emitted_symbols = list()\n stored_hidden = list()\n\n for i in range(0, max_length):\n\n # Run the RNN one step forward\n log_softmax_output, hidden, _ = self.rnn.forward_step(input_var, hidden,\n inflated_encoder_outputs, function=function)\n # If doing local backprop (e.g. supervised training), retain the output layer\n if retain_output_probs:\n stored_outputs.append(log_softmax_output)\n\n # To get the full sequence scores for the new candidates, add the local scores for t_i to the predecessor scores for t_(i-1)\n sequence_scores = _inflate(sequence_scores, self.V, 1)\n sequence_scores += log_softmax_output.squeeze(1)\n scores, candidates = sequence_scores.view(batch_size, -1).topk(self.k, dim=1)\n\n # Reshape input = (bk, 1) and sequence_scores = (bk, 1)\n input_var = (candidates % self.V).view(batch_size * self.k, 1)\n sequence_scores = scores.view(batch_size * self.k, 1)\n\n # Update fields for next timestep\n predecessors = (candidates // self.V + self.pos_index.expand_as(candidates)).view(batch_size * self.k, 1)\n if isinstance(hidden, tuple):\n hidden = tuple([h.index_select(1, predecessors.squeeze()) for h in hidden])\n else:\n hidden = hidden.index_select(1, predecessors.squeeze())\n\n # Update sequence scores and erase scores for end-of-sentence symbol so that they aren't expanded\n stored_scores.append(sequence_scores.clone())\n eos_indices = input_var.data.eq(self.EOS)\n if eos_indices.nonzero().dim() > 0:\n sequence_scores.data.masked_fill_(eos_indices, -float('inf'))\n\n # Cache results for backtracking\n stored_predecessors.append(predecessors)\n stored_emitted_symbols.append(input_var)\n stored_hidden.append(hidden)\n\n # Do backtracking to return the optimal values\n output, h_t, h_n, s, l, p = self._backtrack(stored_outputs, stored_hidden,\n stored_predecessors, stored_emitted_symbols,\n stored_scores, batch_size, self.hidden_size)\n\n # Build return objects\n decoder_outputs = [step[:, 0, :] for step in output]\n if isinstance(h_n, tuple):\n decoder_hidden = tuple([h[:, :, 0, :] for h in h_n])\n else:\n decoder_hidden = h_n[:, :, 0, :]\n metadata = {}\n metadata['inputs'] = inputs\n metadata['output'] = output\n metadata['h_t'] = h_t\n metadata['score'] = s\n metadata['topk_length'] = l\n metadata['topk_sequence'] = p\n metadata['length'] = [seq_len[0] for seq_len in l]\n metadata['sequence'] = [seq[0] for seq in p]\n return decoder_outputs, decoder_hidden, metadata\n\n def _backtrack(self, nw_output, nw_hidden, predecessors, symbols, scores, b, hidden_size):\n \"\"\"Backtracks over batch to generate optimal k-sequences.\n\n Args:\n nw_output [(batch*k, vocab_size)] * sequence_length: A Tensor of outputs from network\n nw_hidden [(num_layers, batch*k, 
hidden_size)] * sequence_length: A Tensor of hidden states from network\n predecessors [(batch*k)] * sequence_length: A Tensor of predecessors\n symbols [(batch*k)] * sequence_length: A Tensor of predicted tokens\n scores [(batch*k)] * sequence_length: A Tensor containing sequence scores for every token t = [0, ... , seq_len - 1]\n b: Size of the batch\n hidden_size: Size of the hidden state\n\n Returns:\n output [(batch, k, vocab_size)] * sequence_length: A list of the output probabilities (p_n)\n from the last layer of the RNN, for every n = [0, ... , seq_len - 1]\n\n h_t [(batch, k, hidden_size)] * sequence_length: A list containing the output features (h_n)\n from the last layer of the RNN, for every n = [0, ... , seq_len - 1]\n\n h_n(batch, k, hidden_size): A Tensor containing the last hidden state for all top-k sequences.\n\n score [batch, k]: A list containing the final scores for all top-k sequences\n\n length [batch, k]: A list specifying the length of each sequence in the top-k candidates\n\n p (batch, k, sequence_len): A Tensor containing predicted sequence\n \"\"\"\n\n lstm = isinstance(nw_hidden[0], tuple)\n\n # initialize return variables given different types\n output = list()\n h_t = list()\n p = list()\n # Placeholder for last hidden state of top-k sequences.\n # If a (top-k) sequence ends early in decoding, `h_n` contains\n # its hidden state when it sees EOS. Otherwise, `h_n` contains\n # the last hidden state of decoding.\n if lstm:\n state_size = nw_hidden[0][0].size()\n h_n = tuple([torch.zeros(state_size).to(device), torch.zeros(state_size).to(device)])\n else:\n h_n = torch.zeros(nw_hidden[0].size()).to(device)\n l = [[self.max_length] * self.k for _ in range(b)] # Placeholder for lengths of top-k sequences\n # Similar to `h_n`\n\n # the last step output of the beams are not sorted\n # thus they are sorted here\n sorted_score, sorted_idx = scores[-1].view(b, self.k).topk(self.k)\n # initialize the sequence scores with the sorted last step beam scores\n s = sorted_score.clone()\n\n batch_eos_found = [0] * b # the number of EOS found\n # in the backward loop below for each batch\n\n t = self.max_length - 1\n # initialize the back pointer with the sorted order of the last step beams.\n # add self.pos_index for indexing variable with b*k as the first dimension.\n t_predecessors = (sorted_idx + self.pos_index.expand_as(sorted_idx)).view(b * self.k)\n while t >= 0:\n # Re-order the variables with the back pointer\n current_output = nw_output[t].index_select(0, t_predecessors)\n if lstm:\n current_hidden = tuple([h.index_select(1, t_predecessors) for h in nw_hidden[t]])\n else:\n current_hidden = nw_hidden[t].index_select(1, t_predecessors)\n current_symbol = symbols[t].index_select(0, t_predecessors)\n # Re-order the back pointer of the previous step with the back pointer of\n # the current step\n t_predecessors = predecessors[t].index_select(0, t_predecessors).squeeze()\n\n # This tricky block handles dropped sequences that see EOS earlier.\n # The basic idea is summarized below:\n #\n # Terms:\n # Ended sequences = sequences that see EOS early and dropped\n # Survived sequences = sequences in the last step of the beams\n #\n # Although the ended sequences are dropped during decoding,\n # their generated symbols and complete backtracking information are still\n # in the backtracking variables.\n # For each batch, everytime we see an EOS in the backtracking process,\n # 1. 
If there is survived sequences in the return variables, replace\n # the one with the lowest survived sequence score with the new ended\n # sequences\n # 2. Otherwise, replace the ended sequence with the lowest sequence\n # score with the new ended sequence\n #\n eos_indices = symbols[t].data.squeeze(1).eq(self.EOS).nonzero()\n if eos_indices.dim() > 0:\n for i in range(eos_indices.size(0) - 1, -1, -1):\n # Indices of the EOS symbol for both variables\n # with b*k as the first dimension, and b, k for\n # the first two dimensions\n idx = eos_indices[i]\n b_idx = int(idx[0] // self.k)\n # The indices of the replacing position\n # according to the replacement strategy noted above\n res_k_idx = self.k - (batch_eos_found[b_idx] % self.k) - 1\n batch_eos_found[b_idx] += 1\n res_idx = b_idx * self.k + res_k_idx\n\n # Replace the old information in return variables\n # with the new ended sequence information\n t_predecessors[res_idx] = predecessors[t][idx[0]]\n current_output[res_idx, :] = nw_output[t][idx[0], :]\n if lstm:\n current_hidden[0][:, res_idx, :] = nw_hidden[t][0][:, idx[0], :]\n current_hidden[1][:, res_idx, :] = nw_hidden[t][1][:, idx[0], :]\n h_n[0][:, res_idx, :] = nw_hidden[t][0][:, idx[0], :].data\n h_n[1][:, res_idx, :] = nw_hidden[t][1][:, idx[0], :].data\n else:\n current_hidden[:, res_idx, :] = nw_hidden[t][:, idx[0], :]\n h_n[:, res_idx, :] = nw_hidden[t][:, idx[0], :].data\n current_symbol[res_idx, :] = symbols[t][idx[0]]\n s[b_idx, res_k_idx] = scores[t][idx[0]].data[0]\n l[b_idx][res_k_idx] = t + 1\n\n # record the back tracked results\n output.append(current_output)\n h_t.append(current_hidden)\n p.append(current_symbol)\n\n t -= 1\n\n # Sort and re-order again as the added ended sequences may change\n # the order (very unlikely)\n s, re_sorted_idx = s.topk(self.k)\n for b_idx in range(b):\n l[b_idx] = [l[b_idx][k_idx.item()] for k_idx in re_sorted_idx[b_idx, :]]\n\n re_sorted_idx = (re_sorted_idx + self.pos_index.expand_as(re_sorted_idx)).view(b * self.k)\n\n # Reverse the sequences and re-order at the same time\n # It is reversed because the backtracking happens in reverse time order\n output = [step.index_select(0, re_sorted_idx).view(b, self.k, -1) for step in reversed(output)]\n p = [step.index_select(0, re_sorted_idx).view(b, self.k, -1) for step in reversed(p)]\n if lstm:\n h_t = [tuple([h.index_select(1, re_sorted_idx).view(-1, b, self.k, hidden_size) for h in step]) for step in reversed(h_t)]\n h_n = tuple([h.index_select(1, re_sorted_idx.data).view(-1, b, self.k, hidden_size) for h in h_n])\n else:\n h_t = [step.index_select(1, re_sorted_idx).view(-1, b, self.k, hidden_size) for step in reversed(h_t)]\n h_n = h_n.index_select(1, re_sorted_idx.data).view(-1, b, self.k, hidden_size)\n s = s.data\n\n return output, h_t, h_n, s, l, p\n\n def _mask_symbol_scores(self, score, idx, masking_score=-float('inf')):\n score[idx] = masking_score\n\n def _mask(self, tensor, idx, dim=0, masking_score=-float('inf')):\n if len(idx.size()) > 0:\n indices = idx[:, 0]\n tensor.index_fill_(dim, indices, masking_score)", "import albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler, WeightedRandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence\n\nimport numpy as np\nimport pandas as pd\n\nfrom utilities import smart_print\nfrom 
augmentations import get_augs\n\n\n####### DATASETS\n\nclass ImageData(Dataset):\n \n def __init__(self, \n df, \n tokenizer = None, \n channels = 3,\n crop = False, \n padding = False,\n morphology = False,\n meta = False,\n transform = None):\n super().__init__()\n self.df = df\n self.tokenizer = tokenizer\n self.file_paths = df['file_path'].values\n self.labels = df['InChI_text'].values\n self.transform = transform\n self.crop = crop\n self.channels = channels\n self.morphology = morphology\n self.meta = meta\n self.padding = padding\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n \n # import\n file_path = self.file_paths[idx] \n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) \n if image is None:\n raise FileNotFoundError(file_path)\n \n # image meta data\n if self.meta:\n meta_area = (image.shape[0] * image.shape[1]) / 2500000\n meta_ratio = (image.shape[0] / image.shape[1]) / 30.0\n meta = torch.FloatTensor([meta_area, meta_ratio]) # float tensor: LongTensor would truncate these fractional features to 0\n \n # morphological transforms\n if self.morphology:\n image = cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((2, 2)))\n image = cv2.erode(image, np.ones((2, 2)))\n \n # smart crop\n if self.crop:\n image = smart_crop(image)\n \n # convert to RGB\n if self.channels == 3:\n image = cv2.merge([image, image, image]).astype(np.float32)\n elif self.channels == 1:\n image = image.astype(np.float32)\n \n # padding\n if self.padding:\n image = pad_image(image)\n \n # augmentations\n if self.transform:\n image = self.transform(image = image)['image']\n \n # output\n label = torch.LongTensor(self.tokenizer.text_to_sequence(self.labels[idx]))\n label_length = torch.LongTensor([len(label)])\n if self.meta:\n return image, meta, label, label_length\n else:\n return image, label, label_length\n \n \n \nclass ImageTestData(Dataset):\n \n def __init__(self, \n df, \n channels = 3,\n crop = False, \n padding = False,\n morphology = False,\n meta = False,\n transform = None):\n super().__init__()\n self.df = df\n self.file_paths = df['file_path'].values\n self.transform = transform\n self.crop = crop\n self.channels = channels\n self.padding = padding\n self.morphology = morphology\n self.meta = meta\n self.fix_transform = A.Compose([A.Transpose(p = 1), A.VerticalFlip(p = 1)])\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n \n # import\n file_path = self.file_paths[idx]\n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) \n if image is None:\n raise FileNotFoundError(file_path)\n \n # image meta data\n if self.meta:\n meta_area = (image.shape[0] * image.shape[1]) / 2500000\n if image.shape[0] > image.shape[1]:\n meta_ratio = (image.shape[0] / image.shape[1]) / 30.0\n else:\n meta_ratio = (image.shape[1] / image.shape[0]) / 30.0\n meta = torch.FloatTensor([meta_area, meta_ratio]) # float tensor: LongTensor would truncate these fractional features to 0\n \n # morphological transforms\n if self.morphology:\n image = cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((2, 2)))\n image = cv2.erode(image, np.ones((2, 2)))\n \n # smart crop\n if self.crop:\n image = smart_crop(image)\n \n # convert to RGB\n if self.channels == 3:\n image = cv2.merge([image, image, image]).astype(np.float32)\n elif self.channels == 1:\n image = image.astype(np.float32)\n \n # fix rotation\n h, w = image.shape[0], image.shape[1]\n if h > w:\n image = self.fix_transform(image = image)['image']\n \n # padding\n if self.padding:\n image = pad_image(image)\n \n # augmentations\n if self.transform:\n image = self.transform(image = image)['image']\n \n # output \n if self.meta:\n return image, meta\n else:\n return 
image\n\n\n\n####### BATCH COLLATE HELPER FUNCTION\n\n''' \nBorrowed from https://www.kaggle.com/yasufuminakama/inchi-resnet-lstm-with-attention-starter\n'''\n\ndef bms_collate(batch, tokenizer):\n imgs, labels, label_lengths = [], [], []\n for data_point in batch:\n imgs.append(data_point[0])\n labels.append(data_point[1])\n label_lengths.append(data_point[2])\n labels = pad_sequence(labels, batch_first = True, padding_value = tokenizer.stoi['<pad>'])\n return torch.stack(imgs), labels, torch.stack(label_lengths).reshape(-1, 1)\n\n\n\n####### DATA PREP\n\ndef get_data(df, \n fold, \n CFG, \n epoch = None):\n \n # epoch number\n if epoch is None:\n epoch = 0\n\n # load splits\n df_train = df.loc[df.fold != fold].reset_index(drop = True)\n df_valid = df.loc[df.fold == fold].reset_index(drop = True)\n if CFG['valid_subset']:\n df_valid = df_valid.head(CFG['valid_subset'])\n smart_print('- no. images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n \n # checks\n assert len(df_train) + len(df_valid) == len(df), 'Wrong number of observations'\n \n # extra data\n if CFG['data_ext']:\n df_extra_epoch = df_extra.sample(n = CFG['data_ext'], random_state = CFG['seed'] + epoch).reset_index(drop = True)\n df_train = pd.concat([df_train, df_extra_epoch], axis = 0).reset_index(drop = True)\n smart_print('- appending extra data to train...', CFG)\n smart_print('- no. images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n\n # subset for debug mode\n if CFG['debug']:\n df_train = df_train.sample(CFG['batch_size'] * 10, random_state = CFG['seed']).reset_index(drop = True)\n df_valid = df_valid.sample(CFG['batch_size'] * 10, random_state = CFG['seed']).reset_index(drop = True)\n smart_print('- subsetting data for debug mode...', CFG)\n smart_print('- no. 
images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n \n # sort validation data for efficiency\n df_valid['InChI_length'] = df_valid['InChI'].str.len()\n df_valid = df_valid.sort_values(by = 'InChI_length', ascending = False).reset_index(drop = True)\n del df_valid['InChI_length']\n \n return df_train, df_valid\n\n\n\n\n####### DATA LOADERS\n\nfrom utilities import *\n\ndef get_loaders(df_train, \n df_valid, \n tokenizer, \n CFG, \n epoch = None):\n\n ##### EPOCH-BASED PARAMS\n\n image_size = CFG['image_size']\n p_aug = CFG['p_aug']\n\n\n ##### DATASETS\n \n # augmentations\n train_augs, valid_augs = get_augs(CFG, image_size, p_aug)\n\n # datasets\n train_dataset = ImageData(df = df_train, \n transform = train_augs,\n tokenizer = tokenizer, \n channels = CFG['num_channels'],\n crop = CFG['smart_crop'],\n morphology = CFG['morphology'],\n padding = CFG['padding'],\n meta = CFG['meta_data'])\n valid_dataset = ImageTestData(df = df_valid, \n transform = valid_augs,\n channels = CFG['num_channels'],\n crop = CFG['smart_crop'],\n morphology = CFG['morphology'],\n padding = CFG['padding'],\n meta = CFG['meta_data'])\n \n \n ##### DATA SAMPLERS\n \n # samplers\n train_sampler = RandomSampler(train_dataset)\n valid_sampler = SequentialSampler(valid_dataset)\n \n ##### DATA LOADERS\n \n # data loaders\n train_loader = DataLoader(dataset = train_dataset, \n batch_size = CFG['batch_size'], \n shuffle = True,\n num_workers = CFG['cpu_workers'],\n drop_last = True, \n collate_fn = lambda b: bms_collate(b, tokenizer),\n worker_init_fn = worker_init_fn,\n pin_memory = False)\n valid_loader = DataLoader(dataset = valid_dataset, \n batch_size = CFG['valid_batch_size'], \n shuffle = False,\n num_workers = CFG['cpu_workers'],\n drop_last = False,\n pin_memory = False)\n \n # feedback\n smart_print('- image size: {}x{}, p(augment): {}'.format(image_size, image_size, p_aug), CFG)\n if epoch is None:\n smart_print('-' * 55, CFG)\n \n return train_loader, valid_loader" ]
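The bms_collate helper in the code above exists because the InChI token sequences in a batch vary in length; pad_sequence right-pads them to the longest one before stacking. A minimal sketch of that padding behaviour, assuming a stand-in pad id of 0 (the real code reads it from tokenizer.stoi['<pad>']):

import torch
from torch.nn.utils.rnn import pad_sequence

pad_id = 0  # stand-in for tokenizer.stoi['<pad>'] (assumed value)
labels = [torch.tensor([5, 3, 9]), torch.tensor([7, 2])]  # variable-length label sequences
padded = pad_sequence(labels, batch_first=True, padding_value=pad_id)
print(padded)        # tensor([[5, 3, 9], [7, 2, 0]]) -- shorter sequences are right-padded
print(padded.shape)  # torch.Size([2, 3])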
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.Parameter", "torch.ones", "torch.LongTensor", "torch.Tensor", "torch.zeros", "torch.cat", "torch.nn.Embedding", "torch.nn.Sigmoid", "torch.nn.LSTMCell", "torch.nn.Linear", "torch.repeat_interleave", "torch.nn.ReLU", "torch.argmax" ], [ "torch.LongTensor", "pandas.concat", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "numpy.ones", "torch.stack", "torch.utils.data.sampler.SequentialSampler", "torch.utils.data.sampler.RandomSampler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
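The _backtrack docstring in the row above describes recovering the top-k sequences by following predecessor pointers from the last decoding step back to the first. A toy, single-batch sketch of that walk, with plain lists standing in for the per-step tensors (the real method also re-orders hidden states and handles beams that hit EOS early):

# predecessors[t][beam]: which beam at step t-1 this beam extends
predecessors = [[0, 0], [1, 0], [0, 1]]
# symbols[t][beam]: token emitted by this beam at step t
symbols = [[4, 7], [2, 9], [3, 6]]
best = 1  # index of the highest-scoring beam at the final step
tokens = []
for t in range(len(symbols) - 1, -1, -1):  # walk backwards through time
    tokens.append(symbols[t][best])
    best = predecessors[t][best]
tokens.reverse()  # backtracking runs in reverse time order, so flip at the end
print(tokens)     # [4, 9, 6]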
ijcai2022-5500/anego
[ "9a2e5f29f0ec0787ad8ce7822089345053442887", "9a2e5f29f0ec0787ad8ce7822089345053442887" ]
[ "cocoa/analysis/analyzer.py", "craigslistbargain/scripts/plot_id.py" ]
[ "\"\"\"Functions that analyze dialogues and models.\n\"\"\"\nimport json\nfrom collections import defaultdict\nimport numpy as np\n\nfrom cocoa.core.entity import is_entity\nfrom cocoa.model.util import entropy, safe_div\nfrom cocoa.model.counter import build_vocabulary, count_ngrams\nfrom cocoa.model.ngram import MLENgramModel\n\nfrom core.tokenizer import tokenize\n\nall_vocab = None\nno_ent_vocab = None\n\nclass Analyzer(object):\n def __init__(self, lexicon):\n self.lexicon = lexicon\n\n def example_stats(self, examples, agent=None):\n stats = {}\n stats['num_dialogues'] = len(examples)\n stats['num_turns_per_dialogue'] = np.mean([len(e.events) for e in examples])\n utterances = [tokenize(e.data) \\\n for example in examples \\\n for e in example.events if e.action == 'message' and\n (not agent or example.agents[e.agent] == agent)]\n stats['num_tokens_per_turn'] = np.mean([len(u) for u in utterances])\n\n vocab = set()\n for u in utterances:\n vocab.update(u)\n stats['vocab_size'] = len(vocab)\n global all_vocab\n all_vocab = vocab\n stats['corpus_perplexity'] = self.sequence_perplexity(utterances)\n\n self.print_stats(stats, 'dataset stats')\n return stats\n\n def intent_sequence_perplexity(self, intent_sequences, n=3):\n H = 0.\n N = 0\n for intent, sequences in intent_sequences.items():\n model = self.build_lm(sequences, n)\n H_, N_ = self.total_entropy(model, sequences)\n H += H_\n N += N_\n H = safe_div(H, N)\n return np.power(2, H)\n\n def total_entropy(self, model, sequences):\n H = 0.\n N = 0\n for s in sequences:\n h, n = model.entropy(s, average=False)\n H += h\n N += n\n return H, N\n\n def build_lm(self, sequences, n):\n vocab = build_vocabulary(1, *sequences)\n counter = count_ngrams(n, vocab, sequences, pad_left=True, pad_right=False)\n model = MLENgramModel(counter)\n return model\n\n def sequence_perplexity(self, sequences, n=3):\n model = self.build_lm(sequences, n)\n H, N = self.total_entropy(model, sequences)\n H = safe_div(H, N)\n return np.power(2, H)\n\n def print_stats(self, stats, name):\n print ('='*5, name.upper(), '='*5)\n print (json.dumps(stats, indent=2))\n\n def parser_stats(self, parsed_dialogues, agent=None):\n stats = {}\n non_entity_vocab = set()\n ents = set()\n stats['intents'] = defaultdict(int)\n intent_utterances = defaultdict(list)\n\n for dialogue in parsed_dialogues:\n for utterance in dialogue:\n if agent and utterance.agent != agent:\n continue\n if utterance.tokens is not None:\n tokens = [x.canonical.type if is_entity(x) else x for x in utterance.tokens]\n e = [x.surface for x in utterance.tokens if is_entity(x)]\n ents.update(e)\n non_entity_vocab.update(tokens)\n if utterance.lf and utterance.lf.intent != '<start>':\n stats['intents'][utterance.lf.intent] += 1\n if utterance.text is not None:\n intent_utterances[utterance.lf.intent].append(tokenize(utterance.text))\n stats['non_entity_vocab_size'] = len(non_entity_vocab)\n #print 'entities:', len(ents)\n #global no_ent_vocab\n #no_ent_vocab = non_entity_vocab\n #for x in all_vocab:\n # if not x in non_entity_vocab:\n # print x\n\n stats['intent_corpus_perplexity'] = self.intent_sequence_perplexity(intent_utterances)\n\n # Percentage intents\n #s = float(sum(stats['intents'].values()))\n #stats['intents'] = sorted(\n # [(k, v, v / s) for k, v in stats['intents'].items()],\n # key=lambda x: x[1], reverse=True)\n\n self.print_stats(stats, 'parser stats')\n return stats\n\n def manager_stats(self, manager):\n stats = {}\n stats['actions'] = manager.actions\n\n # Most likely 
sequence\n action_seq = [{'context': ('<start>', '<start>')}]\n for i in range(10):\n state = action_seq[-1]\n context = state['context']\n\n freqdist = manager.model.freqdist(context)\n counts = [x[1] for x in freqdist]\n ent = entropy(counts, normalized=False)\n state['entropy'] = ent\n\n state['most_likely_action'] = manager.most_likely_action(context, freqdist)\n state['min_entropy_action'] = manager.min_entropy_action(context, freqdist)\n\n new_context = (context[-1], state['most_likely_action'])\n action_seq.append({'context': new_context})\n\n stats['action_seq'] = action_seq\n\n self.print_stats(stats, 'manager stats')\n return stats\n\n #def generator_stats(self, generator):\n", "# Example:\n# python tb2plt.py --dir p_logs/ \\\n# --agent0 rl_decay.1 rl_dacay.2 rl_dacay.3 rl_decay.4 rl_decay.5 \\\n# --agent1 tom_decay.1.b tom_decay.2.b tom_decay.3.b tom_decay.4.b tom_decay.6.b \\\n# --show --max-epoch 90 --agent1-move 50 --agent1-leftclip 50 --draw-type rl\n\n\n# python tb2plt.py --dir apex_logs/ \\\n# --agent0 a2c_decay3 a2c_decay4 rl_decay.1 rl_dacay.2 rl_dacay.3 \\\n# --agent1 tom2 tom3 tom_decay4 tom_decay5 tom_decay6\\\n# --show --max-epoch 50\n# python tb2plt.py --dir apex_logs/ \\\n# --agent0 rl_decay.5 rl_dacay.2 rl_dacay.3 rl_decay.1 rl_decay.4 \\\n# --agent1 tom2 tom3 tom_decay4 tom_decay5 tom_decay6\\\n# --show --max-epoch 90\n\n\n# Load scalars from tensorboard logs and render with matplotlib\n# * extract_rewards_from_example(file_name):\n# Get rewards from a specific examples file\n# * load_from_tensorboard\n# Get logs (including rewards) from tensorboard\n\nfrom tensorboard.backend.event_processing import event_accumulator\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport os\nimport re\n\nedge = 0.17\nleft_e = 0.22\nup_e = 0.92\nright_e = 0.98\n\ndef extract_rewards_from_example(file_name='result.txt'):\n r = re.compile(r'([-+]?\\d+\\.\\d+)')\n with open(file_name,'r') as f:\n data = f.readlines()\n rewards = [[], []]\n for d in data:\n if d.find('reward: [0]') != -1:\n rewards[0].append(float(r.findall(d)[0]))\n if d.find('reward: [1]') != -1:\n rewards[1].append(float(r.findall(d)[0]))\n return [np.mean(rewards[0]), np.mean(rewards[1])]\n # print('rewards: {}, {}'.format(np.mean(rewards[0]), np.mean(rewards[1])))\n\ndef get_args():\n parser = argparse.ArgumentParser(conflict_handler='resolve')\n parser.add_argument('--dir', default='logs/', help='directory of the tensorboard log file')\n parser.add_argument('--agent0', nargs='+', type=str, help='names of agent 0 log runs, appended to --dir')\n parser.add_argument('--agent1', nargs='+', type=str, help='names of agent 1 log runs, appended to --dir')\n parser.add_argument('--draw-type', choices=['sl', 'rl'], type=str, default='sl', help='plot type')\n parser.add_argument('--show', action='store_true', help='show pictures directly')\n parser.add_argument('--fig-dirs', default='./', help='directory of these figures')\n parser.add_argument('--font-size', type=int, default=24, help='size of fonts')\n parser.add_argument('--label-size', type=int, default=24, help='size of font on label')\n\n # parser.add_argument('--test', type=str, nargs='+', choices=['a', 'b', 'c'], help='size of font on label')\n\n parser.add_argument('--max-epoch', type=int, default=50, help='')\n parser.add_argument('--plot-step', type=int, default=5, help='')\n parser.add_argument('--train-step', type=int, default=100)\n parser.add_argument('--one-point', default=False, action='store_true')\n parser.add_argument('--item-name', 
default='agent0/reward', type=str)\n parser.add_argument('--agent1-leftclip', default=50, type=int)\n parser.add_argument('--agent1-move', default=0, type=int)\n\n args = parser.parse_args()\n return args\n\ndef load_from_tensorboard(dir):\n data = []\n\n # for root, dirs, files in os.walk(dir):\n # for file in files:\n # fpath = os.path.join(root, file)\n ea = event_accumulator.EventAccumulator(dir)\n ea.Reload()\n # print(ea.scalars.Keys())\n\n # val_psnr = ea.scalars.Items('val_psnr')\n # print(len(val_psnr))\n # print([(i.step, i.value) for i in val_psnr])\n return ea.scalars\n\ndef get_xy(it):\n x, y = [], []\n for i in it:\n x.append(i.step)\n y.append(i.value)\n return x, y\n\n# Supervise Learning\n# dev/train loss\ndef aggregate_info(dirs, pyd, plot_step, item_name='agent0/reward'):\n for d in dirs:\n scalars = load_from_tensorboard(d)\n xx, yy = get_xy(scalars.Items(item_name))\n for i, y in enumerate(yy):\n if args.one_point and xx[i] % (args.train_step* args.plot_step) != 0:\n continue\n # x = (xx[i] // args.train_step - 1) // args.plot_step * args.plot_step\n x = xx[i] // plot_step * plot_step\n if not pyd.get(x):\n pyd[x] = []\n pyd[x].append(y)\n\ndef draw_sl_training_curve(tomid_dirs, tom_dirs, args):\n\n max_epoch = 1800\n plot_step = max_epoch // 100\n plot_x = list(range(0, max_epoch, plot_step))\n print(plot_step)\n plot_y_dict = [{}, {}]\n\n tom_loss = \"tom0/dev_price_loss\"\n id_accu = \"identity0/dev_accuracy\"\n\n # load rl data\n aggregate_info(tom_dirs, plot_y_dict[1], plot_step, item_name=tom_loss)\n aggregate_info(tomid_dirs, plot_y_dict[0], plot_step, item_name=tom_loss)\n\n # aggregate_info(tomid_dirs, plot_y_dict[0], plot_step, item_name=id_accu)\n # print(plot_y_dict[0].keys(), plot_x)\n # print(plot_y_dict)\n\n # for i in range(2):\n # step = 50\n # print('[{agent}]{step}: {mean}(+-{std})'\n # .format(agent=i, step=step, mean=np.mean(plot_y_dict[i][step]), std = np.std(plot_y_dict[i][step])))\n\n\n # Draw text\n labels = ['tom with identifier', 'tom']\n x = []\n y = []\n plt.style.use('ggplot')\n\n # Draw train set\n for i in range(2):\n ymean = []\n ystd = []\n for j, x in enumerate(plot_x):\n yylist = plot_y_dict[i][x]\n mean = np.mean(yylist)\n std = np.std(yylist)\n ymean.append(mean)\n ystd.append(std)\n # plt.plot(plot_x, ymean, label=labels[i])\n # plt.errorbar(plot_x, ymean, ystd, label=labels[i], alpha=0.7)\n # std = [np.std(np.array(j)) for j in loss_h[i]]\n # sup = [y[j] + ystd[j] for j in range(len(y))]\n # inf = [y[j] - ystd[j] for j in range(len(y))]\n\n sup = [ymean[j] + ystd[j] for j in range(len(ymean))]\n inf = [ymean[j] - ystd[j] for j in range(len(ymean))]\n\n plt.plot(plot_x, ymean, label=labels[i], )\n plt.fill_between(plot_x, inf, sup, alpha=0.3)\n\n plt.axhline(y=0.4, ls=\":\", c='gray')\n plt.axhline(y=0, ls=\":\", c='gray')\n\n plt.ylim(0.7, 7)\n\n plt.xlabel('Episode', fontsize=args.font_size)\n plt.ylabel('Loss * 1000', fontsize=args.font_size)\n # plt.title('ToM ', fontsize=args.font_size)\n plt.tick_params(labelsize=args.label_size)\n plt.subplots_adjust(bottom=edge, left=left_e, top=up_e, right=right_e)\n plt.legend(fontsize=args.font_size)\n if args.show:\n plt.show()\n else:\n plt.savefig(os.path.join(args.fig_dirs, 'tom_sl.eps'))\n plt.clf()\n\ndef draw_id_training_curve(tomid_dirs, args):\n max_epoch = 1800\n nums = 10\n plot_step = max_epoch // nums\n plot_x = list(range(0, 10))\n print(plot_step)\n plot_y_dict = [{}, {}]\n\n id_accu = [\"identity0/dev_accuracy\", \"identity0/dev_accuracy2\"]\n # scalars = []\n\n def 
aggregate_info0(dirs, plot_step, item_name='agent0/reward'):\n ret = []\n for d in dirs:\n scalars = load_from_tensorboard(d)\n xx, yy = get_xy(scalars.Items(item_name))\n for i in range(max(0, len(yy)-plot_step), len(yy)):\n y = yy[i]\n # if args.one_point and xx[i] % (args.train_step * args.plot_step) != 0:\n # continue\n # x = (xx[i] // args.train_step - 1) // args.plot_step * args.plot_step\n x = xx[i] // plot_step * plot_step\n ret.append(y)\n # if not pyd.get(x):\n # pyd[x] = []\n # pyd[x].append(y)\n return ret\n\n # load rl data\n # aggregate_info(tom_dirs, plot_y_dict[1], plot_step, item_name=tom_loss)\n # aggregate_info(tomid_dirs, plot_y_dict[0], plot_step, item_name=tom_loss)\n for j in range(2):\n plot_y_dict[j][-1] = aggregate_info0(tomid_dirs, 20, item_name=id_accu[j])\n for i in range(10):\n tmpdir = [os.path.join(p, 'step_{}'.format(i)) for p in tomid_dirs]\n ret = aggregate_info0(tmpdir, 20, item_name=id_accu[j])\n plot_y_dict[j][i] = ret\n # print(plot_y_dict[0].keys(), plot_x)\n # print(plot_y_dict)\n\n # for i in range(2):\n # step = 50\n # print('[{agent}]{step}: {mean}(+-{std})'\n # .format(agent=i, step=step, mean=np.mean(plot_y_dict[i][step]), std = np.std(plot_y_dict[i][step])))\n\n # Draw text\n labels = ['tom with identifier', 'tom']\n x = []\n y = []\n plt.style.use('ggplot')\n\n # Draw train set\n for i in range(2):\n ymean = []\n ystd = []\n for x in range(0, 10):\n yylist = plot_y_dict[i][x]\n mean = np.mean(yylist)\n std = np.std(yylist)\n ymean.append(mean)\n ystd.append(std)\n\n sup = [ymean[j] + ystd[j] for j in range(len(ymean))]\n inf = [ymean[j] - ystd[j] for j in range(len(ymean))]\n\n if i == 0:\n print('top1:', ymean)\n plt.bar([x+1-0.2 for x in plot_x], ymean, label=\"separate results\", width=0.4, alpha=0.7)\n # plt.errorbar([x+1 for x in plot_x], ymean, ystd, label=\"\", alpha=0.7, capsize=0.2, ecolor='gray', ls='')\n else:\n print('top3:', ymean)\n plt.bar([x + 1+0.2 for x in plot_x], ymean, label=\"separate results\", width=0.4, alpha=0.7)\n # plt.errorbar([x + 1 for x in plot_x], ymean, ystd, label=\"\", alpha=0.7, capsize=0.2, ecolor='gray', ls='')\n\n mean = np.mean(plot_y_dict[0][-1])\n std = np.std(plot_y_dict[0][-1])\n # mean = 0.5\n\n print('top1_mean:', mean)\n plt.bar([-1-0.2], mean, label=\"average result\", width=0.4, alpha=0.7)\n # plt.errorbar([-1], mean, std, label=\"\", alpha=0.7, capsize=0.2, ecolor='gray', ls='')\n\n mean = np.mean(plot_y_dict[1][-1])\n std = np.std(plot_y_dict[1][-1])\n print('top3_mean:', mean)\n plt.bar([-1+0.2], mean, label=\"average result\", width=0.4, alpha=0.7)\n # plt.errorbar([-1], mean, std, label=\"\", alpha=0.7, capsize=0.2, ecolor='gray', ls='')\n # plt.plot(plot_x, ymean, label=labels[i], )\n # plt.fill_between(plot_x, inf, sup, alpha=0.3)\n\n plt.axhline(y=1./7, ls=\"-.\", c='black')\n plt.axhline(y=mean, ls=\":\", c='gray')\n\n plt.xticks([x+1 for x in plot_x], [x+1 for x in plot_x])\n\n plt.ylim(0.1, 1)\n\n plt.xlabel('Turns', fontsize=args.font_size)\n plt.ylabel('Accuracy', fontsize=args.font_size)\n # plt.title('ToM ', fontsize=args.font_size)\n plt.tick_params(labelsize=args.label_size)\n plt.subplots_adjust(bottom=edge, left=left_e, top=up_e, right=right_e)\n # plt.legend(fontsize=args.font_size)\n if args.show:\n plt.show()\n else:\n plt.savefig(os.path.join(args.fig_dirs, 'id.eps'))\n plt.clf()\n\n# def draw_it_var(max_var=2):\n# #f = open('record/bias%.2f.pkl' % max_var, 'rb')\n# f = open('record/var_%.1f.pkl'%max_var, 'rb')\n# loss_h = pickle.load(f)\n# # color = ['magenta', 
'orange', 'green', 'red']\n# if max_var == 2:\n# plt.figure(figsize=(6.5,5))\n# else:\n# plt.figure(figsize=(6.3,5))\n# for i in range(4):\n# if i == 1:\n# plt.plot([])\n# plt.fill_between([],[],[])\n# continue\n# #loss_h[i] = loss_h[i][:70]\n# y = [np.mean(np.array(j)) for j in loss_h[i]]\n# x = list(range(len(y)))\n# std = [np.std(np.array(j)) for j in loss_h[i]]\n# sup = [y[j] + std[j] for j in range(len(y))]\n# inf = [y[j] - std[j] for j in range(len(y))]\n#\n# sup = [y[j] + std[j] for j in range(len(y))]\n# inf = [y[j] - std[j] for j in range(len(y))]\n#\n# plt.plot(x, y, label=labels[i], )\n# plt.fill_between(x, inf, sup, alpha=0.3)\n# plt.ylim((0, y[0]*0.4))\n# plt.xlim((0, len(x)-10))\n#\n# plt.xlabel('Budget', fontsize=font_size)\n# plt.ylabel('Loss', fontsize=font_size)\n# #plt.title('Variance in [0.1, %.1f]' % (max_var), fontsize=font_size)\n# plt.tick_params(labelsize=labelsize)\n# plt.subplots_adjust(bottom=edge, left = left_e, top=up_e, right=right_e)\n# plt.legend(fontsize=font_size)\n# plt.show()\n\ndef draw_rl_training_curve(rl_dirs, tom_dirs, args):\n plot_x = [list(range(0, args.max_epoch, args.plot_step)),\n list(range(args.agent1_leftclip, args.max_epoch, args.plot_step))]\n plot_y_dict = [{}, {}]\n\n def aggregate_info(dirs, pyd, item_name='agent0/reward', right_move=0):\n for d in dirs:\n scalars = load_from_tensorboard(d)\n xx, yy = get_xy(scalars.Items(item_name))\n for i, y in enumerate(yy):\n if args.one_point and xx[i] % (args.train_step * args.plot_step) != 0:\n continue\n x = (xx[i] // args.train_step - 1 + right_move) // args.plot_step * args.plot_step\n if not pyd.get(x):\n pyd[x] = []\n pyd[x].append(y)\n\n # load rl data\n aggregate_info(rl_dirs, plot_y_dict[0], item_name=args.item_name)\n aggregate_info(tom_dirs, plot_y_dict[1], item_name=args.item_name, right_move=args.agent1_move)\n\n # for i in range(2):\n # step = 50\n # print('[{agent}]{step}: {mean}(+-{std})'\n # .format(agent=i, step=step, mean=np.mean(plot_y_dict[i][step]), std = np.std(plot_y_dict[i][step])))\n\n # Draw text\n labels = ['rl', 'tom']\n x = []\n y = []\n plt.style.use('ggplot')\n\n # Draw train set\n for i in range(2):\n ymean = []\n ystd = []\n for j, x in enumerate(plot_x[i]):\n yylist = plot_y_dict[i][x]\n mean = np.mean(yylist)\n std = np.std(yylist)\n ymean.append(mean)\n ystd.append(std)\n # plt.plot(plot_x, ymean, label=labels[i])\n plt.errorbar(plot_x[i], ymean, ystd, label=labels[i], alpha=0.7)\n\n plt.axhline(y=0.4, ls=\":\", c='gray')\n plt.axhline(y=0, ls=\":\", c='gray')\n\n plt.xlabel('Episode', fontsize=args.font_size)\n plt.ylabel('Reward', fontsize=args.font_size)\n plt.title('Inference Fine Tuning', fontsize=args.font_size)\n plt.tick_params(labelsize=args.label_size)\n plt.subplots_adjust(bottom=edge, left=left_e, top=up_e, right=right_e)\n plt.legend(fontsize=args.font_size)\n if args.show:\n plt.show()\n else:\n plt.savefig(os.path.join(args.fig_dirs, 'rl.eps')) # filename assumed here, following the tom_sl.eps / id.eps pattern used above\n plt.clf()\n\nif __name__ == '__main__':\n args = get_args()\n # data = load_from_tensorboard(args.dir)\n\n dirs0 = [args.dir+i for i in args.agent0]\n dirs1 = [args.dir+i for i in args.agent1]\n\n # draw_sl_training_curve(dirs0, dirs1, args)\n draw_id_training_curve(dirs0, args)\n\n # if args.draw_type == 'sl':\n # # Draw sl curve\n # draw_sl_training_curve(dirs0, dirs1, args)\n # elif args.draw_type == 'rl':\n # # Draw rl curve\n # draw_rl_training_curve(dirs0, dirs1, args)\n # elif args.draw_type == 'tom':\n # pass\n\n\n" ]
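load_from_tensorboard above is built on TensorBoard's EventAccumulator; the same read pattern in isolation, where the log directory 'logs/run1' and the scalar tag 'agent0/reward' are placeholders:

from tensorboard.backend.event_processing import event_accumulator

ea = event_accumulator.EventAccumulator('logs/run1')  # placeholder log dir
ea.Reload()               # parse the event files on disk
print(ea.scalars.Keys())  # list the scalar tags that were logged
points = [(e.step, e.value) for e in ea.scalars.Items('agent0/reward')]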
[ [ "numpy.power" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.mean", "numpy.std", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.style.use", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhline", "matplotlib.pyplot.clf", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.tick_params" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
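Analyzer.sequence_perplexity in the row above reduces to 2 raised to the average per-token entropy (H = safe_div(H, N), then np.power(2, H)). A small numeric check of that formula with made-up totals:

import numpy as np

H, N = 120.0, 60                 # hypothetical total entropy (bits) and token count
perplexity = np.power(2, H / N)  # safe_div(H, N) in the original guards against N == 0
print(perplexity)                # 4.0, i.e. 2 ** 2.0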
qdmy/Adelaidet-Quantization
[ "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b", "e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b" ]
[ "codebase/third_party/spos_ofa/ofa/imagenet_classification/networks/mobilenet_v3.py", "detectron2_ofa/modeling/meta_arch/blendmask/basis_module.py", "codebase/third_party/spos_ofa/ofa/utils/pytorch_utils.py", "detectron2_ofa/modeling/matcher.py", "codebase/third_party/ofa/evaluate_archs.py", "tools/convert_checkpoint_notshared_norm.py", "codebase/third_party/spos_ofa/ofa/imagenet_classification/networks/mobilenet_v3_cifar.py", "detectron2_ofa/modeling/backbone/resnet.py" ]
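The pytorch_utils code in the row below implements label smoothing as a convex mix of a one-hot target with the uniform distribution. A numeric sketch of that mix with arbitrary values (one sample, four classes, smoothing 0.1), mirroring the scatter_-based label_smooth that follows:

import torch

target = torch.tensor([2])                            # one label, n_classes = 4
soft = torch.zeros(1, 4).scatter_(1, target.unsqueeze(1), 1)  # one-hot [[0, 0, 1, 0]]
soft = soft * (1 - 0.1) + 0.1 / 4                     # mix with the uniform distribution
print(soft)                                            # tensor([[0.0250, 0.0250, 0.9250, 0.0250]])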
[ "# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\nimport copy\nimport torch.nn as nn\n\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import set_layer_from_config, MBConvLayer, ConvLayer, IdentityLayer, LinearLayer, ResidualBlock\nfrom codebase.third_party.spos_ofa.ofa.utils import MyNetwork, make_divisible, MyGlobalAvgPool2d\n\n__all__ = ['MobileNetV3', 'MobileNetV3Large']\n\n\nclass MobileNetV3(MyNetwork):\n\n\tdef __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):\n\t\tsuper(MobileNetV3, self).__init__()\n\n\t\tself.first_conv = first_conv\n\t\tself.blocks = nn.ModuleList(blocks)\n\t\tself.final_expand_layer = final_expand_layer\n\t\tself.global_avg_pool = MyGlobalAvgPool2d(keep_dim=True)\n\t\tself.feature_mix_layer = feature_mix_layer\n\t\tself.classifier = classifier\n\n\tdef forward(self, x):\n\t\tx = self.first_conv(x)\n\t\tfor block in self.blocks:\n\t\t\tx = block(x)\n\t\tx = self.final_expand_layer(x)\n\t\tx = self.global_avg_pool(x) # global average pooling\n\t\tx = self.feature_mix_layer(x)\n\t\tx = x.view(x.size(0), -1)\n\t\tx = self.classifier(x)\n\t\treturn x\n\n\t@property\n\tdef module_str(self):\n\t\t_str = self.first_conv.module_str + '\\n'\n\t\tfor block in self.blocks:\n\t\t\t_str += block.module_str + '\\n'\n\t\t_str += self.final_expand_layer.module_str + '\\n'\n\t\t_str += self.global_avg_pool.__repr__() + '\\n'\n\t\t_str += self.feature_mix_layer.module_str + '\\n'\n\t\t_str += self.classifier.module_str\n\t\treturn _str\n\n\t@property\n\tdef config(self):\n\t\treturn {\n\t\t\t'name': MobileNetV3.__name__,\n\t\t\t'bn': self.get_bn_param(),\n\t\t\t'first_conv': self.first_conv.config,\n\t\t\t'blocks': [\n\t\t\t\tblock.config for block in self.blocks\n\t\t\t],\n\t\t\t'final_expand_layer': self.final_expand_layer.config,\n\t\t\t'feature_mix_layer': self.feature_mix_layer.config,\n\t\t\t'classifier': self.classifier.config,\n\t\t}\n\n\t@staticmethod\n\tdef build_from_config(config):\n\t\tfirst_conv = set_layer_from_config(config['first_conv'])\n\t\tfinal_expand_layer = set_layer_from_config(config['final_expand_layer'])\n\t\tfeature_mix_layer = set_layer_from_config(config['feature_mix_layer'])\n\t\tclassifier = set_layer_from_config(config['classifier'])\n\n\t\tblocks = []\n\t\tfor block_config in config['blocks']:\n\t\t\tblocks.append(ResidualBlock.build_from_config(block_config))\n\n\t\tnet = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\tif 'bn' in config:\n\t\t\tnet.set_bn_param(**config['bn'])\n\t\telse:\n\t\t\tnet.set_bn_param(momentum=0.1, eps=1e-5)\n\n\t\treturn net\n\n\tdef zero_last_gamma(self):\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, ResidualBlock):\n\t\t\t\tif isinstance(m.conv, MBConvLayer) and isinstance(m.shortcut, IdentityLayer):\n\t\t\t\t\tm.conv.point_linear.bn.weight.data.zero_()\n\n\t@property\n\tdef grouped_block_index(self):\n\t\tinfo_list = []\n\t\tblock_index_list = []\n\t\tfor i, block in enumerate(self.blocks[1:], 1):\n\t\t\tif block.shortcut is None and len(block_index_list) > 0:\n\t\t\t\tinfo_list.append(block_index_list)\n\t\t\t\tblock_index_list = []\n\t\t\tblock_index_list.append(i)\n\t\tif len(block_index_list) > 0:\n\t\t\tinfo_list.append(block_index_list)\n\t\treturn info_list\n\n\t@staticmethod\n\tdef build_net_via_cfg(cfg, input_channel, last_channel, n_classes, 
dropout_rate):\n\t\t# first conv layer\n\t\tfirst_conv = ConvLayer(\n\t\t\t3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='h_swish', ops_order='weight_bn_act'\n\t\t)\n\t\t# build mobile blocks\n\t\tfeature_dim = input_channel\n\t\tblocks = []\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor k, mid_channel, out_channel, use_se, act_func, stride, expand_ratio in block_config_list:\n\t\t\t\tmb_conv = MBConvLayer(\n\t\t\t\t\tfeature_dim, out_channel, k, stride, expand_ratio, mid_channel, act_func, use_se\n\t\t\t\t)\n\t\t\t\tif stride == 1 and out_channel == feature_dim:\n\t\t\t\t\tshortcut = IdentityLayer(out_channel, out_channel)\n\t\t\t\telse:\n\t\t\t\t\tshortcut = None\n\t\t\t\tblocks.append(ResidualBlock(mb_conv, shortcut))\n\t\t\t\tfeature_dim = out_channel\n\t\t# final expand layer\n\t\tfinal_expand_layer = ConvLayer(\n\t\t\tfeature_dim, feature_dim * 6, kernel_size=1, use_bn=True, act_func='h_swish', ops_order='weight_bn_act',\n\t\t)\n\t\t# feature mix layer\n\t\tfeature_mix_layer = ConvLayer(\n\t\t\tfeature_dim * 6, last_channel, kernel_size=1, bias=False, use_bn=False, act_func='h_swish',\n\t\t)\n\t\t# classifier\n\t\tclassifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)\n\n\t\treturn first_conv, blocks, final_expand_layer, feature_mix_layer, classifier\n\n\t@staticmethod\n\tdef adjust_cfg(cfg, ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tfor i, (stage_id, block_config_list) in enumerate(cfg.items()):\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif ks is not None and stage_id != '0':\n\t\t\t\t\tblock_config[0] = ks\n\t\t\t\tif expand_ratio is not None and stage_id != '0':\n\t\t\t\t\tblock_config[-1] = expand_ratio\n\t\t\t\t\tblock_config[1] = None\n\t\t\t\t\tif stage_width_list is not None:\n\t\t\t\t\t\tblock_config[2] = stage_width_list[i]\n\t\t\tif depth_param is not None and stage_id != '0':\n\t\t\t\tnew_block_config_list = [block_config_list[0]]\n\t\t\t\tnew_block_config_list += [copy.deepcopy(block_config_list[-1]) for _ in range(depth_param - 1)]\n\t\t\t\tcfg[stage_id] = new_block_config_list\n\t\treturn cfg\n\n\tdef load_state_dict(self, state_dict, **kwargs):\n\t\tcurrent_state_dict = self.state_dict()\n\n\t\tfor key in state_dict:\n\t\t\tif key not in current_state_dict:\n\t\t\t\tassert '.mobile_inverted_conv.' 
in key\n\t\t\t\tnew_key = key.replace('.mobile_inverted_conv.', '.conv.')\n\t\t\telse:\n\t\t\t\tnew_key = key\n\t\t\tcurrent_state_dict[new_key] = state_dict[key]\n\t\tsuper(MobileNetV3, self).load_state_dict(current_state_dict)\n\n\nclass MobileNetV3Large(MobileNetV3):\n\n\tdef __init__(self, n_classes=1000, width_mult=1.0, bn_param=(0.1, 1e-5), dropout_rate=0.2,\n\t ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tinput_channel = 16\n\t\tlast_channel = 1280\n\n\t\tinput_channel = make_divisible(input_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\tlast_channel = make_divisible(last_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE) \\\n\t\t\tif width_mult > 1.0 else last_channel\n\n\t\tcfg = {\n\t\t\t# k, exp, c, se, nl, s, e,\n\t\t\t'0': [\n\t\t\t\t[3, 16, 16, False, 'relu', 1, 1],\n\t\t\t],\n\t\t\t'1': [\n\t\t\t\t[3, 64, 24, False, 'relu', 2, None], # 4\n\t\t\t\t[3, 72, 24, False, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'2': [\n\t\t\t\t[5, 72, 40, True, 'relu', 2, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'3': [\n\t\t\t\t[3, 240, 80, False, 'h_swish', 2, None], # 6\n\t\t\t\t[3, 200, 80, False, 'h_swish', 1, None], # 2.5\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t],\n\t\t\t'4': [\n\t\t\t\t[3, 480, 112, True, 'h_swish', 1, None], # 6\n\t\t\t\t[3, 672, 112, True, 'h_swish', 1, None], # 6\n\t\t\t],\n\t\t\t'5': [\n\t\t\t\t[5, 672, 160, True, 'h_swish', 2, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t]\n\t\t}\n\n\t\tcfg = self.adjust_cfg(cfg, ks, expand_ratio, depth_param, stage_width_list)\n\t\t# width multiplier on mobile setting, change `exp: 1` and `c: 2`\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif block_config[1] is not None:\n\t\t\t\t\tblock_config[1] = make_divisible(block_config[1] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\t\t\tblock_config[2] = make_divisible(block_config[2] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\n\t\tfirst_conv, blocks, final_expand_layer, feature_mix_layer, classifier = self.build_net_via_cfg(\n\t\t\tcfg, input_channel, last_channel, n_classes, dropout_rate\n\t\t)\n\t\tsuper(MobileNetV3Large, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\t# set bn param\n\t\tself.set_bn_param(*bn_param)\n", "from typing import Dict\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2_ofa.layers import conv_with_kaiming_uniform, get_norm\nfrom detectron2_ofa.utils.registry import Registry\nfrom detectron2_ofa.layers import ShapeSpec\nfrom ..semantic_seg import build_sem_seg_head\n\n\nBASIS_MODULE_REGISTRY = Registry(\"BASIS_MODULE\")\nBASIS_MODULE_REGISTRY.__doc__ = \"\"\"\nRegistry for basis module, which produces global bases from feature maps.\n\nThe registered object will be called with `obj(cfg, input_shape)`.\nThe call should return a `nn.Module` object.\n\"\"\"\n\n\ndef build_basis_module(cfg, input_shape):\n name = cfg.MODEL.BASIS_MODULE.NAME\n return BASIS_MODULE_REGISTRY.get(name)(cfg, input_shape)\n\n\n@BASIS_MODULE_REGISTRY.register()\nclass ProtoNet(nn.Module):\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n \"\"\"\n TODO: support deconv and variable channel width\n \"\"\"\n # official protonet has a relu after each conv\n 
super().__init__()\n # fmt: off\n mask_dim = cfg.MODEL.BASIS_MODULE.NUM_BASES\n planes = cfg.MODEL.BASIS_MODULE.CONVS_DIM\n self.in_features = cfg.MODEL.BASIS_MODULE.IN_FEATURES\n self.loss_on = cfg.MODEL.BASIS_MODULE.LOSS_ON\n norm = cfg.MODEL.BASIS_MODULE.NORM\n num_convs = cfg.MODEL.BASIS_MODULE.NUM_CONVS\n self.visualize = cfg.MODEL.BASIS_MODULE.VISUALIZE\n # fmt: on\n\n feature_channels = {k: v.channels for k, v in input_shape.items()}\n\n conv_block = conv_with_kaiming_uniform(norm, True) # conv relu bn\n self.refine = nn.ModuleList()\n for in_feature in self.in_features:\n self.refine.append(conv_block(\n feature_channels[in_feature], planes, 3, 1))\n tower = []\n for i in range(num_convs):\n tower.append(\n conv_block(planes, planes, 3, 1))\n tower.append(\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))\n tower.append(\n conv_block(planes, planes, 3, 1))\n tower.append(\n nn.Conv2d(planes, mask_dim, 1))\n self.add_module('tower', nn.Sequential(*tower))\n\n if self.loss_on:\n # fmt: off\n self.common_stride = cfg.MODEL.BASIS_MODULE.COMMON_STRIDE\n num_classes = cfg.MODEL.BASIS_MODULE.NUM_CLASSES + 1\n self.sem_loss_weight = cfg.MODEL.BASIS_MODULE.LOSS_WEIGHT\n # fmt: on\n\n inplanes = feature_channels[self.in_features[0]]\n self.seg_head = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=3,\n stride=1, padding=1, bias=False),\n get_norm(norm, planes), #nn.BatchNorm2d(planes),\n nn.ReLU(),\n nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=1, bias=False),\n get_norm(norm, planes), #nn.BatchNorm2d(planes),\n nn.ReLU(),\n nn.Conv2d(planes, num_classes, kernel_size=1,\n stride=1))\n\n def forward(self, features, targets=None):\n for i, f in enumerate(self.in_features):\n if i == 0:\n x = self.refine[i](features[f])\n else:\n x_p = self.refine[i](features[f])\n x_p = F.interpolate(x_p, x.size()[2:], mode=\"bilinear\", align_corners=False)\n x = x + x_p\n outputs = {\"bases\": [self.tower(x)]}\n losses = {}\n # auxiliary thing semantic loss\n if self.training and self.loss_on:\n sem_out = self.seg_head(features[self.in_features[0]])\n # resize target to reduce memory\n gt_sem = targets.unsqueeze(1).float()\n gt_sem = F.interpolate(\n gt_sem, scale_factor=1 / self.common_stride)\n seg_loss = F.cross_entropy(\n sem_out, gt_sem.squeeze().long())\n losses['loss_basis_sem'] = seg_loss * self.sem_loss_weight\n elif self.visualize:\n outputs[\"seg_thing_out\"] = self.seg_head(features[self.in_features[0]])\n return outputs, losses\n", "# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\nimport math\nimport copy\nimport time\nimport torch\nimport torch.nn as nn\n\n__all__ = [\n\t'mix_images', 'mix_labels',\n\t'label_smooth', 'cross_entropy_loss_with_soft_target', 'cross_entropy_with_label_smoothing',\n\t'clean_num_batch_tracked', 'rm_bn_from_net',\n\t'get_net_device', 'count_parameters', 'count_net_flops', 'measure_net_latency', 'get_net_info',\n\t'build_optimizer', 'calc_learning_rate',\n]\n\n\n\"\"\" Mixup \"\"\"\ndef mix_images(images, lam):\n\tflipped_images = torch.flip(images, dims=[0]) # flip along the batch dimension\n\treturn lam * images + (1 - lam) * flipped_images\n\n\ndef mix_labels(target, lam, n_classes, label_smoothing=0.1):\n\tonehot_target = label_smooth(target, n_classes, label_smoothing)\n\tflipped_target = torch.flip(onehot_target, dims=[0])\n\treturn lam * onehot_target + (1 - lam) 
* flipped_target\n\n\n\"\"\" Label smooth \"\"\"\ndef label_smooth(target, n_classes: int, label_smoothing=0.1):\n\t# convert to one-hot\n\tbatch_size = target.size(0)\n\ttarget = torch.unsqueeze(target, 1)\n\tsoft_target = torch.zeros((batch_size, n_classes), device=target.device)\n\tsoft_target.scatter_(1, target, 1)\n\t# label smoothing\n\tsoft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes\n\treturn soft_target\n\n\ndef cross_entropy_loss_with_soft_target(pred, soft_target):\n\tlogsoftmax = nn.LogSoftmax()\n\treturn torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))\n\n\ndef cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):\n\tsoft_target = label_smooth(target, pred.size(1), label_smoothing)\n\treturn cross_entropy_loss_with_soft_target(pred, soft_target)\n\n\n\"\"\" BN related \"\"\"\ndef clean_num_batch_tracked(net):\n\tfor m in net.modules():\n\t\tif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n\t\t\tif m.num_batches_tracked is not None:\n\t\t\t\tm.num_batches_tracked.zero_()\n\n\ndef rm_bn_from_net(net):\n\tfor m in net.modules():\n\t\tif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n\t\t\tm.forward = lambda x: x\n\n\n\"\"\" Network profiling \"\"\"\ndef get_net_device(net):\n\treturn net.parameters().__next__().device\n\n\ndef count_parameters(net):\n\ttotal_params = sum(p.numel() for p in net.parameters() if p.requires_grad)\n\treturn total_params\n\n\ndef count_net_flops(net, data_shape=(1, 3, 224, 224)):\n\tfrom .flops_counter import profile\n\tif isinstance(net, nn.DataParallel):\n\t\tnet = net.module\n\n\tflop, _ = profile(copy.deepcopy(net), data_shape)\n\treturn flop\n\n\ndef measure_net_latency(net, l_type='gpu8', fast=True, input_shape=(3, 224, 224), clean=False):\n\tif isinstance(net, nn.DataParallel):\n\t\tnet = net.module\n\n\t# remove bn from graph\n\trm_bn_from_net(net)\n\n\t# return `ms`\n\tif 'gpu' in l_type:\n\t\tl_type, batch_size = l_type[:3], int(l_type[3:])\n\telse:\n\t\tbatch_size = 1\n\n\tdata_shape = [batch_size] + list(input_shape)\n\tif l_type == 'cpu':\n\t\tif fast:\n\t\t\tn_warmup = 5\n\t\t\tn_sample = 10\n\t\telse:\n\t\t\tn_warmup = 50\n\t\t\tn_sample = 50\n\t\tif get_net_device(net) != torch.device('cpu'):\n\t\t\tif not clean:\n\t\t\t\tprint('move net to cpu for measuring cpu latency')\n\t\t\tnet = copy.deepcopy(net).cpu()\n\telif l_type == 'gpu':\n\t\tif fast:\n\t\t\tn_warmup = 5\n\t\t\tn_sample = 10\n\t\telse:\n\t\t\tn_warmup = 50\n\t\t\tn_sample = 50\n\telse:\n\t\traise NotImplementedError\n\timages = torch.zeros(data_shape, device=get_net_device(net))\n\n\tmeasured_latency = {'warmup': [], 'sample': []}\n\tnet.eval()\n\twith torch.no_grad():\n\t\tfor i in range(n_warmup):\n\t\t\tinner_start_time = time.time()\n\t\t\tnet(images)\n\t\t\tused_time = (time.time() - inner_start_time) * 1e3 # ms\n\t\t\tmeasured_latency['warmup'].append(used_time)\n\t\t\tif not clean:\n\t\t\t\tprint('Warmup %d: %.3f' % (i, used_time))\n\t\touter_start_time = time.time()\n\t\tfor i in range(n_sample):\n\t\t\tnet(images)\n\t\ttotal_time = (time.time() - outer_start_time) * 1e3 # ms\n\t\tmeasured_latency['sample'].append((total_time, n_sample))\n\treturn total_time / n_sample, measured_latency\n\n\ndef get_net_info(net, input_shape=(3, 224, 224), measure_latency=None, print_info=True):\n\tnet_info = {}\n\tif isinstance(net, nn.DataParallel):\n\t\tnet = net.module\n\n\t# parameters\n\tnet_info['params'] = count_parameters(net) / 1e6\n\n\t# flops\n\tnet_info['flops'] = 
count_net_flops(net, [1] + list(input_shape)) / 1e6\n\n\t# latencies\n\tlatency_types = [] if measure_latency is None else measure_latency.split('#')\n\tfor l_type in latency_types:\n\t\tlatency, measured_latency = measure_net_latency(net, l_type, fast=False, input_shape=input_shape)\n\t\tnet_info['%s latency' % l_type] = {\n\t\t\t'val': latency,\n\t\t\t'hist': measured_latency\n\t\t}\n\n\tif print_info:\n\t\t# print(net)\n\t\tprint('Total training params: %.2fM' % (net_info['params']))\n\t\tprint('Total FLOPs: %.2fM' % (net_info['flops']))\n\t\tfor l_type in latency_types:\n\t\t\tprint('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))\n\n\treturn net_info\n\n\n\"\"\" optimizer \"\"\"\ndef build_optimizer(net_params, opt_type, opt_param, init_lr, weight_decay, no_decay_keys):\n\tif no_decay_keys is not None:\n\t\tassert isinstance(net_params, list) and len(net_params) == 2\n\t\tnet_params = [\n\t\t\t{'params': net_params[0], 'weight_decay': weight_decay},\n\t\t\t{'params': net_params[1], 'weight_decay': 0},\n\t\t]\n\telse:\n\t\tnet_params = [{'params': net_params, 'weight_decay': weight_decay}]\n\n\tif opt_type == 'sgd':\n\t\topt_param = {} if opt_param is None else opt_param\n\t\tmomentum, nesterov = opt_param.get('momentum', 0.9), opt_param.get('nesterov', True)\n\t\toptimizer = torch.optim.SGD(net_params, init_lr, momentum=momentum, nesterov=nesterov)\n\telif opt_type == 'adam':\n\t\toptimizer = torch.optim.Adam(net_params, init_lr)\n\telse:\n\t\traise NotImplementedError\n\treturn optimizer\n\n\n\"\"\" learning rate schedule \"\"\"\ndef calc_learning_rate(epoch, init_lr, n_epochs, batch=0, nBatch=None, lr_schedule_type='cosine'):\n\tif lr_schedule_type == 'cosine':\n\t\tt_total = n_epochs * nBatch\n\t\tt_cur = epoch * nBatch + batch\n\t\tlr = 0.5 * init_lr * (1 + math.cos(math.pi * t_cur / t_total))\n\telif lr_schedule_type is None:\n\t\tlr = init_lr\n\telse:\n\t\traise ValueError('do not support: %s' % lr_schedule_type)\n\treturn lr\n", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport torch\n\n\nclass Matcher(object):\n \"\"\"\n This class assigns to each predicted \"element\" (e.g., a box) a ground-truth\n element. Each predicted element will have exactly zero or one matches; each\n ground-truth element may be matched to zero or more predicted elements.\n\n The matching is determined by the MxN match_quality_matrix, that characterizes\n how well each (ground-truth, prediction)-pair match each other. For example,\n if the elements are boxes, this matrix may contain box intersection-over-union\n overlap values.\n\n The matcher returns (a) a vector of length N containing the index of the\n ground-truth element m in [0, M) that matches to prediction n in [0, N).\n (b) a vector of length N containing the labels for each prediction.\n \"\"\"\n\n def __init__(self, thresholds, labels, allow_low_quality_matches=False):\n \"\"\"\n Args:\n thresholds (list): a list of thresholds used to stratify predictions\n into levels.\n labels (list): a list of values to label predictions belonging at\n each level. 
A label can be one of {-1, 0, 1} signifying\n {ignore, negative class, positive class}, respectively.\n allow_low_quality_matches (bool): if True, produce additional matches\n for predictions with maximum match quality lower than high_threshold.\n See set_low_quality_matches_ for more details.\n\n For example,\n thresholds = [0.3, 0.5]\n labels = [0, -1, 1]\n All predictions with iou < 0.3 will be marked with 0 and\n thus will be considered as false positives while training.\n All predictions with 0.3 <= iou < 0.5 will be marked with -1 and\n thus will be ignored.\n All predictions with 0.5 <= iou will be marked with 1 and\n thus will be considered as true positives.\n \"\"\"\n # Add -inf and +inf to first and last position in thresholds\n thresholds = thresholds[:]\n assert thresholds[0] > 0\n thresholds.insert(0, -float(\"inf\"))\n thresholds.append(float(\"inf\"))\n assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:]))\n assert all(l in [-1, 0, 1] for l in labels)\n assert len(labels) == len(thresholds) - 1\n self.thresholds = thresholds\n self.labels = labels\n self.allow_low_quality_matches = allow_low_quality_matches\n\n def __call__(self, match_quality_matrix):\n \"\"\"\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the\n pairwise quality between M ground-truth elements and N predicted\n elements. All elements must be >= 0 (due to the use of `torch.nonzero`\n for selecting indices in :meth:`set_low_quality_matches_`).\n\n Returns:\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched\n ground-truth index in [0, M)\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\n whether a prediction is a true or false positive or ignored\n \"\"\"\n assert match_quality_matrix.dim() == 2\n if match_quality_matrix.numel() == 0:\n default_matches = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), 0, dtype=torch.int64\n ) # no gt was given, so default every prediction to IOU = 0, i.e. the background class\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\n # to `self.labels[0]`, which usually defaults to background class 0\n # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds\n default_match_labels = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\n )\n return default_matches, default_match_labels\n\n assert torch.all(match_quality_matrix >= 0)\n\n # match_quality_matrix is M (gt) x N (predicted)\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\n matched_vals, matches = match_quality_matrix.max(dim=0) # for each prediction, find the best-matching IOU value and its gt\n\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\n\n for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\n low_high = (matched_vals >= low) & (matched_vals < high)\n match_labels[low_high] = l\n\n if self.allow_low_quality_matches:\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\n\n return matches, match_labels\n\n def set_low_quality_matches_(self, match_labels, match_quality_matrix):\n \"\"\"\n Produce additional matches for predictions that have only low-quality matches.\n Specifically, for each ground-truth G find the set of predictions that have\n maximum overlap with it (including ties); for each prediction in that set, if\n it is unmatched, then match it to the ground-truth G.\n\n This function implements the RPN assignment case (i) in Sec. 
3.1.2 of the\n Faster R-CNN paper: https://arxiv.org/pdf/1506.01497v3.pdf.\n \"\"\"\n # For each gt, find the prediction with which it has highest quality\n highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)\n # Find the highest quality match available, even if it is low, including ties.\n # Note that the match qualities must be positive due to the use of\n # `torch.nonzero`.\n gt_pred_pairs_of_highest_quality = torch.nonzero(\n match_quality_matrix == highest_quality_foreach_gt[:, None]\n )\n # Example gt_pred_pairs_of_highest_quality:\n # tensor([[ 0, 39796],\n # [ 1, 32055],\n # [ 1, 32070],\n # [ 2, 39190],\n # [ 2, 40255],\n # [ 3, 40390],\n # [ 3, 41455],\n # [ 4, 45470],\n # [ 5, 45325],\n # [ 5, 46390]])\n # Each row is a (gt index, prediction index)\n # Note how gt items 1, 2, 3, and 5 each have two ties\n\n pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]\n match_labels[pred_inds_to_update] = 1 # mark the highest-IOU predictions as foreground\n", "import os\nimport random\n\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom torch.utils.data import Subset\n\nfrom codebase.core.arch_representation.ofa import OFAArchitecture\nfrom codebase.third_party.ofa import OFAMobileNetV3\nfrom codebase.third_party.spos_ofa import SPOSMobileNetV3\nfrom codebase.third_party.ofa.utils import set_running_statistics\n\nfrom codebase.torchutils import logger\nfrom codebase.torchutils.metrics import AccuracyMetric\nfrom codebase.torchutils.common import compute_flops, compute_nparam\nfrom codebase.torchutils.common import auto_device\n\n\ndef get_train_transform(resolution):\n return transforms.Compose([\n transforms.RandomResizedCrop(resolution),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=32. 
/ 255., saturation=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n\n\ndef get_test_transform(resolution):\n return transforms.Compose([\n transforms.Resize(int(resolution/7*8)),\n transforms.CenterCrop(resolution),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n\n\ndef validate(model, loader, cache_loader: list):\n model.eval()\n acc_metric = AccuracyMetric(topk=(1, 5))\n if cache_loader:\n loader = cache_loader\n add_cache = False\n else:\n add_cache = True\n with torch.no_grad():\n for _, (datas, targets) in enumerate(loader):\n if add_cache:\n cache_loader.append((datas, targets))\n datas, targets = datas.to(device=auto_device), targets.to(device=auto_device)\n outputs = model(datas)\n acc_metric.update(outputs, targets)\n return acc_metric.at(topk=1).rate, acc_metric.at(topk=5).rate\n\n\nclass OFAArchitectureEvaluator:\n def __init__(self, imagenet_root, width, pretrained_supernet, test=True, resolution=224,\n batch_size=100, num_workers=8):\n self.test = test\n self.resolution = resolution\n imagenet_train_root = os.path.join(imagenet_root, \"train\")\n imagenet_val_root = os.path.join(imagenet_root, \"val\")\n\n if not os.path.exists(imagenet_train_root) or not os.path.exists(imagenet_val_root):\n raise ValueError(f\"ImageNet folder does not exist at {imagenet_train_root} or {imagenet_val_root}.\")\n\n self.ofa_supernet = OFAMobileNetV3(\n dropout_rate=0,\n width_mult_list=width,\n ks_list=[3, 5, 7],\n expand_ratio_list=[3, 4, 6],\n depth_list=[2, 3, 4],\n )\n self.ofa_supernet.load_state_dict(torch.load(pretrained_supernet, map_location=\"cpu\"))\n self.ofa_supernet.eval()\n self.ofa_supernet.to(device=auto_device)\n\n self.trainset = datasets.ImageFolder(\n imagenet_train_root,\n transforms.Compose([\n transforms.RandomResizedCrop(self.resolution),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=32. 
/ 255., saturation=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ]))\n\n n_samples = len(self.trainset)\n g = torch.Generator()\n g.manual_seed(937162211)\n index = torch.randperm(n_samples, generator=g).tolist()\n\n sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(index[:2000])\n self.bn_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=100, sampler=sub_sampler,\n num_workers=8, pin_memory=False)\n\n if self.test:\n self.valset = datasets.ImageFolder(imagenet_val_root, transforms.Compose([\n transforms.Resize(int(self.resolution/7*8)),\n transforms.CenterCrop(self.resolution),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ]))\n self.target_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=num_workers, pin_memory=True)\n else:\n self.valset = datasets.ImageFolder(imagenet_train_root, transforms.Compose([\n transforms.Resize(int(self.resolution/7*8)),\n transforms.CenterCrop(self.resolution),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ]))\n\n g = torch.Generator()\n g.manual_seed(2147483647) # set random seed before sampling validation set\n rand_indexes = torch.randperm(n_samples, generator=g).tolist()\n valid_indexes = rand_indexes[:10000]\n\n sub_valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indexes)\n self.target_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, sampler=sub_valid_sampler,\n num_workers=num_workers, pin_memory=False)\n\n self.cache_target_loader = list()\n\n def set_resolution(self, resolution):\n self.resolution = resolution\n self.trainset.transform = get_train_transform(resolution)\n self.valset.transform = get_test_transform(resolution)\n\n def test_arch(self, arch: OFAArchitecture, resolution):\n if resolution is not None:\n self.set_resolution(resolution)\n self.ofa_supernet.set_active_subnet(ks=arch.ks, e=arch.ratios, d=arch.depths)\n self.ofa_childnet = self.ofa_supernet.get_active_subnet(preserve_weight=True)\n set_running_statistics(self.ofa_childnet, self.bn_loader)\n top1acc, top5acc = validate(self.ofa_childnet, self.target_loader, self.cache_target_loader)\n flops = compute_flops(self.ofa_childnet, (1, 3, self.resolution, self.resolution), auto_device)\n n_params = compute_nparam(self.ofa_childnet)\n return top1acc, top5acc, flops, n_params\n\n\ndef select_subset(samples, n_total_class=1000, n_select_class=100, seed=542027):\n g = torch.Generator()\n g.manual_seed(seed)\n shuffle_class = torch.randperm(n_total_class, generator=g).tolist()\n select_class = shuffle_class[:n_select_class]\n select_class_set = set(select_class)\n select_index = []\n for index, (path, target) in enumerate(samples):\n if target in select_class_set:\n select_index.append(index)\n\n g = torch.Generator()\n g.manual_seed(seed+1)\n index_index = torch.randperm(len(select_index), generator=g).tolist()\n new_select_index = [select_index[i] for i in index_index]\n # random.shuffle(select_index)\n # build target mapping\n sorted_select_class = sorted(select_class)\n target_mapping = {k: v for v, k in enumerate(sorted_select_class)}\n return new_select_index, target_mapping\n\n\nclass OurOFAArchitectureEvaluator:\n def __init__(self, imagenet_root, width, pretrained_supernet, test=True, resolution=224,\n batch_size=100, num_workers=8):\n self.test = test\n 
self.resolution = resolution\n        imagenet_train_root = os.path.join(imagenet_root, \"train\")\n        imagenet_val_root = os.path.join(imagenet_root, \"val\")\n\n        if not os.path.exists(imagenet_train_root) or not os.path.exists(imagenet_val_root):\n            raise ValueError(f\"ImageNet folder does not exist at {imagenet_train_root} or {imagenet_val_root}.\")\n\n
        self.ofa_supernet = OFAMobileNetV3(\n            n_classes=100,\n            dropout_rate=0,\n            width_mult_list=width,\n            ks_list=[3, 5, 7],\n            expand_ratio_list=[3, 4, 6],\n            depth_list=[2, 3, 4],\n        )\n        self.ofa_supernet.load_state_dict(torch.load(pretrained_supernet, map_location=\"cpu\"))\n        self.ofa_supernet.eval()\n        self.ofa_supernet.to(device=auto_device)\n\n
        # Deterministic 100-class subset: the first 1000 selected images calibrate BN\n        # statistics, the last 1000 serve as the held-out validation split.\n        train_dataset = datasets.ImageFolder(imagenet_train_root, get_train_transform(224))\n        select_index, target_mapping = select_subset(train_dataset.samples)\n        val_index = select_index[-1000:]\n        train_dataset.target_transform = lambda x: target_mapping[x]\n        sub_val_dataset = Subset(train_dataset, val_index)\n\n
        bn_index = select_index[:1000]\n        bn_train_dataset = Subset(train_dataset, bn_index)\n        self.bn_loader = torch.utils.data.DataLoader(\n            bn_train_dataset,\n            batch_size=100, shuffle=False,\n            num_workers=8, pin_memory=False)\n        logger.info(f\"bn loader size={len(bn_train_dataset)}\")\n\n
        if self.test:\n            self.valset = datasets.ImageFolder(imagenet_val_root, get_test_transform(self.resolution))\n            self.target_loader = torch.utils.data.DataLoader(\n                self.valset,\n                batch_size=batch_size, shuffle=False,\n                num_workers=num_workers, pin_memory=True)\n            logger.info(f\"test loader size={len(self.valset)}\")\n
        else:\n            self.target_loader = torch.utils.data.DataLoader(\n                sub_val_dataset,\n                batch_size=batch_size, shuffle=False,\n                num_workers=num_workers, pin_memory=False)\n            logger.info(f\"val loader size={len(sub_val_dataset)}\")\n\n
        self.cache_target_loader = list()\n        self.cache_bn_loader = list()\n\n        logger.info(\"Caching validation batches.\")\n        for _, (datas, targets) in enumerate(self.target_loader):\n            self.cache_target_loader.append((datas, targets))\n\n
    def test_arch(self, arch: OFAArchitecture, resolution=None):\n        # `resolution` is kept for interface compatibility; inputs are fixed at 224 here.\n        self.ofa_supernet.set_active_subnet(ks=arch.ks, e=arch.ratios, d=arch.depths)\n        ofa_childnet = self.ofa_supernet.get_active_subnet(preserve_weight=True)\n        logger.info(\"Fetched child network.\")\n        set_running_statistics(ofa_childnet, self.bn_loader)\n        logger.info(\"Recalibrated BN statistics.\")\n        top1acc, top5acc = validate(ofa_childnet, self.cache_target_loader, self.cache_target_loader)\n        logger.info(\"Computed accuracy.\")\n        # FLOPs and parameter counts are not computed here; return placeholders.\n        return top1acc, top5acc, 0, 0\n\n
class NOFAArchitectureEvaluator:\n    def __init__(self, imagenet_root, width, pretrained_supernet, test=True, resolution=224,\n                 batch_size=100, num_workers=8):\n        self.test = test\n        self.resolution = resolution\n        imagenet_train_root = os.path.join(imagenet_root, \"train\")\n        imagenet_val_root = os.path.join(imagenet_root, \"val\")\n\n        if not os.path.exists(imagenet_train_root) or not os.path.exists(imagenet_val_root):\n            raise ValueError(f\"ImageNet folder does not exist at {imagenet_train_root} or {imagenet_val_root}.\")\n\n
        self.ofa_supernet = SPOSMobileNetV3(\n            n_classes=100,\n            dropout_rate=0,\n            width_mult=width,\n            ks_list=[3, 5, 7],\n            expand_ratio_list=[3, 4, 6],\n            depth_list=[2, 3, 4],\n        )\n        self.ofa_supernet.load_state_dict(torch.load(pretrained_supernet, map_location=\"cpu\"))\n        self.ofa_supernet.eval()\n        self.ofa_supernet.to(device=auto_device)\n\n
        train_dataset = datasets.ImageFolder(imagenet_train_root, get_train_transform(224))\n        select_index, target_mapping = select_subset(train_dataset.samples)\n        val_index = select_index[-1000:]\n        train_dataset.target_transform = lambda x: target_mapping[x]\n        sub_val_dataset = Subset(train_dataset, val_index)\n\n
        bn_index = select_index[:1000]\n        bn_train_dataset = Subset(train_dataset, bn_index)\n        self.bn_loader = torch.utils.data.DataLoader(\n            bn_train_dataset,\n            batch_size=100, shuffle=False,\n            num_workers=8, pin_memory=False)\n        logger.info(f\"bn loader size={len(bn_train_dataset)}\")\n\n
        if self.test:\n            self.valset = datasets.ImageFolder(imagenet_val_root, get_test_transform(self.resolution))\n            self.target_loader = torch.utils.data.DataLoader(\n                self.valset,\n                batch_size=batch_size, shuffle=False,\n                num_workers=num_workers, pin_memory=True)\n            logger.info(f\"test loader size={len(self.valset)}\")\n
        else:\n            self.target_loader = torch.utils.data.DataLoader(\n                sub_val_dataset,\n                batch_size=batch_size, shuffle=False,\n                num_workers=num_workers, pin_memory=False)\n            logger.info(f\"val loader size={len(sub_val_dataset)}\")\n\n
        self.cache_target_loader = list()\n        self.cache_bn_loader = list()\n\n        logger.info(\"Caching validation batches.\")\n        for _, (datas, targets) in enumerate(self.target_loader):\n            self.cache_target_loader.append((datas, targets))\n\n
    def test_arch(self, arch: OFAArchitecture, resolution=None):\n        # `resolution` is kept for interface compatibility; inputs are fixed at 224 here.\n        ofa_childnet = self.ofa_supernet.get_subnet(arch)\n        logger.info(\"Fetched child network.\")\n        # set_running_statistics(ofa_childnet, self.bn_loader)\n        top1acc, top5acc = validate(ofa_childnet, self.cache_target_loader, self.cache_target_loader)\n        logger.info(\"Computed accuracy.\")\n        # FLOPs and parameter counts are not computed here; return placeholders.\n        return top1acc, top5acc, 0, 0", "import torch\n\n
pretrained_model_path = \"/userhome/liujing/models/chenpeng/R_18_1x-FPN_BN/model_final.pth\"\nsave_model_path = \"/userhome/liujing/models/chenpeng/R_18_1x-FPN_BN/model_final_notshared_norm.pth\"\n\n\n
# Load the pre-trained checkpoint and un-share the tower norm parameters across FPN levels.\ncheckpoint_param = torch.load(pretrained_model_path, map_location='cpu')\nprefixes = [\"proposal_generator.fcos_head.cls_tower\", \"proposal_generator.fcos_head.bbox_tower\"]\noutput_prefixes = [\"proposal_generator.fcos_head.cls_norm\", \"proposal_generator.fcos_head.bbox_norm\"]\n# Only GroupNorm parameters (weight/bias) are handled here.\n\n
j = 0\nfor i in range(0, 10, 3):\n    for prefix, output_prefix in zip(prefixes, output_prefixes):\n        key_norm_weight_name = '{}.{}.weight'.format(prefix, i + 1)\n        key_norm_bias_name = '{}.{}.bias'.format(prefix, i + 1)\n        key_names = [key_norm_weight_name, key_norm_bias_name]\n        for key_name in key_names:\n            value = checkpoint_param[\"model\"].pop(key_name)\n            for k in range(5):\n                if 'weight' in key_name:\n                    output_key_name = '{}{}.{}.weight'.format(output_prefix, j, k)\n                else:\n                    output_key_name = '{}{}.{}.bias'.format(output_prefix, j, k)\n                checkpoint_param[\"model\"][output_key_name] = value\n    j += 1\ntorch.save(checkpoint_param, save_model_path)\n", "# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\n
import copy\nimport torch.nn as nn\n\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import set_layer_from_config, MBConvLayer, ConvLayer, IdentityLayer, LinearLayer, ResidualBlock\nfrom codebase.third_party.spos_ofa.ofa.utils import MyNetwork, make_divisible, MyGlobalAvgPool2d\n\n__all__ = ['MobileNetV3Cifar', 'MobileNetV3CifarLarge']\n\n\n
class MobileNetV3Cifar(MyNetwork):\n\n\tdef __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):\n\t\tsuper(MobileNetV3Cifar, self).__init__()\n\n\t\tself.first_conv = first_conv\n\t\tself.blocks = nn.ModuleList(blocks)\n\t\tself.final_expand_layer = final_expand_layer\n\t\tself.global_avg_pool = MyGlobalAvgPool2d(keep_dim=True)\n\t\tself.feature_mix_layer = feature_mix_layer\n\t\tself.classifier = classifier\n\n
\tdef forward(self, x):\n\t\tx = self.first_conv(x)\n\t\tfor block in self.blocks:\n\t\t\tx = block(x)\n\t\tx = self.final_expand_layer(x)\n\t\tx = self.global_avg_pool(x) # global average pooling\n\t\tx = self.feature_mix_layer(x)\n\t\tx = x.view(x.size(0), -1)\n\t\tx = self.classifier(x)\n\t\treturn x\n\n\t@property\n\tdef module_str(self):\n\t\t_str = self.first_conv.module_str + 
'\\n'\n\t\tfor block in self.blocks:\n\t\t\t_str += block.module_str + '\\n'\n\t\t_str += self.final_expand_layer.module_str + '\\n'\n\t\t_str += self.global_avg_pool.__repr__() + '\\n'\n\t\t_str += self.feature_mix_layer.module_str + '\\n'\n\t\t_str += self.classifier.module_str\n\t\treturn _str\n\n\t@property\n\tdef config(self):\n\t\treturn {\n\t\t\t'name': MobileNetV3Cifar.__name__,\n\t\t\t'bn': self.get_bn_param(),\n\t\t\t'first_conv': self.first_conv.config,\n\t\t\t'blocks': [\n\t\t\t\tblock.config for block in self.blocks\n\t\t\t],\n\t\t\t'final_expand_layer': self.final_expand_layer.config,\n\t\t\t'feature_mix_layer': self.feature_mix_layer.config,\n\t\t\t'classifier': self.classifier.config,\n\t\t}\n\n\t@staticmethod\n\tdef build_from_config(config):\n\t\tfirst_conv = set_layer_from_config(config['first_conv'])\n\t\tfinal_expand_layer = set_layer_from_config(config['final_expand_layer'])\n\t\tfeature_mix_layer = set_layer_from_config(config['feature_mix_layer'])\n\t\tclassifier = set_layer_from_config(config['classifier'])\n\n\t\tblocks = []\n\t\tfor block_config in config['blocks']:\n\t\t\tblocks.append(ResidualBlock.build_from_config(block_config))\n\n\t\tnet = MobileNetV3Cifar(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\tif 'bn' in config:\n\t\t\tnet.set_bn_param(**config['bn'])\n\t\telse:\n\t\t\tnet.set_bn_param(momentum=0.1, eps=1e-5)\n\n\t\treturn net\n\n\tdef zero_last_gamma(self):\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, ResidualBlock):\n\t\t\t\tif isinstance(m.conv, MBConvLayer) and isinstance(m.shortcut, IdentityLayer):\n\t\t\t\t\tm.conv.point_linear.bn.weight.data.zero_()\n\n\t@property\n\tdef grouped_block_index(self):\n\t\tinfo_list = []\n\t\tblock_index_list = []\n\t\tfor i, block in enumerate(self.blocks[1:], 1):\n\t\t\tif block.shortcut is None and len(block_index_list) > 0:\n\t\t\t\tinfo_list.append(block_index_list)\n\t\t\t\tblock_index_list = []\n\t\t\tblock_index_list.append(i)\n\t\tif len(block_index_list) > 0:\n\t\t\tinfo_list.append(block_index_list)\n\t\treturn info_list\n\n\t@staticmethod\n\tdef build_net_via_cfg(cfg, input_channel, last_channel, n_classes, dropout_rate):\n\t\t# first conv layer\n\t\tfirst_conv = ConvLayer(\n\t\t\t3, input_channel, kernel_size=3, stride=1, use_bn=True, act_func='h_swish', ops_order='weight_bn_act'\n\t\t)\n\t\t# build mobile blocks\n\t\tfeature_dim = input_channel\n\t\tblocks = []\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor k, mid_channel, out_channel, use_se, act_func, stride, expand_ratio in block_config_list:\n\t\t\t\tmb_conv = MBConvLayer(\n\t\t\t\t\tfeature_dim, out_channel, k, stride, expand_ratio, mid_channel, act_func, use_se\n\t\t\t\t)\n\t\t\t\tif stride == 1 and out_channel == feature_dim:\n\t\t\t\t\tshortcut = IdentityLayer(out_channel, out_channel)\n\t\t\t\telse:\n\t\t\t\t\tshortcut = None\n\t\t\t\tblocks.append(ResidualBlock(mb_conv, shortcut))\n\t\t\t\tfeature_dim = out_channel\n\t\t# final expand layer\n\t\tfinal_expand_layer = ConvLayer(\n\t\t\tfeature_dim, feature_dim * 6, kernel_size=1, use_bn=True, act_func='h_swish', ops_order='weight_bn_act',\n\t\t)\n\t\t# feature mix layer\n\t\tfeature_mix_layer = ConvLayer(\n\t\t\tfeature_dim * 6, last_channel, kernel_size=1, bias=False, use_bn=False, act_func='h_swish',\n\t\t)\n\t\t# classifier\n\t\tclassifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)\n\n\t\treturn first_conv, blocks, final_expand_layer, feature_mix_layer, classifier\n\n\t@staticmethod\n\tdef adjust_cfg(cfg, 
ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tfor i, (stage_id, block_config_list) in enumerate(cfg.items()):\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif ks is not None and stage_id != '0':\n\t\t\t\t\tblock_config[0] = ks\n\t\t\t\tif expand_ratio is not None and stage_id != '0':\n\t\t\t\t\tblock_config[-1] = expand_ratio\n\t\t\t\t\tblock_config[1] = None\n\t\t\t\t\tif stage_width_list is not None:\n\t\t\t\t\t\tblock_config[2] = stage_width_list[i]\n\t\t\tif depth_param is not None and stage_id != '0':\n\t\t\t\tnew_block_config_list = [block_config_list[0]]\n\t\t\t\tnew_block_config_list += [copy.deepcopy(block_config_list[-1]) for _ in range(depth_param - 1)]\n\t\t\t\tcfg[stage_id] = new_block_config_list\n\t\treturn cfg\n\n\tdef load_state_dict(self, state_dict, **kwargs):\n\t\tcurrent_state_dict = self.state_dict()\n\n\t\tfor key in state_dict:\n\t\t\tif key not in current_state_dict:\n\t\t\t\tassert '.mobile_inverted_conv.' in key\n\t\t\t\tnew_key = key.replace('.mobile_inverted_conv.', '.conv.')\n\t\t\telse:\n\t\t\t\tnew_key = key\n\t\t\tcurrent_state_dict[new_key] = state_dict[key]\n\t\tsuper(MobileNetV3Cifar, self).load_state_dict(current_state_dict)\n\n\nclass MobileNetV3CifarLarge(MobileNetV3Cifar):\n\n\tdef __init__(self, n_classes=1000, width_mult=1.0, bn_param=(0.1, 1e-5), dropout_rate=0.2,\n\t ks=None, expand_ratio=None, depth_param=None, stage_width_list=None):\n\t\tinput_channel = 16\n\t\tlast_channel = 1280\n\n\t\tinput_channel = make_divisible(input_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\tlast_channel = make_divisible(last_channel * width_mult, MyNetwork.CHANNEL_DIVISIBLE) \\\n\t\t\tif width_mult > 1.0 else last_channel\n\n\t\tcfg = {\n\t\t\t# k, exp, c, se, nl, s, e,\n\t\t\t'0': [\n\t\t\t\t[3, 16, 16, False, 'relu', 1, 1],\n\t\t\t],\n\t\t\t'1': [\n\t\t\t\t[3, 64, 24, False, 'relu', 2, None], # 4\n\t\t\t\t[3, 72, 24, False, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'2': [\n\t\t\t\t[5, 72, 40, True, 'relu', 2, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t\t[5, 120, 40, True, 'relu', 1, None], # 3\n\t\t\t],\n\t\t\t'3': [\n\t\t\t\t[3, 240, 80, False, 'h_swish', 2, None], # 6\n\t\t\t\t[3, 200, 80, False, 'h_swish', 1, None], # 2.5\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t\t[3, 184, 80, False, 'h_swish', 1, None], # 2.3\n\t\t\t],\n\t\t\t'4': [\n\t\t\t\t[3, 480, 112, True, 'h_swish', 1, None], # 6\n\t\t\t\t[3, 672, 112, True, 'h_swish', 1, None], # 6\n\t\t\t],\n\t\t\t'5': [\n\t\t\t\t[5, 672, 160, True, 'h_swish', 2, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t\t[5, 960, 160, True, 'h_swish', 1, None], # 6\n\t\t\t]\n\t\t}\n\n\t\tcfg = self.adjust_cfg(cfg, ks, expand_ratio, depth_param, stage_width_list)\n\t\t# width multiplier on mobile setting, change `exp: 1` and `c: 2`\n\t\tfor stage_id, block_config_list in cfg.items():\n\t\t\tfor block_config in block_config_list:\n\t\t\t\tif block_config[1] is not None:\n\t\t\t\t\tblock_config[1] = make_divisible(block_config[1] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\t\t\t\tblock_config[2] = make_divisible(block_config[2] * width_mult, MyNetwork.CHANNEL_DIVISIBLE)\n\n\t\tfirst_conv, blocks, final_expand_layer, feature_mix_layer, classifier = self.build_net_via_cfg(\n\t\t\tcfg, input_channel, last_channel, n_classes, dropout_rate\n\t\t)\n\t\tsuper(MobileNetV3CifarLarge, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\t\t# set bn 
param\n\t\tself.set_bn_param(*bn_param)\n", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom time import sleep\nimport numpy as np\nimport fvcore.nn.weight_init as weight_init\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom detectron2_ofa.layers import (\n Conv2d,\n DeformConv,\n FrozenBatchNorm2d,\n ModulatedDeformConv,\n ShapeSpec,\n get_norm,\n)\n\nfrom .backbone import Backbone\nfrom .build import BACKBONE_REGISTRY\n\n__all__ = [\n \"ResNetBlockBase\",\n \"BasicBlock\",\n \"BasicBlockLarge\",\n \"BottleneckBlockLarge\",\n \"BottleneckBlock\",\n \"DeformBottleneckBlock\",\n \"BasicStem\",\n \"ResNet\",\n \"OFAResNets\",\n \"make_stage\",\n \"build_resnet_backbone\",\n \"build_resnet_teacher_backbone\",\n \"build_resnet_ofa_backbone\",\n]\n\n\nclass ResNetBlockBase(nn.Module):\n def __init__(self, in_channels, out_channels, stride):\n \"\"\"\n The `__init__` method of any subclass should also contain these arguments.\n\n Args:\n in_channels (int):\n out_channels (int):\n stride (int):\n \"\"\"\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n\n def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n FrozenBatchNorm2d.convert_frozen_batchnorm(self) # 这里就是师兄说的fix norm的代码\n return self\n\n\nclass BasicBlock(ResNetBlockBase):\n def __init__(\n self,\n in_channels,\n out_channels,\n *,\n bottleneck_channels,\n stride=1,\n num_groups=1,\n norm=\"BN\",\n stride_in_1x1=False,\n dilation=1,\n ):\n \"\"\"\n Args:\n norm (str or callable): a callable that takes the number of\n channels and return a `nn.Module`, or a pre-defined string\n (one of {\"FrozenBN\", \"BN\", \"GN\"}).\n stride_in_1x1 (bool): when stride==2, whether to put stride in the\n first 1x1 convolution or the bottleneck 3x3 convolution.\n \"\"\"\n super().__init__(in_channels, out_channels, stride)\n\n if in_channels != out_channels:\n self.shortcut = Conv2d(\n in_channels,\n out_channels,\n kernel_size=1,\n stride=stride,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n else:\n self.shortcut = None\n\n # The original MSRA ResNet models have stride in the first 1x1 conv\n # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have\n # stride in the 3x3 conv\n stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n self.conv1 = Conv2d(\n in_channels,\n out_channels,\n kernel_size=3,\n stride=stride,\n padding=1 * dilation,\n bias=False,\n groups=num_groups,\n dilation=dilation,\n norm=get_norm(norm, out_channels),\n )\n\n self.conv2 = Conv2d(\n out_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n\n for layer in [self.conv1, self.conv2, self.shortcut]:\n if layer is not None: # shortcut can be None\n weight_init.c2_msra_fill(layer)\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu_(out)\n\n out = self.conv2(out)\n\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n out += shortcut\n out = F.relu_(out)\n return out\n\n\nclass BottleneckBlock(ResNetBlockBase):\n def __init__(\n self,\n in_channels,\n out_channels,\n *,\n bottleneck_channels,\n stride=1,\n num_groups=1,\n norm=\"BN\",\n stride_in_1x1=False,\n dilation=1,\n ):\n \"\"\"\n Args:\n norm (str or callable): a callable that takes the number of\n channels and return a `nn.Module`, or a pre-defined string\n (one of {\"FrozenBN\", \"BN\", \"GN\"}).\n stride_in_1x1 (bool): when 
stride==2, whether to put stride in the\n first 1x1 convolution or the bottleneck 3x3 convolution.\n \"\"\"\n super().__init__(in_channels, out_channels, stride)\n\n if in_channels != out_channels:\n self.shortcut = Conv2d(\n in_channels,\n out_channels,\n kernel_size=1,\n stride=stride,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n else:\n self.shortcut = None\n\n # The original MSRA ResNet models have stride in the first 1x1 conv\n # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have\n # stride in the 3x3 conv\n stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n self.conv1 = Conv2d(\n in_channels,\n bottleneck_channels,\n kernel_size=1,\n stride=stride_1x1,\n bias=False,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n self.conv2 = Conv2d(\n bottleneck_channels,\n bottleneck_channels,\n kernel_size=3,\n stride=stride_3x3,\n padding=1 * dilation,\n bias=False,\n groups=num_groups,\n dilation=dilation,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n self.conv3 = Conv2d(\n bottleneck_channels,\n out_channels,\n kernel_size=1,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n\n for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:\n if layer is not None: # shortcut can be None\n weight_init.c2_msra_fill(layer)\n\n # Zero-initialize the last normalization in each residual branch,\n # so that at the beginning, the residual branch starts with zeros,\n # and each residual block behaves like an identity.\n # See Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"For BN layers, the learnable scaling coefficient γ is initialized\n # to be 1, except for each residual block's last BN\n # where γ is initialized to be 0.\"\n\n # nn.init.constant_(self.conv3.norm.weight, 0)\n # TODO this somehow hurts performance when training GN models from scratch.\n # Add it as an option when we need to use this code to train a backbone.\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu_(out)\n\n out = self.conv2(out)\n out = F.relu_(out)\n\n out = self.conv3(out)\n\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n out += shortcut\n out = F.relu_(out)\n return out\n\n# teacher network ofa\nclass BasicBlockLarge(ResNetBlockBase):\n def __init__(\n self,\n in_channels,\n out_channels,\n *,\n bottleneck_channels,\n stride=1,\n num_groups=1,\n norm=\"BN\",\n stride_in_1x1=False,\n dilation=1,\n ):\n \"\"\"\n Args:\n norm (str or callable): a callable that takes the number of\n channels and return a `nn.Module`, or a pre-defined string\n (one of {\"FrozenBN\", \"BN\", \"GN\"}).\n stride_in_1x1 (bool): when stride==2, whether to put stride in the\n first 1x1 convolution or the bottleneck 3x3 convolution.\n \"\"\"\n super().__init__(in_channels, out_channels, stride)\n\n if in_channels != out_channels:\n self.shortcut = Conv2d(\n in_channels,\n out_channels,\n kernel_size=7,\n stride=stride,\n bias=False,\n padding=3,\n norm=get_norm(norm, out_channels),\n )\n else:\n self.shortcut = None\n\n # The original MSRA ResNet models have stride in the first 1x1 conv\n # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have\n # stride in the 3x3 conv\n stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n self.conv1 = Conv2d(\n in_channels,\n out_channels,\n kernel_size=7,\n stride=stride,\n padding=3,\n bias=False,\n groups=num_groups,\n dilation=dilation,\n norm=get_norm(norm, out_channels),\n )\n\n self.conv2 = Conv2d(\n 
out_channels,\n            out_channels,\n            kernel_size=7,\n            stride=1,\n            padding=3,\n            bias=False,\n            norm=get_norm(norm, out_channels),\n        )\n\n
        for layer in [self.conv1, self.conv2, self.shortcut]:\n            if layer is not None:  # shortcut can be None\n                weight_init.c2_msra_fill(layer)\n\n
    def forward(self, x):\n        out = self.conv1(x)\n        out = F.relu_(out)\n        out = self.conv2(out)\n        if self.shortcut is not None:\n            shortcut = self.shortcut(x)\n        else:\n            shortcut = x\n        out += shortcut\n        out = F.relu_(out)\n        return out\n\n\n
class BottleneckBlockLarge(ResNetBlockBase):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        *,\n        bottleneck_channels,\n        stride=1,\n        num_groups=1,\n        norm=\"BN\",\n        stride_in_1x1=False,\n        dilation=1,\n    ):\n        \"\"\"\n        Args:\n            norm (str or callable): a callable that takes the number of\n                channels and return a `nn.Module`, or a pre-defined string\n                (one of {\"FrozenBN\", \"BN\", \"GN\"}).\n            stride_in_1x1 (bool): when stride==2, whether to put stride in the\n                first 1x1 convolution or the bottleneck 3x3 convolution.\n        \"\"\"\n        super().__init__(in_channels, out_channels, stride)\n\n
        # All convolutions in this block use 7x7 kernels, so padding=3 is required\n        # to keep the main path and the shortcut at the same spatial size.\n
        if in_channels != out_channels:\n            self.shortcut = Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size=7,\n                stride=stride,\n                padding=3,\n                bias=False,\n                norm=get_norm(norm, out_channels),\n            )\n        else:\n            self.shortcut = None\n\n
        # The original MSRA ResNet models have stride in the first 1x1 conv\n        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have\n        # stride in the 3x3 conv\n        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n
        self.conv1 = Conv2d(\n            in_channels,\n            bottleneck_channels,\n            kernel_size=7,\n            stride=stride_1x1,\n            padding=3,\n            bias=False,\n            norm=get_norm(norm, bottleneck_channels),\n        )\n\n
        self.conv2 = Conv2d(\n            bottleneck_channels,\n            bottleneck_channels,\n            kernel_size=7,\n            stride=stride_3x3,\n            padding=3 * dilation,\n            bias=False,\n            groups=num_groups,\n            dilation=dilation,\n            norm=get_norm(norm, bottleneck_channels),\n        )\n\n
        self.conv3 = Conv2d(\n            bottleneck_channels,\n            out_channels,\n            kernel_size=7,\n            padding=3,\n            bias=False,\n            norm=get_norm(norm, out_channels),\n        )\n\n
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:\n            if layer is not None:  # shortcut can be None\n                weight_init.c2_msra_fill(layer)\n\n
        # Zero-initialize the last normalization in each residual branch,\n        # so that at the beginning, the residual branch starts with zeros,\n        # and each residual block behaves like an identity.\n        # See Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n        # \"For BN layers, the learnable scaling coefficient γ is initialized\n        # to be 1, except for each residual block's last BN\n        # where γ is initialized to be 0.\"\n\n        # nn.init.constant_(self.conv3.norm.weight, 0)\n        # TODO this somehow hurts performance when training GN models from scratch.\n        # Add it as an option when we need to use this code to train a backbone.\n\n
    def forward(self, x):\n        out = self.conv1(x)\n        out = F.relu_(out)\n\n        out = self.conv2(out)\n        out = F.relu_(out)\n\n        out = self.conv3(out)\n\n        if self.shortcut is not None:\n            shortcut = self.shortcut(x)\n        else:\n            shortcut = x\n\n        out += shortcut\n        out = F.relu_(out)\n        return out\n\n\n
class DeformBottleneckBlock(ResNetBlockBase):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        *,\n        bottleneck_channels,\n        stride=1,\n        num_groups=1,\n        norm=\"BN\",\n        stride_in_1x1=False,\n        dilation=1,\n        deform_modulated=False,\n        deform_num_groups=1,\n    ):\n        \"\"\"\n        Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution.\n        \"\"\"\n        
super().__init__(in_channels, out_channels, stride)\n self.deform_modulated = deform_modulated\n\n if in_channels != out_channels:\n self.shortcut = Conv2d(\n in_channels,\n out_channels,\n kernel_size=1,\n stride=stride,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n else:\n self.shortcut = None\n\n stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n self.conv1 = Conv2d(\n in_channels,\n bottleneck_channels,\n kernel_size=1,\n stride=stride_1x1,\n bias=False,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n if deform_modulated:\n deform_conv_op = ModulatedDeformConv\n # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size\n offset_channels = 27\n else:\n deform_conv_op = DeformConv\n offset_channels = 18\n\n self.conv2_offset = Conv2d(\n bottleneck_channels,\n offset_channels * deform_num_groups,\n kernel_size=3,\n stride=stride_3x3,\n padding=1 * dilation,\n dilation=dilation,\n )\n self.conv2 = deform_conv_op(\n bottleneck_channels,\n bottleneck_channels,\n kernel_size=3,\n stride=stride_3x3,\n padding=1 * dilation,\n bias=False,\n groups=num_groups,\n dilation=dilation,\n deformable_groups=deform_num_groups,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n self.conv3 = Conv2d(\n bottleneck_channels,\n out_channels,\n kernel_size=1,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n\n for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:\n if layer is not None: # shortcut can be None\n weight_init.c2_msra_fill(layer)\n\n nn.init.constant_(self.conv2_offset.weight, 0)\n nn.init.constant_(self.conv2_offset.bias, 0)\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu_(out)\n\n if self.deform_modulated:\n offset_mask = self.conv2_offset(out)\n offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)\n offset = torch.cat((offset_x, offset_y), dim=1)\n mask = mask.sigmoid()\n out = self.conv2(out, offset, mask)\n else:\n offset = self.conv2_offset(out)\n out = self.conv2(out, offset)\n out = F.relu_(out)\n\n out = self.conv3(out)\n\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n out += shortcut\n out = F.relu_(out)\n return out\n\n\ndef make_stage(block_class, num_blocks, first_stride, **kwargs):\n \"\"\"\n Create a resnet stage by creating many blocks.\n Args:\n block_class (class): a subclass of ResNetBlockBase\n num_blocks (int):\n first_stride (int): the stride of the first block. The other blocks will have stride=1.\n A `stride` argument will be passed to the block constructor.\n kwargs: other arguments passed to the block constructor.\n\n Returns:\n list[nn.Module]: a list of block module.\n \"\"\"\n blocks = []\n for i in range(num_blocks):\n blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs))\n kwargs[\"in_channels\"] = kwargs[\"out_channels\"]\n return blocks\n\n\ndef make_stage_ofa(block_class, num_blocks, first_stride, **kwargs):\n \"\"\"\n Create a resnet stage by creating many blocks.\n Args:\n block_class (class): a subclass of ResNetBlockBase\n num_blocks (int):\n first_stride (int): the stride of the first block. 
The other blocks will have stride=1.\n A `stride` argument will be passed to the block constructor.\n kwargs: other arguments passed to the block constructor.\n\n Returns:\n list[nn.Module]: a list of block module.\n \"\"\"\n blocks = []\n for i in range(num_blocks):\n blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs))\n kwargs[\"in_channel_list\"] = kwargs[\"out_channel_list\"]\n return blocks\n\n\nclass BasicStem(nn.Module):\n def __init__(self, in_channels=3, out_channels=64, norm=\"BN\", bias=False):\n \"\"\"\n Args:\n norm (str or callable): a callable that takes the number of\n channels and return a `nn.Module`, or a pre-defined string\n (one of {\"FrozenBN\", \"BN\", \"GN\"}).\n \"\"\"\n super().__init__()\n self.conv1 = Conv2d(\n in_channels,\n out_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=bias,\n norm=get_norm(norm, out_channels),\n )\n weight_init.c2_msra_fill(self.conv1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu_(x)\n x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)\n return x\n\n @property\n def out_channels(self):\n return self.conv1.out_channels\n\n @property\n def stride(self):\n return 4 # = stride 2 conv -> stride 2 max pool\n\n\nclass ResNet(Backbone):\n def __init__(self, stem, stages, num_classes=None, out_features=None):\n \"\"\"\n Args:\n stem (nn.Module): a stem module\n stages (list[list[ResNetBlock]]): several (typically 4) stages,\n each contains multiple :class:`ResNetBlockBase`.\n num_classes (None or int): if None, will not perform classification.\n out_features (list[str]): name of the layers whose outputs should\n be returned in forward. Can be anything in \"stem\", \"linear\", or \"res2\" ...\n If None, will return the output of the last layer.\n \"\"\"\n super(ResNet, self).__init__()\n self.stem = stem\n self.num_classes = num_classes\n\n current_stride = self.stem.stride\n self._out_feature_strides = {\"stem\": current_stride}\n self._out_feature_channels = {\"stem\": self.stem.out_channels}\n\n self.stages_and_names = []\n for i, blocks in enumerate(stages):\n for block in blocks:\n assert isinstance(block, ResNetBlockBase), block\n curr_channels = block.out_channels\n stage = nn.Sequential(*blocks)\n name = \"res\" + str(i + 2)\n self.add_module(name, stage)\n self.stages_and_names.append((stage, name))\n self._out_feature_strides[name] = current_stride = int(\n current_stride * np.prod([k.stride for k in blocks])\n )\n self._out_feature_channels[name] = blocks[-1].out_channels\n\n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(curr_channels, num_classes)\n\n # Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"The 1000-way fully-connected layer is initialized by\n # drawing weights from a zero-mean Gaussian with standard deviation of 0.01.\"\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n children = [x[0] for x in self.named_children()]\n for out_feature in self._out_features:\n assert out_feature in children, \"Available children: {}\".format(\", \".join(children))\n\n def forward(self, x):\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n for stage, name in self.stages_and_names:\n x = stage(x)\n if name in self._out_features:\n outputs[name] = x\n if self.num_classes is not None:\n x = self.avgpool(x)\n x = 
x.view(x.size(0), -1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n def output_shape(self):\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }\n\n\nclass OFAResNet(Backbone):\n def __init__(self, stem, stages, num_classes=None, out_features=None):\n \"\"\"\n Args:\n stem (nn.Module): a stem module\n stages (list[list[ResNetBlock]]): several (typically 4) stages,\n each contains multiple :class:`ResNetBlockBase`.\n num_classes (None or int): if None, will not perform classification.\n out_features (list[str]): name of the layers whose outputs should\n be returned in forward. Can be anything in \"stem\", \"linear\", or \"res2\" ...\n If None, will return the output of the last layer.\n \"\"\"\n super(OFAResNet, self).__init__()\n self.stem = stem\n self.num_classes = num_classes\n\n current_stride = self.stem.stride\n self._out_feature_strides = {\"stem\": current_stride}\n self._out_feature_channels = {\"stem\": self.stem.out_channels}\n\n self.stages_and_names = []\n for i, blocks in enumerate(stages):\n for block in blocks:\n assert isinstance(block, DynamicResNetBasicBlock) or isinstance(block, DynamicResNetBottleneckBlock), block\n curr_channels = block.out_channels\n stage = nn.Sequential(*blocks)\n name = \"res\" + str(i + 2)\n self.add_module(name, stage)\n self.stages_and_names.append((stage, name))\n self._out_feature_strides[name] = current_stride = int(\n current_stride * np.prod([k.stride for k in blocks])\n )\n self._out_feature_channels[name] = blocks[-1].out_channels\n\n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(curr_channels, num_classes)\n\n # Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"The 1000-way fully-connected layer is initialized by\n # drawing weights from a zero-mean Gaussian with standard deviation of 0.01.\"\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n children = [x[0] for x in self.named_children()]\n for out_feature in self._out_features:\n assert out_feature in children, \"Available children: {}\".format(\", \".join(children))\n\n def forward(self, x):\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n for stage, name in self.stages_and_names:\n x = stage(x)\n if name in self._out_features:\n outputs[name] = x\n if self.num_classes is not None:\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n def output_shape(self):\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }\n\n\n@BACKBONE_REGISTRY.register()\ndef build_resnet_backbone(cfg, input_shape):\n \"\"\"\n Create a ResNet instance from config.\n\n Returns:\n ResNet: a :class:`ResNet` instance.\n \"\"\"\n # need registration of new blocks/stems?\n norm = cfg.MODEL.RESNETS.NORM\n stem = BasicStem(\n in_channels=input_shape.channels,\n out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,\n norm=norm,\n )\n freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT\n\n if freeze_at >= 1:\n for p in stem.parameters():\n p.requires_grad = False\n stem = 
FrozenBatchNorm2d.convert_frozen_batchnorm(stem)\n\n    # fmt: off\n    out_features = cfg.MODEL.RESNETS.OUT_FEATURES\n    depth = cfg.MODEL.RESNETS.DEPTH\n    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS\n    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP\n    bottleneck_channels = num_groups * width_per_group\n    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS\n    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS\n    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1\n    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION\n    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE\n    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED\n    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS\n    # fmt: on\n    assert res5_dilation in {1, 2}, \"res5_dilation cannot be {}.\".format(res5_dilation)\n\n
    num_blocks_per_stage = {\n        18: [2, 2, 2, 2],\n        34: [3, 4, 6, 3],\n        50: [3, 4, 6, 3],\n        101: [3, 4, 23, 3],\n        152: [3, 8, 36, 3]\n    }[depth]\n\n    stages = []\n\n
    # Avoid creating variables without gradients\n    # It consumes extra memory and may cause allreduce to fail\n    out_stage_idx = [{\"res2\": 2, \"res3\": 3, \"res4\": 4, \"res5\": 5, 'linear': 5}[f] for f in out_features]\n    max_stage_idx = max(out_stage_idx)\n
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):\n        dilation = res5_dilation if stage_idx == 5 else 1\n        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2\n        stage_kargs = {\n            \"num_blocks\": num_blocks_per_stage[idx],\n            \"first_stride\": first_stride,\n            \"in_channels\": in_channels,\n            \"bottleneck_channels\": bottleneck_channels,\n            \"out_channels\": out_channels,\n            \"num_groups\": num_groups,\n            \"norm\": norm,\n            \"stride_in_1x1\": stride_in_1x1,\n            \"dilation\": dilation,\n        }\n
        if deform_on_per_stage[idx]:\n            stage_kargs[\"block_class\"] = DeformBottleneckBlock\n            stage_kargs[\"deform_modulated\"] = deform_modulated\n            stage_kargs[\"deform_num_groups\"] = deform_num_groups\n        else:\n            if depth <= 34:\n                stage_kargs[\"block_class\"] = BasicBlock\n            else:\n                stage_kargs[\"block_class\"] = BottleneckBlock\n        blocks = make_stage(**stage_kargs)\n        in_channels = out_channels\n        out_channels *= 2\n        bottleneck_channels *= 2\n\n
        if freeze_at >= stage_idx:\n            for block in blocks:\n                block.freeze()  # freezes the whole block (all parameters), not just its BN layers\n        stages.append(blocks)\n    return ResNet(stem, stages, out_features=out_features)\n\n
# Construction of the OFA supernet backbone\nfrom codebase.third_party.spos_ofa.ofa.imagenet_classification.networks.resnets import *\nfrom codebase.third_party.spos_ofa.ofa.imagenet_classification.elastic_nn.networks.ofa_resnets import *\nfrom codebase.third_party.spos_ofa.ofa.utils import *\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import *\nfrom codebase.third_party.spos_ofa.ofa.imagenet_classification.elastic_nn.modules.dynamic_layers import *\n\n
@BACKBONE_REGISTRY.register()\ndef build_resnet_ofa_backbone(cfg, input_shape):\n    \"\"\"\n    Create an elastic OFA-ResNet supernet backbone from config.\n\n    Follows the OFAResNets class in the OFA codebase, adding the three\n    search-space parameters (depth, expand-ratio and width-mult lists).\n\n    Returns:\n        ResNets: the supernet backbone.\n    \"\"\"\n    # fmt: off\n    depth = cfg.MODEL.RESNETS.DEPTH\n    num_blocks_per_stage = {\n        18: [2, 2, 2, 2],\n        34: [3, 4, 6, 3],\n        50: [3, 4, 6, 3],\n        101: [3, 4, 23, 3],\n        152: [3, 8, 36, 3]\n    }[depth]\n\n
    depth_list = val2list(cfg.MODEL.RESNETS.DEPTH_LIST)\n    depth_list.sort()\n    expand_ratio_list = val2list(cfg.MODEL.RESNETS.EXPAND_RATIO_LIST)\n    width_mult_list = val2list(cfg.MODEL.RESNETS.WIDTH_MULT_LIST)\n    expand_ratio_list.sort()\n    width_mult_list.sort()\n\n
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS\n    in_channels = [make_divisible(in_channels * 
width_mult, MyNetwork.CHANNEL_DIVISIBLE) for width_mult in width_mult_list]\n\n
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS\n    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP\n    # bottleneck_channels = num_groups * width_per_group\n    bottleneck_channels = [make_divisible(channel // 2, MyNetwork.CHANNEL_DIVISIBLE) for channel in in_channels]\n    stage_width_list = ResNets.STAGE_WIDTH_LIST.copy()\n    for i, width in enumerate(stage_width_list):\n        stage_width_list[i] = [\n            make_divisible(width * width_mult, MyNetwork.CHANNEL_DIVISIBLE) for width_mult in width_mult_list\n        ]\n    n_block_list = [base_depth + max(depth_list) for base_depth in num_blocks_per_stage]\n\n
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES\n    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS\n    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1\n    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION\n    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE\n    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED\n    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS\n    # fmt: on\n    assert res5_dilation in {1, 2}, \"res5_dilation cannot be {}.\".format(res5_dilation)\n\n
    # Arguments handed to OFAResNets()\n    n_classes = cfg.MODEL.RETINANET.NUM_CLASSES\n    bn_param = (0.1, 1e-5)\n    dropout_rate = cfg.MODEL.RESNETS.LINEAR_DROPOUT\n    stride_list = [1, 2, 2, 2]\n    # need registration of new blocks/stems?\n    norm = cfg.MODEL.RESNETS.NORM\n\n
    # build input stem\n    stem = [\n        DynamicConvLayer(val2list(3), bottleneck_channels, 3, stride=2, use_bn=True, act_func='relu'),\n        ResidualBlock(\n            DynamicConvLayer(bottleneck_channels, bottleneck_channels, 3, stride=1, use_bn=True, act_func='relu'),\n            IdentityLayer(bottleneck_channels, bottleneck_channels)\n        ),\n        DynamicConvLayer(bottleneck_channels, in_channels, 3, stride=1, use_bn=True, act_func='relu')\n    ]\n    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT\n\n
    if freeze_at >= 1:\n        # `stem` is a plain list of layers here, so freeze each layer in turn.\n        for layer in stem:\n            for p in layer.parameters():\n                p.requires_grad = False\n        stem = [FrozenBatchNorm2d.convert_frozen_batchnorm(layer) for layer in stem]\n\n
    stages = []\n    # Avoid creating variables without gradients\n    # It consumes extra memory and may cause allreduce to fail\n    out_stage_idx = [{\"res2\": 2, \"res3\": 3, \"res4\": 4, \"res5\": 5, 'linear': 5}[f] for f in out_features]\n    max_stage_idx = max(out_stage_idx)\n
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):\n        dilation = res5_dilation if stage_idx == 5 else 1\n        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2\n        stage_kargs = {\n            \"num_blocks\": num_blocks_per_stage[idx],\n            \"first_stride\": first_stride,\n            # the parameters below are for the Dynamic block classes\n            \"in_channel_list\": in_channels,\n            \"out_channel_list\": stage_width_list[idx],\n            \"expand_ratio_list\": expand_ratio_list,\n            \"kernel_size\": 3,\n            \"act_func\": 'relu',\n            \"downsample_mode\": 'avgpool_conv',\n        }\n\n
        if deform_on_per_stage[idx]:\n            stage_kargs[\"block_class\"] = DeformBottleneckBlock\n            stage_kargs[\"deform_modulated\"] = deform_modulated\n            stage_kargs[\"deform_num_groups\"] = deform_num_groups\n        else:\n            if depth <= 34:\n                stage_kargs[\"block_class\"] = DynamicResNetBasicBlock\n            else:\n                stage_kargs[\"block_class\"] = DynamicResNetBottleneckBlock\n        blocks = 
make_stage_ofa(**stage_kargs)\n        in_channels = stage_width_list[idx]\n\n
        if freeze_at >= stage_idx:\n            for block in blocks:\n                block.freeze()  # freezes the whole block (all parameters), not just its BN layers\n        stages.append(blocks)\n\n    return ResNets(stem, stages)\n\n
# OFA teacher backbone (large-kernel blocks)\n@BACKBONE_REGISTRY.register()\ndef build_resnet_teacher_backbone(cfg, input_shape):\n    \"\"\"\n    Create a large-kernel teacher ResNet instance from config.\n\n    Returns:\n        ResNet: a :class:`ResNet` instance.\n    \"\"\"\n    # need registration of new blocks/stems?\n    norm = cfg.MODEL.RESNETS.NORM\n    stem = BasicStem(\n        in_channels=input_shape.channels,\n        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,\n        norm=norm,\n    )\n    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT\n\n
    if freeze_at >= 1:\n        for p in stem.parameters():\n            p.requires_grad = False\n        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)\n\n
    # fmt: off\n    out_features = cfg.MODEL.RESNETS.OUT_FEATURES\n    depth = cfg.MODEL.RESNETS.DEPTH\n    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS\n    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP\n    bottleneck_channels = num_groups * width_per_group\n    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS\n    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS\n    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1\n    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION\n    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE\n    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED\n    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS\n    # fmt: on\n    assert res5_dilation in {1, 2}, \"res5_dilation cannot be {}.\".format(res5_dilation)\n\n
    num_blocks_per_stage = {\n        18: [2, 2, 2, 2],\n        34: [3, 4, 6, 3],\n        50: [3, 4, 6, 3],\n        101: [3, 4, 23, 3],\n        152: [3, 8, 36, 3]\n    }[depth]\n\n    stages = []\n\n
    # Avoid creating variables without gradients\n    # It consumes extra memory and may cause allreduce to fail\n    out_stage_idx = [{\"res2\": 2, \"res3\": 3, \"res4\": 4, \"res5\": 5, 'linear': 5}[f] for f in out_features]\n    max_stage_idx = max(out_stage_idx)\n
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):\n        dilation = res5_dilation if stage_idx == 5 else 1\n        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2\n        stage_kargs = {\n            \"num_blocks\": num_blocks_per_stage[idx],\n            \"first_stride\": first_stride,\n            \"in_channels\": in_channels,\n            \"bottleneck_channels\": bottleneck_channels,\n            \"out_channels\": out_channels,\n            \"num_groups\": num_groups,\n            \"norm\": norm,\n            \"stride_in_1x1\": stride_in_1x1,\n            \"dilation\": dilation,\n        }\n
        if deform_on_per_stage[idx]:\n            stage_kargs[\"block_class\"] = DeformBottleneckBlock\n            stage_kargs[\"deform_modulated\"] = deform_modulated\n            stage_kargs[\"deform_num_groups\"] = deform_num_groups\n        else:\n            if depth <= 34:\n                stage_kargs[\"block_class\"] = BasicBlockLarge\n            else:\n                stage_kargs[\"block_class\"] = BottleneckBlockLarge\n        blocks = make_stage(**stage_kargs)\n        in_channels = out_channels\n        out_channels *= 2\n        bottleneck_channels *= 2\n\n
        if freeze_at >= stage_idx:\n            for block in blocks:\n                block.freeze()  # freezes the whole block (all parameters), not just its BN layers\n        stages.append(blocks)\n    return ResNet(stem, stages, out_features=out_features)\n" ]
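A note on the `set_running_statistics` calls in the evaluator code above: a child network sliced out of a weight-shared supernet inherits BatchNorm running means/variances that were accumulated for the whole supernet, so they must be re-estimated on a small calibration split before the child is scored. The following is a minimal illustrative sketch of such a recalibration pass in plain PyTorch, not the OFA implementation itself; `model` and `loader` are assumed placeholders.

import torch
import torch.nn as nn

def recalibrate_bn(model: nn.Module, loader, n_batches: int = 10, device: str = "cpu"):
    # Reset running stats; momentum=None switches BN to cumulative averaging.
    for m in model.modules():
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
            m.reset_running_stats()
            m.momentum = None
    model.train()  # BN updates its running stats only in train mode
    with torch.no_grad():  # weights stay fixed; no gradients needed
        for i, (images, _) in enumerate(loader):
            if i >= n_batches:
                break
            model(images.to(device))
    model.eval()
    return model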
[ [ "torch.nn.ModuleList" ], [ "torch.nn.Sequential", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Upsample", "torch.nn.functional.interpolate", "torch.nn.ReLU" ], [ "torch.optim.Adam", "torch.nn.LogSoftmax", "torch.zeros", "torch.unsqueeze", "torch.no_grad", "torch.optim.SGD", "torch.device", "torch.flip" ], [ "torch.all", "torch.nonzero" ], [ "torch.Generator", "torch.load", "torch.randperm", "torch.utils.data.DataLoader", "torch.utils.data.sampler.SubsetRandomSampler", "torch.no_grad", "torch.utils.data.Subset" ], [ "torch.save", "torch.load" ], [ "torch.nn.ModuleList" ], [ "torch.nn.Sequential", "torch.cat", "torch.nn.init.constant_", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.relu_", "torch.nn.init.normal_", "numpy.prod", "torch.chunk", "torch.nn.functional.max_pool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Soum-Soum/Tensorflow_Face_Finder
[ "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "fec6c15d2df7012608511ad87f4b55731bf99478", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "1fa4cd6a566c8745f455fc3d2273208f21f88ced" ]
[ "venv1/Lib/site-packages/tensorflow/python/keras/_impl/keras/layers/advanced_activations.py", "venv1/Lib/site-packages/tensorflow/python/keras/_impl/keras/datasets/imdb.py", "venv1/Lib/site-packages/tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py", "venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py", "venv1/Lib/site-packages/tensorflow/python/ops/distributions/exponential.py", "venv1/Lib/site-packages/tensorflow/contrib/boosted_trees/python/ops/batch_ops_utils.py", "venv1/Lib/site-packages/tensorflow/python/ops/spectral_ops.py", "venv1/Lib/site-packages/tensorflow/contrib/model_pruning/python/learning.py", "venv1/Lib/site-packages/tensorflow/contrib/bayesflow/python/ops/metropolis_hastings.py", "venv1/Lib/site-packages/tensorflow/python/ops/distributions/normal.py", "venv1/Lib/site-packages/tensorflow/contrib/distributions/python/ops/half_normal.py", "venv1/Lib/site-packages/tensorflow/python/ops/gen_script_ops.py", "venv1/Lib/site-packages/tensorflow/contrib/cloud/python/ops/bigquery_reader_ops.py", "venv1/Lib/site-packages/tensorflow/contrib/eager/python/network.py", "venv1/Lib/site-packages/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py", "venv1/Lib/site-packages/tensorflow/contrib/eager/python/tfe.py", "venv1/Lib/site-packages/tensorflow/python/data/ops/dataset_ops.py", "venv1/Lib/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py", "venv1/Lib/site-packages/tensorflow/contrib/learn/python/learn/estimators/run_config.py", "venv1/Lib/site-packages/tensorflow/contrib/layers/python/layers/target_column.py", "venv1/Lib/site-packages/tensorflow/python/keras/_impl/keras/layers/convolutional.py", "venv1/Lib/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/affine_linear_operator.py", "venv1/Lib/site-packages/tensorflow/contrib/predictor/predictor_factories.py", "venv1/Lib/site-packages/tensorflow/contrib/bayesflow/python/ops/custom_grad_impl.py", "venv1/Lib/site-packages/tensorflow/contrib/labeled_tensor/python/ops/_typecheck.py", "venv1/Lib/site-packages/tensorflow/contrib/seq2seq/python/ops/helper.py", "venv1/Lib/site-packages/tensorflow/python/debug/cli/cli_config.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Layers that act as activation functions.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.keras._impl.keras import activations\r\nfrom tensorflow.python.keras._impl.keras import backend as K\r\nfrom tensorflow.python.keras._impl.keras import constraints\r\nfrom tensorflow.python.keras._impl.keras import initializers\r\nfrom tensorflow.python.keras._impl.keras import regularizers\r\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\r\nfrom tensorflow.python.keras._impl.keras.engine import Layer\r\nfrom tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export('keras.layers.LeakyReLU')\r\nclass LeakyReLU(Layer):\r\n \"\"\"Leaky version of a Rectified Linear Unit.\r\n\r\n It allows a small gradient when the unit is not active:\r\n `f(x) = alpha * x for x < 0`,\r\n `f(x) = x for x >= 0`.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha: float >= 0. Negative slope coefficient.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, alpha=0.3, **kwargs):\r\n super(LeakyReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha = K.cast_to_floatx(alpha)\r\n\r\n def call(self, inputs):\r\n return K.relu(inputs, alpha=self.alpha)\r\n\r\n def get_config(self):\r\n config = {'alpha': float(self.alpha)}\r\n base_config = super(LeakyReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.PReLU')\r\nclass PReLU(Layer):\r\n \"\"\"Parametric Rectified Linear Unit.\r\n\r\n It follows:\r\n `f(x) = alpha * x for x < 0`,\r\n `f(x) = x for x >= 0`,\r\n where `alpha` is a learned array with the same shape as x.\r\n\r\n Input shape:\r\n Arbitrary. 
Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha_initializer: initializer function for the weights.\r\n alpha_regularizer: regularizer for the weights.\r\n alpha_constraint: constraint for the weights.\r\n shared_axes: the axes along which to share learnable\r\n parameters for the activation function.\r\n For example, if the incoming feature maps\r\n are from a 2D convolution\r\n with output shape `(batch, height, width, channels)`,\r\n and you wish to share parameters across space\r\n so that each filter only has one set of parameters,\r\n set `shared_axes=[1, 2]`.\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n alpha_initializer='zeros',\r\n alpha_regularizer=None,\r\n alpha_constraint=None,\r\n shared_axes=None,\r\n **kwargs):\r\n super(PReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha_initializer = initializers.get(alpha_initializer)\r\n self.alpha_regularizer = regularizers.get(alpha_regularizer)\r\n self.alpha_constraint = constraints.get(alpha_constraint)\r\n if shared_axes is None:\r\n self.shared_axes = None\r\n elif not isinstance(shared_axes, (list, tuple)):\r\n self.shared_axes = [shared_axes]\r\n else:\r\n self.shared_axes = list(shared_axes)\r\n\r\n @shape_type_conversion\r\n def build(self, input_shape):\r\n param_shape = list(input_shape[1:])\r\n self.param_broadcast = [False] * len(param_shape)\r\n if self.shared_axes is not None:\r\n for i in self.shared_axes:\r\n param_shape[i - 1] = 1\r\n self.param_broadcast[i - 1] = True\r\n self.alpha = self.add_weight(\r\n shape=param_shape,\r\n name='alpha',\r\n initializer=self.alpha_initializer,\r\n regularizer=self.alpha_regularizer,\r\n constraint=self.alpha_constraint)\r\n # Set input spec\r\n axes = {}\r\n if self.shared_axes:\r\n for i in range(1, len(input_shape)):\r\n if i not in self.shared_axes:\r\n axes[i] = input_shape[i]\r\n self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)\r\n self.built = True\r\n\r\n def call(self, inputs, mask=None):\r\n pos = K.relu(inputs)\r\n if K.backend() == 'theano':\r\n neg = (\r\n K.pattern_broadcast(self.alpha, self.param_broadcast) *\r\n (inputs - K.abs(inputs)) * 0.5)\r\n else:\r\n neg = -self.alpha * K.relu(-inputs)\r\n return pos + neg\r\n\r\n def get_config(self):\r\n config = {\r\n 'alpha_initializer': initializers.serialize(self.alpha_initializer),\r\n 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),\r\n 'alpha_constraint': constraints.serialize(self.alpha_constraint),\r\n 'shared_axes': self.shared_axes\r\n }\r\n base_config = super(PReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.ELU')\r\nclass ELU(Layer):\r\n \"\"\"Exponential Linear Unit.\r\n\r\n It follows:\r\n `f(x) = alpha * (exp(x) - 1.) for x < 0`,\r\n `f(x) = x for x >= 0`.\r\n\r\n Input shape:\r\n Arbitrary. 
Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n alpha: scale for the negative factor.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, alpha=1.0, **kwargs):\r\n super(ELU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.alpha = K.cast_to_floatx(alpha)\r\n\r\n def call(self, inputs):\r\n return K.elu(inputs, self.alpha)\r\n\r\n def get_config(self):\r\n config = {'alpha': float(self.alpha)}\r\n base_config = super(ELU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.ThresholdedReLU')\r\nclass ThresholdedReLU(Layer):\r\n \"\"\"Thresholded Rectified Linear Unit.\r\n\r\n It follows:\r\n `f(x) = x for x > theta`,\r\n `f(x) = 0 otherwise`.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n theta: float >= 0. Threshold location of activation.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, theta=1.0, **kwargs):\r\n super(ThresholdedReLU, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.theta = K.cast_to_floatx(theta)\r\n\r\n def call(self, inputs, mask=None):\r\n return inputs * K.cast(K.greater(inputs, self.theta), K.floatx())\r\n\r\n def get_config(self):\r\n config = {'theta': float(self.theta)}\r\n base_config = super(ThresholdedReLU, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\n@tf_export('keras.layers.Softmax')\r\nclass Softmax(Layer):\r\n \"\"\"Softmax activation function.\r\n\r\n Input shape:\r\n Arbitrary. Use the keyword argument `input_shape`\r\n (tuple of integers, does not include the samples axis)\r\n when using this layer as the first layer in a model.\r\n\r\n Output shape:\r\n Same shape as the input.\r\n\r\n Arguments:\r\n axis: Integer, axis along which the softmax normalization is applied.\r\n \"\"\"\r\n\r\n def __init__(self, axis=-1, **kwargs):\r\n super(Softmax, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n self.axis = axis\r\n\r\n def call(self, inputs):\r\n return activations.softmax(inputs, axis=self.axis)\r\n\r\n def get_config(self):\r\n config = {'axis': self.axis}\r\n base_config = super(Softmax, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n @shape_type_conversion\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n", "# Copyright 2015 The TensorFlow Authors. 
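# Hedged usage sketch (added for illustration; not part of the file above): stacking
# the advanced activation layers defined in this module through their public
# tf.keras aliases declared by the tf_export decorators. Layer arguments shown are
# the documented defaults; model/layer sizes are arbitrary.
import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(64, input_shape=(16,)),
    tf.keras.layers.LeakyReLU(alpha=0.3),   # f(x) = 0.3*x for x < 0, x otherwise
    tf.keras.layers.Dense(64),
    tf.keras.layers.PReLU(),                # learns one alpha per feature
    tf.keras.layers.Dense(10),
    tf.keras.layers.Softmax(axis=-1),       # normalizes over the last axis
])
model.compile(optimizer='adam', loss='categorical_crossentropy')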
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"IMDB sentiment classification dataset.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport json\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.keras._impl.keras.preprocessing.sequence import _remove_long_seq\r\nfrom tensorflow.python.keras._impl.keras.utils.data_utils import get_file\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export('keras.datasets.imdb.load_data')\r\ndef load_data(path='imdb.npz',\r\n num_words=None,\r\n skip_top=0,\r\n maxlen=None,\r\n seed=113,\r\n start_char=1,\r\n oov_char=2,\r\n index_from=3,\r\n **kwargs):\r\n \"\"\"Loads the IMDB dataset.\r\n\r\n Arguments:\r\n path: where to cache the data (relative to `~/.keras/dataset`).\r\n num_words: max number of words to include. Words are ranked\r\n by how often they occur (in the training set) and only\r\n the most frequent words are kept\r\n skip_top: skip the top N most frequently occurring words\r\n (which may not be informative).\r\n maxlen: sequences longer than this will be filtered out.\r\n seed: random seed for sample shuffling.\r\n start_char: The start of a sequence will be marked with this character.\r\n Set to 1 because 0 is usually the padding character.\r\n oov_char: words that were cut out because of the `num_words`\r\n or `skip_top` limit will be replaced with this character.\r\n index_from: index actual words with this index and higher.\r\n **kwargs: Used for backwards compatibility.\r\n\r\n Returns:\r\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\r\n\r\n Raises:\r\n ValueError: in case `maxlen` is so low\r\n that no input sequence could be kept.\r\n\r\n Note that the 'out of vocabulary' character is only used for\r\n words that were present in the training set but are not included\r\n because they're not making the `num_words` cut here.\r\n Words that were not seen in the training set but are in the test set\r\n have simply been skipped.\r\n \"\"\"\r\n # Legacy support\r\n if 'nb_words' in kwargs:\r\n logging.warning('The `nb_words` argument in `load_data` '\r\n 'has been renamed `num_words`.')\r\n num_words = kwargs.pop('nb_words')\r\n if kwargs:\r\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\r\n\r\n path = get_file(\r\n path,\r\n origin='https://s3.amazonaws.com/text-datasets/imdb.npz',\r\n file_hash='599dadb1135973df5b59232a0e9a887c')\r\n with np.load(path) as f:\r\n x_train, labels_train = f['x_train'], f['y_train']\r\n x_test, labels_test = f['x_test'], f['y_test']\r\n\r\n np.random.seed(seed)\r\n indices = np.arange(len(x_train))\r\n np.random.shuffle(indices)\r\n x_train = x_train[indices]\r\n labels_train = labels_train[indices]\r\n\r\n indices = np.arange(len(x_test))\r\n 
np.random.shuffle(indices)\r\n x_test = x_test[indices]\r\n labels_test = labels_test[indices]\r\n\r\n xs = np.concatenate([x_train, x_test])\r\n labels = np.concatenate([labels_train, labels_test])\r\n\r\n if start_char is not None:\r\n xs = [[start_char] + [w + index_from for w in x] for x in xs]\r\n elif index_from:\r\n xs = [[w + index_from for w in x] for x in xs]\r\n\r\n if maxlen:\r\n xs, labels = _remove_long_seq(maxlen, xs, labels)\r\n if not xs:\r\n raise ValueError('After filtering for sequences shorter than maxlen=' +\r\n str(maxlen) + ', no sequence was kept. '\r\n 'Increase maxlen.')\r\n if not num_words:\r\n num_words = max([max(x) for x in xs])\r\n\r\n # by convention, use 2 as OOV word\r\n # reserve 'index_from' (=3 by default) characters:\r\n # 0 (padding), 1 (start), 2 (OOV)\r\n if oov_char is not None:\r\n xs = [\r\n [w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs\r\n ]\r\n else:\r\n xs = [[w for w in x if skip_top <= w < num_words] for x in xs]\r\n\r\n idx = len(x_train)\r\n x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])\r\n x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])\r\n\r\n return (x_train, y_train), (x_test, y_test)\r\n\r\n\r\n@tf_export('keras.datasets.imdb.get_word_index')\r\ndef get_word_index(path='imdb_word_index.json'):\r\n \"\"\"Retrieves the dictionary mapping word indices back to words.\r\n\r\n Arguments:\r\n path: where to cache the data (relative to `~/.keras/dataset`).\r\n\r\n Returns:\r\n The word index dictionary.\r\n \"\"\"\r\n path = get_file(\r\n path,\r\n origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json',\r\n file_hash='bfafd718b763782e994055a2d397834f')\r\n with open(path) as f:\r\n return json.load(f)\r\n", "# Copyright 2017 The TensorFlow Authors. 
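# Hedged usage sketch for the IMDB loaders above, via their public tf.keras
# aliases. Decoding shifts word ids by the default index_from=3 and maps the
# reserved ids (0=padding, 1=start, 2=OOV) and unknown ids to "?".
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(
    num_words=10000)
word_index = tf.keras.datasets.imdb.get_word_index()
inv_index = {i + 3: w for w, i in word_index.items()}
decoded_review = ' '.join(inv_index.get(i, '?') for i in x_train[0])
print(y_train[0], decoded_review[:80])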
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Distribution of a vectorized Exponential, with uncorrelated components.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\r\nfrom tensorflow.contrib.distributions.python.ops import vector_exponential_linear_operator as vector_exponential_linop\r\nfrom tensorflow.python.framework import ops\r\n\r\n\r\n__all__ = [\r\n \"VectorExponentialDiag\",\r\n]\r\n\r\n\r\nclass VectorExponentialDiag(\r\n vector_exponential_linop.VectorExponentialLinearOperator):\r\n \"\"\"The vectorization of the Exponential distribution on `R^k`.\r\n\r\n The vector exponential distribution is defined over a subset of `R^k`, and\r\n parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`\r\n `scale` matrix: `covariance = scale @ scale.T`, where `@` denotes\r\n matrix-multiplication.\r\n\r\n #### Mathematical Details\r\n\r\n The probability density function (pdf) is defined over the image of the\r\n `scale` matrix + `loc`, applied to the positive half-space:\r\n `Supp = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`. On this set,\r\n\r\n ```none\r\n pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in Supp\r\n x = inv(scale) @ (y - loc),\r\n Z = |det(scale)|,\r\n ```\r\n\r\n where:\r\n\r\n * `loc` is a vector in `R^k`,\r\n * `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,\r\n * `Z` denotes the normalization constant, and,\r\n * `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`.\r\n\r\n The VectorExponential distribution is a member of the [location-scale\r\n family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\r\n constructed as,\r\n\r\n ```none\r\n X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)\r\n Y = (Y_1, ...,Y_k) = scale @ X + loc\r\n ```\r\n\r\n #### About `VectorExponential` and `Vector` distributions in TensorFlow.\r\n\r\n The `VectorExponential` is a non-standard distribution that has useful\r\n properties.\r\n\r\n The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to\r\n the fact that the sum of Exponential random variables is not Exponential.\r\n\r\n Instead, `Y` is a vector whose components are linear combinations of\r\n Exponential random variables. Thus, `Y` lives in the vector space generated\r\n by `vectors` of Exponential distributions. This allows the user to decide the\r\n mean and covariance (by setting `loc` and `scale`), while preserving some\r\n properties of the Exponential distribution. In particular, the tails of `Y_i`\r\n will be (up to polynomial factors) exponentially decaying.\r\n\r\n To see this last statement, note that the pdf of `Y_i` is the convolution of\r\n the pdf of `k` independent Exponential random variables. 
One can then show by\r\n induction that distributions with exponential (up to polynomial factors) tails\r\n are closed under convolution.\r\n\r\n\r\n #### Examples\r\n\r\n ```python\r\n tfd = tf.contrib.distributions\r\n\r\n # Initialize a single 2-variate VectorExponential, supported on\r\n # {(x, y) in R^2 : x > 0, y > 0}.\r\n\r\n # The first component has pdf exp{-x}, the second 0.5 exp{-x / 2}\r\n vex = tfd.VectorExponentialDiag(scale_diag=[1., 2.])\r\n\r\n # Compute the pdf of an`R^2` observation; return a scalar.\r\n vex.prob([3., 4.]).eval() # shape: []\r\n\r\n # Initialize a 2-batch of 3-variate Vector Exponential's.\r\n loc = [[1., 2, 3],\r\n [1., 0, 0]] # shape: [2, 3]\r\n scale_diag = [[1., 2, 3],\r\n [0.5, 1, 1.5]] # shape: [2, 3]\r\n\r\n vex = tfd.VectorExponentialDiag(loc, scale_diag)\r\n\r\n # Compute the pdf of two `R^3` observations; return a length-2 vector.\r\n x = [[1.9, 2.2, 3.1],\r\n [10., 1.0, 9.0]] # shape: [2, 3]\r\n vex.prob(x).eval() # shape: [2]\r\n ```\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n loc=None,\r\n scale_diag=None,\r\n scale_identity_multiplier=None,\r\n validate_args=False,\r\n allow_nan_stats=True,\r\n name=\"VectorExponentialDiag\"):\r\n \"\"\"Construct Vector Exponential distribution supported on a subset of `R^k`.\r\n\r\n The `batch_shape` is the broadcast shape between `loc` and `scale`\r\n arguments.\r\n\r\n The `event_shape` is given by last dimension of the matrix implied by\r\n `scale`. The last dimension of `loc` (if provided) must broadcast with this.\r\n\r\n Recall that `covariance = scale @ scale.T`.\r\n\r\n ```none\r\n scale = diag(scale_diag + scale_identity_multiplier * ones(k))\r\n ```\r\n\r\n where:\r\n\r\n * `scale_diag.shape = [k]`, and,\r\n * `scale_identity_multiplier.shape = []`.\r\n\r\n Additional leading dimensions (if any) will index batches.\r\n\r\n If both `scale_diag` and `scale_identity_multiplier` are `None`, then\r\n `scale` is the Identity matrix.\r\n\r\n Args:\r\n loc: Floating-point `Tensor`. If this is set to `None`, `loc` is\r\n implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where\r\n `b >= 0` and `k` is the event size.\r\n scale_diag: Non-zero, floating-point `Tensor` representing a diagonal\r\n matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,\r\n and characterizes `b`-batches of `k x k` diagonal matrices added to\r\n `scale`. When both `scale_identity_multiplier` and `scale_diag` are\r\n `None` then `scale` is the `Identity`.\r\n scale_identity_multiplier: Non-zero, floating-point `Tensor` representing\r\n a scaled-identity-matrix added to `scale`. May have shape\r\n `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled\r\n `k x k` identity matrices added to `scale`. When both\r\n `scale_identity_multiplier` and `scale_diag` are `None` then `scale` is\r\n the `Identity`.\r\n validate_args: Python `bool`, default `False`. When `True` distribution\r\n parameters are checked for validity despite possibly degrading runtime\r\n performance. When `False` invalid inputs may silently render incorrect\r\n outputs.\r\n allow_nan_stats: Python `bool`, default `True`. When `True`,\r\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\r\n indicate the result is undefined. 
When `False`, an exception is raised\r\n if one or more of the statistic's batch members are undefined.\r\n name: Python `str` name prefixed to Ops created by this class.\r\n\r\n Raises:\r\n ValueError: if at most `scale_identity_multiplier` is specified.\r\n \"\"\"\r\n parameters = locals()\r\n with ops.name_scope(name):\r\n with ops.name_scope(\"init\", values=[\r\n loc, scale_diag, scale_identity_multiplier]):\r\n # No need to validate_args while making diag_scale. The returned\r\n # LinearOperatorDiag has an assert_non_singular method that is called by\r\n # the Bijector.\r\n scale = distribution_util.make_diag_scale(\r\n loc=loc,\r\n scale_diag=scale_diag,\r\n scale_identity_multiplier=scale_identity_multiplier,\r\n validate_args=False,\r\n assert_positive=False)\r\n super(VectorExponentialDiag, self).__init__(\r\n loc=loc,\r\n scale=scale,\r\n validate_args=validate_args,\r\n allow_nan_stats=allow_nan_stats,\r\n name=name)\r\n self._parameters = parameters\r\n", "\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export('copy')\r\ndef copy(input, tensor_name=\"\", debug_ops_spec=[], name=None):\r\n r\"\"\"Copy Op.\r\n\r\n Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\r\n device on which the tensor is allocated.\r\n N.B.: If the all downstream attached debug ops are disabled given the current\r\n gRPC gating status, the output will simply forward the input tensor without\r\n deep-copying. See the documentation of Debug* ops for more details.\r\n\r\n Unlike the CopyHost Op, this op does not have HostMemory constraint on its\r\n input or output.\r\n\r\n Args:\r\n input: A `Tensor`. Input tensor.\r\n tensor_name: An optional `string`. Defaults to `\"\"`.\r\n The name of the input tensor.\r\n debug_ops_spec: An optional list of `strings`. Defaults to `[]`.\r\n A list of debug op spec (op, url, gated_grpc) for attached debug\r\n ops. Each element of the list has the format\r\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\r\n as 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\r\n \"DebugIdentity;file:///tmp/tfdbg_1;0\".\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
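# Hedged sketch for the VectorExponentialDiag class above: checking the documented
# location-scale construction Y = scale @ X + loc with X_i ~ Exponential(rate=1),
# which gives E[Y] = loc + scale_diag * 1. Uses the tf.contrib.distributions alias
# from the class docstring; graph-mode TF 1.x is assumed.
import tensorflow as tf

tfd = tf.contrib.distributions
vex = tfd.VectorExponentialDiag(loc=[1., -1.], scale_diag=[1., 2.])
with tf.Session() as sess:
    print(sess.run(vex.mean()))  # -> [2., 1.], i.e. loc + scale_diag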
Has the same type as `input`.\r\n Output tensor, deep-copied from input.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_ops_spec is None:\r\n debug_ops_spec = []\r\n if not isinstance(debug_ops_spec, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_ops_spec' argument to \"\r\n \"'copy' Op, not %r.\" % debug_ops_spec)\r\n debug_ops_spec = [_execute.make_str(_s, \"debug_ops_spec\") for _s in debug_ops_spec]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Copy\", input=input, tensor_name=tensor_name,\r\n debug_ops_spec=debug_ops_spec, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"tensor_name\",\r\n _op.get_attr(\"tensor_name\"), \"debug_ops_spec\",\r\n _op.get_attr(\"debug_ops_spec\"))\r\n _execute.record_gradient(\r\n \"Copy\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"Copy\", name,\r\n _ctx._post_execution_callbacks, input, \"tensor_name\", tensor_name,\r\n \"debug_ops_spec\", debug_ops_spec)\r\n return _result\r\n except _core._FallbackException:\r\n return copy_eager_fallback(\r\n input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,\r\n name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef copy_eager_fallback(input, tensor_name=\"\", debug_ops_spec=[], name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function copy\r\n \"\"\"\r\n _ctx = _context.context()\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_ops_spec is None:\r\n debug_ops_spec = []\r\n if not isinstance(debug_ops_spec, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_ops_spec' argument to \"\r\n \"'copy' Op, not %r.\" % debug_ops_spec)\r\n debug_ops_spec = [_execute.make_str(_s, \"debug_ops_spec\") for _s in debug_ops_spec]\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"tensor_name\", tensor_name, \"debug_ops_spec\",\r\n debug_ops_spec)\r\n _result = _execute.execute(b\"Copy\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Copy\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('copy_host')\r\ndef copy_host(input, tensor_name=\"\", debug_ops_spec=[], name=None):\r\n r\"\"\"Copy Host Op.\r\n\r\n Performs CPU-to-CPU deep-copying of tensor.\r\n N.B.: If the all downstream attached debug ops are disabled given the current\r\n gRPC gating status, the output will simply forward the input tensor without\r\n deep-copying. See the documentation of Debug* ops for more details.\r\n\r\n Unlike the Copy Op, this op has HostMemory constraint on its input or output.\r\n\r\n Args:\r\n input: A `Tensor`. Input tensor.\r\n tensor_name: An optional `string`. Defaults to `\"\"`.\r\n The name of the input tensor.\r\n debug_ops_spec: An optional list of `strings`. 
Defaults to `[]`.\r\n A list of debug op spec (op, url, gated_grpc) for attached debug\r\n ops. Each element of the list has the format\r\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\r\n as 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\r\n \"DebugIdentity;file:///tmp/tfdbg_1;0\".\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n Output tensor, deep-copied from input.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_ops_spec is None:\r\n debug_ops_spec = []\r\n if not isinstance(debug_ops_spec, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_ops_spec' argument to \"\r\n \"'copy_host' Op, not %r.\" % debug_ops_spec)\r\n debug_ops_spec = [_execute.make_str(_s, \"debug_ops_spec\") for _s in debug_ops_spec]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"CopyHost\", input=input, tensor_name=tensor_name,\r\n debug_ops_spec=debug_ops_spec, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"tensor_name\",\r\n _op.get_attr(\"tensor_name\"), \"debug_ops_spec\",\r\n _op.get_attr(\"debug_ops_spec\"))\r\n _execute.record_gradient(\r\n \"CopyHost\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"CopyHost\", name,\r\n _ctx._post_execution_callbacks, input, \"tensor_name\", tensor_name,\r\n \"debug_ops_spec\", debug_ops_spec)\r\n return _result\r\n except _core._FallbackException:\r\n return copy_host_eager_fallback(\r\n input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,\r\n name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef copy_host_eager_fallback(input, tensor_name=\"\", debug_ops_spec=[], name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function copy_host\r\n \"\"\"\r\n _ctx = _context.context()\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_ops_spec is None:\r\n debug_ops_spec = []\r\n if not isinstance(debug_ops_spec, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_ops_spec' argument to \"\r\n \"'copy_host' Op, not %r.\" % debug_ops_spec)\r\n debug_ops_spec = [_execute.make_str(_s, \"debug_ops_spec\") for _s in debug_ops_spec]\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"tensor_name\", tensor_name, \"debug_ops_spec\",\r\n debug_ops_spec)\r\n _result = _execute.execute(b\"CopyHost\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"CopyHost\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('debug_identity')\r\ndef debug_identity(input, device_name=\"\", tensor_name=\"\", debug_urls=[], gated_grpc=False, name=None):\r\n r\"\"\"Debug Identity Op.\r\n\r\n Provides an identity mapping of the non-Ref type input tensor for debugging.\r\n\r\n Args:\r\n input: A `Tensor`. 
Input tensor, non-Reference type.\r\n device_name: An optional `string`. Defaults to `\"\"`.\r\n tensor_name: An optional `string`. Defaults to `\"\"`.\r\n Name of the input tensor.\r\n debug_urls: An optional list of `strings`. Defaults to `[]`.\r\n List of URLs to debug targets, e.g.,\r\n file:///foo/tfdbg_dump, grpc:://localhost:11011\r\n gated_grpc: An optional `bool`. Defaults to `False`.\r\n Whether this op will be gated. If any of the debug_urls of this\r\n debug node is of the grpc:// scheme, when the value of this attribute is set\r\n to True, the data will not actually be sent via the grpc stream unless this\r\n debug op has been enabled at the debug_url. If all of the debug_urls of this\r\n debug node are of the grpc:// scheme and the debug op is enabled at none of\r\n them, the output will be an empty Tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n Output tensor that equals the input tensor.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_identity' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DebugIdentity\", input=input, device_name=device_name,\r\n tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"device_name\",\r\n _op.get_attr(\"device_name\"), \"tensor_name\",\r\n _op.get_attr(\"tensor_name\"), \"debug_urls\",\r\n _op.get_attr(\"debug_urls\"), \"gated_grpc\",\r\n _op.get_attr(\"gated_grpc\"))\r\n _execute.record_gradient(\r\n \"DebugIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"DebugIdentity\", name,\r\n _ctx._post_execution_callbacks, input, \"device_name\", device_name,\r\n \"tensor_name\", tensor_name, \"debug_urls\", debug_urls, \"gated_grpc\",\r\n gated_grpc)\r\n return _result\r\n except _core._FallbackException:\r\n return debug_identity_eager_fallback(\r\n input, device_name=device_name, tensor_name=tensor_name,\r\n debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef debug_identity_eager_fallback(input, device_name=\"\", tensor_name=\"\", debug_urls=[], gated_grpc=False, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function debug_identity\r\n \"\"\"\r\n _ctx = _context.context()\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = 
_execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_identity' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"device_name\", device_name, \"tensor_name\",\r\n tensor_name, \"debug_urls\", debug_urls, \"gated_grpc\", gated_grpc)\r\n _result = _execute.execute(b\"DebugIdentity\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DebugIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('debug_nan_count')\r\ndef debug_nan_count(input, device_name=\"\", tensor_name=\"\", debug_urls=[], gated_grpc=False, name=None):\r\n r\"\"\"Debug NaN Value Counter Op\r\n\r\n Counts number of NaNs in the input tensor, for debugging.\r\n\r\n Args:\r\n input: A `Tensor`. Input tensor, non-Reference type.\r\n device_name: An optional `string`. Defaults to `\"\"`.\r\n tensor_name: An optional `string`. Defaults to `\"\"`.\r\n Name of the input tensor.\r\n debug_urls: An optional list of `strings`. Defaults to `[]`.\r\n List of URLs to debug targets, e.g.,\r\n file:///foo/tfdbg_dump, grpc:://localhost:11011.\r\n gated_grpc: An optional `bool`. Defaults to `False`.\r\n Whether this op will be gated. If any of the debug_urls of this\r\n debug node is of the grpc:// scheme, when the value of this attribute is set\r\n to True, the data will not actually be sent via the grpc stream unless this\r\n debug op has been enabled at the debug_url. 
If all of the debug_urls of this\r\n debug node are of the grpc:// scheme and the debug op is enabled at none of\r\n them, the output will be an empty Tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int64`.\r\n An integer output tensor that is the number of NaNs in the input.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_nan_count' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DebugNanCount\", input=input, device_name=device_name,\r\n tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"device_name\",\r\n _op.get_attr(\"device_name\"), \"tensor_name\",\r\n _op.get_attr(\"tensor_name\"), \"debug_urls\",\r\n _op.get_attr(\"debug_urls\"), \"gated_grpc\",\r\n _op.get_attr(\"gated_grpc\"))\r\n _execute.record_gradient(\r\n \"DebugNanCount\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"DebugNanCount\", name,\r\n _ctx._post_execution_callbacks, input, \"device_name\", device_name,\r\n \"tensor_name\", tensor_name, \"debug_urls\", debug_urls, \"gated_grpc\",\r\n gated_grpc)\r\n return _result\r\n except _core._FallbackException:\r\n return debug_nan_count_eager_fallback(\r\n input, device_name=device_name, tensor_name=tensor_name,\r\n debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef debug_nan_count_eager_fallback(input, device_name=\"\", tensor_name=\"\", debug_urls=[], gated_grpc=False, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function debug_nan_count\r\n \"\"\"\r\n _ctx = _context.context()\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_nan_count' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"device_name\", device_name, \"tensor_name\",\r\n tensor_name, 
\"debug_urls\", debug_urls, \"gated_grpc\", gated_grpc)\r\n _result = _execute.execute(b\"DebugNanCount\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DebugNanCount\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('debug_numeric_summary')\r\ndef debug_numeric_summary(input, device_name=\"\", tensor_name=\"\", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):\r\n r\"\"\"Debug Numeric Summary Op.\r\n\r\n Provide a basic summary of numeric value types, range and distribution.\r\n\r\n Args:\r\n input: A `Tensor`. Input tensor, non-Reference type, float or double.\r\n device_name: An optional `string`. Defaults to `\"\"`.\r\n tensor_name: An optional `string`. Defaults to `\"\"`.\r\n Name of the input tensor.\r\n debug_urls: An optional list of `strings`. Defaults to `[]`.\r\n List of URLs to debug targets, e.g.,\r\n file:///foo/tfdbg_dump, grpc:://localhost:11011\r\n lower_bound: An optional `float`. Defaults to `float('-inf')`.\r\n (float) The lower bound <= which values will be included in the\r\n generalized -inf count. Default: -inf.\r\n upper_bound: An optional `float`. Defaults to `float('inf')`.\r\n (float) The upper bound >= which values will be included in the\r\n generalized +inf count. Default: +inf.\r\n mute_if_healthy: An optional `bool`. Defaults to `False`.\r\n (bool) Do not send data to the debug URLs unless at least one\r\n of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\r\n inf counts) is non-zero.\r\n gated_grpc: An optional `bool`. Defaults to `False`.\r\n Whether this op will be gated. If any of the debug_urls of this\r\n debug node is of the grpc:// scheme, when the value of this attribute is set\r\n to True, the data will not actually be sent via the grpc stream unless this\r\n debug op has been enabled at the debug_url. If all of the debug_urls of this\r\n debug node are of the grpc:// scheme and the debug op is enabled at none of\r\n them, the output will be an empty Tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float64`.\r\n A double tensor of shape [14 + nDimensions], where nDimensions is the\r\n the number of dimensions of the tensor's shape. The elements of output are:\r\n [0]: is initialized (1.0) or not (0.0).\r\n [1]: total number of elements\r\n [2]: NaN element count\r\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\r\n default.\r\n [4]: negative element count (excluding -inf), if lower_bound is the default\r\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\r\n [5]: zero element count\r\n [6]: positive element count (excluding +inf), if upper_bound is the default\r\n -inf. Otherwise, this is the count of elements < upper_bound and > 0.\r\n [7]: generalized +inf count, elements >= upper_bound. 
upper_bound is +inf by\r\n default.\r\n Output elements [1:8] are all zero, if the tensor is uninitialized.\r\n [8]: minimum of all non-inf and non-NaN elements.\r\n If uninitialized or no such element exists: +inf.\r\n [9]: maximum of all non-inf and non-NaN elements.\r\n If uninitialized or no such element exists: -inf.\r\n [10]: mean of all non-inf and non-NaN elements.\r\n If uninitialized or no such element exists: NaN.\r\n [11]: variance of all non-inf and non-NaN elements.\r\n If uninitialized or no such element exists: NaN.\r\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\r\n proto for more details.\r\n [13]: Number of dimensions of the tensor (ndims).\r\n [14+]: Sizes of the dimensions.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_numeric_summary' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if lower_bound is None:\r\n lower_bound = float('-inf')\r\n lower_bound = _execute.make_float(lower_bound, \"lower_bound\")\r\n if upper_bound is None:\r\n upper_bound = float('inf')\r\n upper_bound = _execute.make_float(upper_bound, \"upper_bound\")\r\n if mute_if_healthy is None:\r\n mute_if_healthy = False\r\n mute_if_healthy = _execute.make_bool(mute_if_healthy, \"mute_if_healthy\")\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DebugNumericSummary\", input=input, device_name=device_name,\r\n tensor_name=tensor_name, debug_urls=debug_urls,\r\n lower_bound=lower_bound, upper_bound=upper_bound,\r\n mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"device_name\",\r\n _op.get_attr(\"device_name\"), \"tensor_name\",\r\n _op.get_attr(\"tensor_name\"), \"debug_urls\",\r\n _op.get_attr(\"debug_urls\"), \"lower_bound\",\r\n _op.get_attr(\"lower_bound\"), \"upper_bound\",\r\n _op.get_attr(\"upper_bound\"), \"mute_if_healthy\",\r\n _op.get_attr(\"mute_if_healthy\"), \"gated_grpc\",\r\n _op.get_attr(\"gated_grpc\"))\r\n _execute.record_gradient(\r\n \"DebugNumericSummary\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"DebugNumericSummary\", name,\r\n _ctx._post_execution_callbacks, input, \"device_name\", device_name,\r\n \"tensor_name\", tensor_name, \"debug_urls\", debug_urls, \"lower_bound\",\r\n lower_bound, \"upper_bound\", upper_bound, \"mute_if_healthy\",\r\n mute_if_healthy, \"gated_grpc\", gated_grpc)\r\n return _result\r\n except _core._FallbackException:\r\n return debug_numeric_summary_eager_fallback(\r\n input, device_name=device_name, tensor_name=tensor_name,\r\n debug_urls=debug_urls, lower_bound=lower_bound,\r\n upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,\r\n gated_grpc=gated_grpc, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is 
not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef debug_numeric_summary_eager_fallback(input, device_name=\"\", tensor_name=\"\", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function debug_numeric_summary\r\n \"\"\"\r\n _ctx = _context.context()\r\n if device_name is None:\r\n device_name = \"\"\r\n device_name = _execute.make_str(device_name, \"device_name\")\r\n if tensor_name is None:\r\n tensor_name = \"\"\r\n tensor_name = _execute.make_str(tensor_name, \"tensor_name\")\r\n if debug_urls is None:\r\n debug_urls = []\r\n if not isinstance(debug_urls, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'debug_urls' argument to \"\r\n \"'debug_numeric_summary' Op, not %r.\" % debug_urls)\r\n debug_urls = [_execute.make_str(_s, \"debug_urls\") for _s in debug_urls]\r\n if lower_bound is None:\r\n lower_bound = float('-inf')\r\n lower_bound = _execute.make_float(lower_bound, \"lower_bound\")\r\n if upper_bound is None:\r\n upper_bound = float('inf')\r\n upper_bound = _execute.make_float(upper_bound, \"upper_bound\")\r\n if mute_if_healthy is None:\r\n mute_if_healthy = False\r\n mute_if_healthy = _execute.make_bool(mute_if_healthy, \"mute_if_healthy\")\r\n if gated_grpc is None:\r\n gated_grpc = False\r\n gated_grpc = _execute.make_bool(gated_grpc, \"gated_grpc\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"device_name\", device_name, \"tensor_name\",\r\n tensor_name, \"debug_urls\", debug_urls, \"lower_bound\", lower_bound,\r\n \"upper_bound\", upper_bound, \"mute_if_healthy\", mute_if_healthy,\r\n \"gated_grpc\", gated_grpc)\r\n _result = _execute.execute(b\"DebugNumericSummary\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DebugNumericSummary\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"Copy\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"tensor_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"debug_ops_spec\"\r\n# type: \"list(string)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"CopyHost\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"tensor_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"debug_ops_spec\"\r\n# type: \"list(string)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"DebugIdentity\"\r\n# input_arg {\r\n# name: \"input\"\r\n# 
type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"device_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"tensor_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"debug_urls\"\r\n# type: \"list(string)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"gated_grpc\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"DebugNanCount\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"device_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"tensor_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"debug_urls\"\r\n# type: \"list(string)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"gated_grpc\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"DebugNumericSummary\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_DOUBLE\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"device_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"tensor_name\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"debug_urls\"\r\n# type: \"list(string)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"lower_bound\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: -inf\r\n# }\r\n# }\r\n# attr {\r\n# name: \"upper_bound\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: inf\r\n# }\r\n# }\r\n# attr {\r\n# name: \"mute_if_healthy\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"gated_grpc\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n_op_def_lib = 
_InitOpDefLibrary(b\"\\nl\\n\\004Copy\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\013tensor_name\\022\\006string\\032\\002\\022\\000\\\"\\\"\\n\\016debug_ops_spec\\022\\014list(string)\\032\\002\\n\\000\\230\\001\\001\\np\\n\\010CopyHost\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\013tensor_name\\022\\006string\\032\\002\\022\\000\\\"\\\"\\n\\016debug_ops_spec\\022\\014list(string)\\032\\002\\n\\000\\230\\001\\001\\n\\244\\001\\n\\rDebugIdentity\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\013device_name\\022\\006string\\032\\002\\022\\000\\\"\\031\\n\\013tensor_name\\022\\006string\\032\\002\\022\\000\\\"\\036\\n\\ndebug_urls\\022\\014list(string)\\032\\002\\n\\000\\\"\\026\\n\\ngated_grpc\\022\\004bool\\032\\002(\\000\\230\\001\\001\\n\\243\\001\\n\\rDebugNanCount\\022\\n\\n\\005input\\\"\\001T\\032\\n\\n\\006output\\030\\t\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\013device_name\\022\\006string\\032\\002\\022\\000\\\"\\031\\n\\013tensor_name\\022\\006string\\032\\002\\022\\000\\\"\\036\\n\\ndebug_urls\\022\\014list(string)\\032\\002\\n\\000\\\"\\026\\n\\ngated_grpc\\022\\004bool\\032\\002(\\000\\230\\001\\001\\n\\200\\002\\n\\023DebugNumericSummary\\022\\n\\n\\005input\\\"\\001T\\032\\n\\n\\006output\\030\\002\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\013device_name\\022\\006string\\032\\002\\022\\000\\\"\\031\\n\\013tensor_name\\022\\006string\\032\\002\\022\\000\\\"\\036\\n\\ndebug_urls\\022\\014list(string)\\032\\002\\n\\000\\\"\\033\\n\\013lower_bound\\022\\005float\\032\\005%\\000\\000\\200\\377\\\"\\033\\n\\013upper_bound\\022\\005float\\032\\005%\\000\\000\\200\\177\\\"\\033\\n\\017mute_if_healthy\\022\\004bool\\032\\002(\\000\\\"\\026\\n\\ngated_grpc\\022\\004bool\\032\\002(\\000\\230\\001\\001\")\r\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"The Exponential distribution class.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops.distributions import gamma\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n__all__ = [\r\n \"Exponential\",\r\n \"ExponentialWithSoftplusRate\",\r\n]\r\n\r\n\r\n@tf_export(\"distributions.Exponential\")\r\nclass Exponential(gamma.Gamma):\r\n \"\"\"Exponential distribution.\r\n\r\n The Exponential distribution is parameterized by an event `rate` parameter.\r\n\r\n #### Mathematical Details\r\n\r\n The probability density function (pdf) is,\r\n\r\n ```none\r\n pdf(x; lambda, x > 0) = exp(-lambda x) / Z\r\n Z = 1 / lambda\r\n ```\r\n\r\n where `rate = lambda` and `Z` is the normalizing constant.\r\n\r\n The Exponential distribution is a special case of the Gamma distribution,\r\n i.e.,\r\n\r\n ```python\r\n Exponential(rate) = Gamma(concentration=1., rate)\r\n ```\r\n\r\n The Exponential distribution uses a `rate` parameter, or \"inverse scale\",\r\n which can be intuited as,\r\n\r\n ```none\r\n X ~ Exponential(rate=1)\r\n Y = X / rate\r\n ```\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n rate,\r\n validate_args=False,\r\n allow_nan_stats=True,\r\n name=\"Exponential\"):\r\n \"\"\"Construct Exponential distribution with parameter `rate`.\r\n\r\n Args:\r\n rate: Floating point tensor, equivalent to `1 / mean`. Must contain only\r\n positive values.\r\n validate_args: Python `bool`, default `False`. When `True` distribution\r\n parameters are checked for validity despite possibly degrading runtime\r\n performance. When `False` invalid inputs may silently render incorrect\r\n outputs.\r\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\r\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\r\n result is undefined. 
When `False`, an exception is raised if one or\r\n more of the statistic's batch members are undefined.\r\n name: Python `str` name prefixed to Ops created by this class.\r\n \"\"\"\r\n parameters = locals()\r\n # Even though all statistics are defined for valid inputs, this is not\r\n # true in the parent class \"Gamma.\" Therefore, passing\r\n # allow_nan_stats=True\r\n # through to the parent class results in unnecessary asserts.\r\n with ops.name_scope(name, values=[rate]):\r\n self._rate = ops.convert_to_tensor(rate, name=\"rate\")\r\n super(Exponential, self).__init__(\r\n concentration=array_ops.ones([], dtype=self._rate.dtype),\r\n rate=self._rate,\r\n allow_nan_stats=allow_nan_stats,\r\n validate_args=validate_args,\r\n name=name)\r\n # While the Gamma distribution is not reparameterizable, the exponential\r\n # distribution is.\r\n self._reparameterization_type = True\r\n self._parameters = parameters\r\n self._graph_parents += [self._rate]\r\n\r\n @staticmethod\r\n def _param_shapes(sample_shape):\r\n return {\"rate\": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}\r\n\r\n @property\r\n def rate(self):\r\n return self._rate\r\n\r\n def _sample_n(self, n, seed=None):\r\n shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)\r\n # Uniform variates must be sampled from the open-interval `(0, 1)` rather\r\n # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`\r\n # because it is the smallest, positive, \"normal\" number. A \"normal\" number\r\n # is such that the mantissa has an implicit leading 1. Normal, positive\r\n # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In\r\n # this case, a subnormal number (i.e., np.nextafter) can cause us to sample\r\n # 0.\r\n sampled = random_ops.random_uniform(\r\n shape,\r\n minval=np.finfo(self.dtype.as_numpy_dtype).tiny,\r\n maxval=1.,\r\n seed=seed,\r\n dtype=self.dtype)\r\n return -math_ops.log(sampled) / self._rate\r\n\r\n\r\nclass ExponentialWithSoftplusRate(Exponential):\r\n \"\"\"Exponential with softplus transform on `rate`.\"\"\"\r\n\r\n def __init__(self,\r\n rate,\r\n validate_args=False,\r\n allow_nan_stats=True,\r\n name=\"ExponentialWithSoftplusRate\"):\r\n parameters = locals()\r\n with ops.name_scope(name, values=[rate]):\r\n super(ExponentialWithSoftplusRate, self).__init__(\r\n rate=nn.softplus(rate, name=\"softplus_rate\"),\r\n validate_args=validate_args,\r\n allow_nan_stats=allow_nan_stats,\r\n name=name)\r\n self._parameters = parameters\r\n", "# Copyright 2017 The TensorFlow Authors. 
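# Hedged sketch of the Exponential distribution defined above, through its public
# tf.distributions alias. rate is an inverse scale, so mean = 1/rate, and sampling
# follows the reparameterized -log(U)/rate path shown in _sample_n.
import tensorflow as tf

dist = tf.distributions.Exponential(rate=2.)
with tf.Session() as sess:
    print(sess.run(dist.mean()))           # -> 0.5, i.e. 1/rate
    print(sess.run(dist.log_prob([0.5])))  # log(2) - 2*0.5 ~= -0.307
    print(sess.run(dist.sample(3)))        # three positive draws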
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Utility for batching remote OPs together to reduce RPC overhead.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\nimport collections\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\n\r\n\r\nclass ScheduledOp(object):\r\n \"\"\"Represents a scheduled remote operation.\"\"\"\r\n\r\n __metaclass__ = abc.ABCMeta\r\n\r\n @abc.abstractmethod\r\n def batching_key(self):\r\n \"\"\"Returns the key for batching operations.\"\"\"\r\n\r\n @abc.abstractmethod\r\n def batch_runner_fn(self):\r\n \"\"\"Returns the function that executes the operation on the batch.\"\"\"\r\n\r\n\r\nclass ScheduledStampedResourceOp(ScheduledOp):\r\n \"\"\"Wrapper class for batched operations on stamped resources.\"\"\"\r\n\r\n def __init__(self, resource_handle, op, **kwargs):\r\n self.resource_handle = resource_handle\r\n self.op = op\r\n self.args = kwargs\r\n\r\n def batching_key(self):\r\n # We want to group the same operations on the same device and run them in\r\n # one batch. 
So we use (device, operation) as the key.\r\n return self.resource_handle.device, self.op\r\n\r\n def batch_runner_fn(self):\r\n return _scheduled_stamp_resource_op_runner\r\n\r\n\r\ndef _move_tensors(tensors, device):\r\n \"\"\"Moves a list of tensors to a device by concatenating/splitting them.\"\"\"\r\n # Reset the device setting to avoid weird interactions with device merging\r\n # logic.\r\n with ops.device(None):\r\n if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):\r\n with ops.device(tensors[0].device):\r\n values = array_ops.stack(tensors)\r\n with ops.device(device):\r\n return array_ops.unstack(values)\r\n else:\r\n with ops.device(tensors[0].device):\r\n sizes = array_ops.stack(\r\n [array_ops.shape(tensor)[0] for tensor in tensors])\r\n values = array_ops.concat(tensors, axis=0)\r\n with ops.device(device):\r\n sizes = array_ops.unstack(sizes)\r\n return list(array_ops.split(values, sizes, axis=0))\r\n\r\n\r\ndef _scheduled_stamp_resource_op_runner(batch, stamp):\r\n \"\"\"Runs a batch operation on a stamped resource.\"\"\"\r\n if not batch:\r\n return\r\n arg_keys = set(batch[0].args.keys())\r\n grouped_args = collections.OrderedDict()\r\n resource_handles = []\r\n # Check that the set of arguments is the same across all the scheduled ops.\r\n for op in batch:\r\n if set(op.args.keys()) != arg_keys:\r\n raise ValueError(\"Mismatching arguments: %s, %s.\" % (op.args, arg_keys))\r\n for key in arg_keys:\r\n grouped_args.setdefault(key, []).append(op.args[key])\r\n resource_handles.append(op.resource_handle)\r\n # Move all the inputs to the op device in one RPC.\r\n grouped_args = collections.OrderedDict(\r\n (k, _move_tensors(v, resource_handles[0].device))\r\n for k, v in sorted(grouped_args.items()))\r\n with ops.device(resource_handles[0].device):\r\n return batch[0].op(resource_handles, stamp, **grouped_args)\r\n\r\n\r\ndef run_handler_scheduled_ops(per_handler_ops, stamp, worker_device):\r\n \"\"\"Given a dictionary of ops for each handler, runs them in batch.\"\"\"\r\n batched_ops = collections.OrderedDict()\r\n # Group the ops by their batching_key. 
Ops that share the same batching key\r\n  # can be executed together.\r\n  for handler in per_handler_ops.keys():\r\n    for op in per_handler_ops[handler]:\r\n      key = (op.batching_key(), op.batch_runner_fn())\r\n      batched_ops.setdefault(key, []).append(op)\r\n  op_results = {}\r\n  for batch in batched_ops.values():\r\n    # Run each of the batched ops using its runner.\r\n    results = batch[0].batch_runner_fn()(batch, stamp)\r\n    # If the result is a tuple, move each entry in the tuple in one RPC.\r\n    if isinstance(results, tuple):\r\n      results = tuple(\r\n          _move_tensors(result, worker_device) for result in results)\r\n      # Once all the results are on the worker, create an individual tuple for\r\n      # each scheduled op request.\r\n      for i in range(len(batch)):\r\n        op_results[batch[i]] = tuple(result[i] for result in results)\r\n    # If the result is an `ops.Operation`, the batched op had no outputs, so\r\n    # use the `ops.Operation` as the result for all the scheduled ops.\r\n    elif isinstance(results, ops.Operation):\r\n      for i in range(len(batch)):\r\n        op_results[batch[i]] = results\r\n    else:\r\n      raise ValueError(\"Unknown type of result %s.\" % (results,))\r\n  handler_results = collections.defaultdict(list)\r\n  # Dispatch the results of the ScheduledOps to the handlers that requested\r\n  # them.\r\n  for handler in per_handler_ops.keys():\r\n    for op in per_handler_ops[handler]:\r\n      handler_results[handler].append(op_results[op])\r\n  return handler_results\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Spectral operators (e.g. 
DCT, FFT, RFFT).\r\n\r\n@@dct\r\n@@fft\r\n@@ifft\r\n@@fft2d\r\n@@ifft2d\r\n@@fft3d\r\n@@ifft3d\r\n@@rfft\r\n@@irfft\r\n@@rfft2d\r\n@@irfft2d\r\n@@rfft3d\r\n@@irfft3d\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math as _math\r\n\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import tensor_util as _tensor_util\r\nfrom tensorflow.python.ops import array_ops as _array_ops\r\nfrom tensorflow.python.ops import gen_spectral_ops\r\nfrom tensorflow.python.ops import math_ops as _math_ops\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\ndef _infer_fft_length_for_rfft(input_tensor, fft_rank):\r\n \"\"\"Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`.\"\"\"\r\n # A TensorShape for the inner fft_rank dimensions.\r\n fft_shape = input_tensor.get_shape()[-fft_rank:]\r\n\r\n # If any dim is unknown, fall back to tensor-based math.\r\n if not fft_shape.is_fully_defined():\r\n return _array_ops.shape(input_tensor)[-fft_rank:]\r\n\r\n # Otherwise, return a constant.\r\n return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)\r\n\r\n\r\ndef _infer_fft_length_for_irfft(input_tensor, fft_rank):\r\n \"\"\"Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`.\"\"\"\r\n # A TensorShape for the inner fft_rank dimensions.\r\n fft_shape = input_tensor.get_shape()[-fft_rank:]\r\n\r\n # If any dim is unknown, fall back to tensor-based math.\r\n if not fft_shape.is_fully_defined():\r\n fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])\r\n fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))\r\n return _array_ops.stack(fft_length)\r\n\r\n # Otherwise, return a constant.\r\n fft_length = fft_shape.as_list()\r\n if fft_length:\r\n fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))\r\n return _ops.convert_to_tensor(fft_length, _dtypes.int32)\r\n\r\n\r\ndef _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):\r\n \"\"\"Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims.\"\"\"\r\n fft_shape = _tensor_util.constant_value_as_shape(fft_length)\r\n\r\n # Edge case: skip padding empty tensors.\r\n if (input_tensor.shape.ndims is not None and\r\n any(dim.value == 0 for dim in input_tensor.shape)):\r\n return input_tensor\r\n\r\n # If we know the shapes ahead of time, we can either skip or pre-compute the\r\n # appropriate paddings. 
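For example, a fully defined rank-1 input of length 4\r\n  # with fft_length 8 gets a static inner padding of [[0, 4]]. 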
Otherwise, fall back to computing paddings in\r\n # TensorFlow.\r\n if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:\r\n # Slice the last FFT-rank dimensions from input_tensor's shape.\r\n input_fft_shape = input_tensor.shape[-fft_shape.ndims:]\r\n\r\n if input_fft_shape.is_fully_defined():\r\n # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.\r\n if is_reverse:\r\n fft_shape = fft_shape[:-1].concatenate(fft_shape[-1].value // 2 + 1)\r\n\r\n paddings = [[0, max(fft_dim.value - input_dim.value, 0)]\r\n for fft_dim, input_dim in zip(fft_shape, input_fft_shape)]\r\n if any(pad > 0 for _, pad in paddings):\r\n outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -\r\n fft_shape.ndims), 0)\r\n return _array_ops.pad(input_tensor, outer_paddings + paddings)\r\n return input_tensor\r\n\r\n # If we can't determine the paddings ahead of time, then we have to pad. If\r\n # the paddings end up as zero, tf.pad has a special-case that does no work.\r\n input_rank = _array_ops.rank(input_tensor)\r\n input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]\r\n outer_dims = _math_ops.maximum(0, input_rank - fft_rank)\r\n outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)\r\n # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.\r\n if is_reverse:\r\n fft_length = _array_ops.concat([fft_length[:-1],\r\n fft_length[-1:] // 2 + 1], 0)\r\n fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)\r\n paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)\r\n paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],\r\n axis=1)\r\n return _array_ops.pad(input_tensor, paddings)\r\n\r\n\r\ndef _rfft_wrapper(fft_fn, fft_rank, default_name):\r\n \"\"\"Wrapper around gen_spectral_ops.rfft* that infers fft_length argument.\"\"\"\r\n\r\n def _rfft(input_tensor, fft_length=None, name=None):\r\n with _ops.name_scope(name, default_name,\r\n [input_tensor, fft_length]) as name:\r\n input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)\r\n input_tensor.shape.with_rank_at_least(fft_rank)\r\n if fft_length is None:\r\n fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)\r\n else:\r\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\r\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\r\n return fft_fn(input_tensor, fft_length, name)\r\n _rfft.__doc__ = fft_fn.__doc__\r\n return _rfft\r\n\r\n\r\ndef _irfft_wrapper(ifft_fn, fft_rank, default_name):\r\n \"\"\"Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.\"\"\"\r\n\r\n def _irfft(input_tensor, fft_length=None, name=None):\r\n with _ops.name_scope(name, default_name,\r\n [input_tensor, fft_length]) as name:\r\n input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)\r\n input_tensor.shape.with_rank_at_least(fft_rank)\r\n if fft_length is None:\r\n fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)\r\n else:\r\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\r\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,\r\n is_reverse=True)\r\n return ifft_fn(input_tensor, fft_length, name)\r\n _irfft.__doc__ = ifft_fn.__doc__\r\n return _irfft\r\n\r\n\r\nfft = gen_spectral_ops.fft\r\nifft = gen_spectral_ops.ifft\r\nfft2d = gen_spectral_ops.fft2d\r\nifft2d = gen_spectral_ops.ifft2d\r\nfft3d = gen_spectral_ops.fft3d\r\nifft3d = gen_spectral_ops.ifft3d\r\nrfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, 
\"rfft\")\r\ntf_export(\"spectral.rfft\")(rfft)\r\nirfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, \"irfft\")\r\ntf_export(\"spectral.irfft\")(irfft)\r\nrfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, \"rfft2d\")\r\ntf_export(\"spectral.rfft2d\")(rfft2d)\r\nirfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, \"irfft2d\")\r\ntf_export(\"spectral.irfft2d\")(irfft2d)\r\nrfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, \"rfft3d\")\r\ntf_export(\"spectral.rfft3d\")(rfft3d)\r\nirfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, \"irfft3d\")\r\ntf_export(\"spectral.irfft3d\")(irfft3d)\r\n\r\n\r\ndef _validate_dct_arguments(dct_type, n, axis, norm):\r\n if n is not None:\r\n raise NotImplementedError(\"The DCT length argument is not implemented.\")\r\n if axis != -1:\r\n raise NotImplementedError(\"axis must be -1. Got: %s\" % axis)\r\n if dct_type != 2:\r\n raise ValueError(\"Only the Type II DCT is supported.\")\r\n if norm not in (None, \"ortho\"):\r\n raise ValueError(\r\n \"Unknown normalization. Expected None or 'ortho', got: %s\" % norm)\r\n\r\n\r\n# TODO(rjryan): Implement `type`, `n` and `axis` parameters.\r\n@tf_export(\"spectral.dct\")\r\ndef dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin\r\n \"\"\"Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.\r\n\r\n Currently only Type II is supported. Implemented using a length `2N` padded\r\n @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606\r\n\r\n @compatibility(scipy)\r\n Equivalent to scipy.fftpack.dct for the Type-II DCT.\r\n https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html\r\n @end_compatibility\r\n\r\n Args:\r\n input: A `[..., samples]` `float32` `Tensor` containing the signals to\r\n take the DCT of.\r\n type: The DCT type to perform. Must be 2.\r\n n: For future expansion. The length of the transform. Must be `None`.\r\n axis: For future expansion. The axis to compute the DCT along. Must be `-1`.\r\n norm: The normalization to apply. 
`None` for no normalization or `'ortho'`\r\n      for orthonormal normalization.\r\n    name: An optional name for the operation.\r\n\r\n  Returns:\r\n    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.\r\n\r\n  Raises:\r\n    ValueError: If `type` is not `2`, `n` is not `None`, `axis` is not `-1`, or\r\n      `norm` is not `None` or `'ortho'`.\r\n\r\n  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform\r\n  \"\"\"\r\n  _validate_dct_arguments(type, n, axis, norm)\r\n  with _ops.name_scope(name, \"dct\", [input]):\r\n    # We use the RFFT to compute the DCT and TensorFlow only supports float32\r\n    # for FFTs at the moment.\r\n    input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)\r\n\r\n    axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]\r\n    axis_dim_float = _math_ops.to_float(axis_dim)\r\n    scale = 2.0 * _math_ops.exp(_math_ops.complex(\r\n        0.0, -_math.pi * _math_ops.range(axis_dim_float) /\r\n        (2.0 * axis_dim_float)))\r\n\r\n    # TODO(rjryan): Benchmark performance and memory usage of the various\r\n    # approaches to computing a DCT via the RFFT.\r\n    dct2 = _math_ops.real(\r\n        rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)\r\n\r\n    if norm == \"ortho\":\r\n      n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)\r\n      n2 = n1 * _math_ops.sqrt(2.0)\r\n      # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].\r\n      weights = _array_ops.pad(\r\n          _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],\r\n          constant_values=n2)\r\n      dct2 *= weights\r\n\r\n    return dct2\r\n\r\nremove_undocumented(__name__)\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py\r\nto support training of pruned models.\r\n\r\n*******************************************************************\r\n* A simple working training script with support for model pruning *\r\n*******************************************************************\r\n\r\n  # Load data and create the model:\r\n  images, labels = LoadData(...)\r\n  predictions = MyModel(images)\r\n\r\n  # Define the loss:\r\n  slim.losses.log_loss(predictions, labels)\r\n  total_loss = slim.losses.get_total_loss()\r\n\r\n  # Define the optimizer:\r\n  optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)\r\n\r\n  # Create the train_op\r\n  train_op = slim.learning.create_train_op(total_loss, optimizer)\r\n\r\n  # Set up sparsity\r\n  sparsity = pruning.setup_gradual_sparsity(self.global_step)\r\n\r\n  # Create mask update op\r\n  mask_update_op = pruning.add_mask_update_op(sparsity)\r\n\r\n  # Run training.\r\n  learning.train(train_op,\r\n                 my_log_dir,\r\n                 mask_update_op)\r\n  See contrib/slim/python/slim/learning.py for additional examples.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib 
import slim as _slim\r\n\r\n_USE_DEFAULT = 0\r\ntrain_step = _slim.learning.train_step\r\n\r\n\r\ndef train(train_op,\r\n          logdir,\r\n          mask_update_op,\r\n          train_step_fn=train_step,\r\n          train_step_kwargs=_USE_DEFAULT,\r\n          log_every_n_steps=1,\r\n          graph=None,\r\n          master='',\r\n          is_chief=True,\r\n          global_step=None,\r\n          number_of_steps=None,\r\n          init_op=_USE_DEFAULT,\r\n          init_feed_dict=None,\r\n          local_init_op=_USE_DEFAULT,\r\n          init_fn=None,\r\n          ready_op=_USE_DEFAULT,\r\n          summary_op=_USE_DEFAULT,\r\n          save_summaries_secs=600,\r\n          summary_writer=_USE_DEFAULT,\r\n          startup_delay_steps=0,\r\n          saver=None,\r\n          save_interval_secs=600,\r\n          sync_optimizer=None,\r\n          session_config=None,\r\n          trace_every_n_steps=None):\r\n  \"\"\"Wrapper around tf-slim's train function.\r\n\r\n  Runs a training loop using a TensorFlow supervisor.\r\n  When the sync_optimizer is supplied, gradient updates are applied\r\n  synchronously. Otherwise, gradient updates are applied asynchronously.\r\n\r\n  Args:\r\n    train_op: A `Tensor` that, when executed, will apply the gradients and\r\n      return the loss value.\r\n    logdir: The directory where training logs are written to. If None, model\r\n      checkpoints and summaries will not be written.\r\n    mask_update_op: Operation that upon execution updates the weight masks and\r\n      thresholds.\r\n    train_step_fn: The function to call in order to execute a single gradient\r\n      step. The function must take exactly four arguments: the current\r\n      session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.\r\n    train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By\r\n      default, two `Boolean`, scalar ops called \"should_stop\" and \"should_log\"\r\n      are provided.\r\n    log_every_n_steps: The frequency, in terms of global steps, that the loss\r\n      and global step are logged.\r\n    graph: The graph to pass to the supervisor. If no graph is supplied the\r\n      default graph is used.\r\n    master: The address of the tensorflow master.\r\n    is_chief: Specifies whether or not the training is being run by the primary\r\n      replica during replica training.\r\n    global_step: The `Tensor` representing the global step. If left as `None`,\r\n      then slim.variables.get_or_create_global_step() is used.\r\n    number_of_steps: The max number of gradient steps to take during training,\r\n      as measured by 'global_step': training will stop if global_step is\r\n      greater than 'number_of_steps'. If the value is left as None, training\r\n      proceeds indefinitely.\r\n    init_op: The initialization operation. If left to its default value, then\r\n      the session is initialized by calling `tf.global_variables_initializer()`.\r\n    init_feed_dict: A feed dictionary to use when executing the `init_op`.\r\n    local_init_op: The local initialization operation. If left to its default\r\n      value, then the session is initialized by calling\r\n      `tf.local_variables_initializer()` and `tf.tables_initializer()`.\r\n    init_fn: An optional callable to be executed after `init_op` is called. The\r\n      callable must accept one argument, the session being initialized.\r\n    ready_op: Operation to check if the model is ready to use. If left to its\r\n      default value, then the session checks for readiness by calling\r\n      `tf.report_uninitialized_variables()`.\r\n    summary_op: The summary operation.\r\n    save_summaries_secs: How often, in seconds, to save summaries.\r\n    summary_writer: `SummaryWriter` to use. Can be `None`\r\n      to indicate that no summaries should be written. 
If unset, we\r\n create a SummaryWriter.\r\n startup_delay_steps: The number of steps to wait for before beginning. Note\r\n that this must be 0 if a sync_optimizer is supplied.\r\n saver: Saver to save checkpoints. If None, a default one will be created\r\n and used.\r\n save_interval_secs: How often, in seconds, to save the model to `logdir`.\r\n sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of\r\n them. If the argument is supplied, gradient updates will be synchronous.\r\n If left as `None`, gradient updates will be asynchronous.\r\n session_config: An instance of `tf.ConfigProto` that will be used to\r\n configure the `Session`. If left as `None`, the default will be used.\r\n trace_every_n_steps: produce and save a `Timeline` in Chrome trace format\r\n and add it to the summaries every `trace_every_n_steps`. If None, no trace\r\n information will be produced or saved.\r\n\r\n Returns:\r\n the value of the loss function after training.\r\n\r\n Raises:\r\n ValueError: if `train_op` is empty or if `startup_delay_steps` is\r\n non-zero when `sync_optimizer` is supplied, if `number_of_steps` is\r\n negative, or if `trace_every_n_steps` is not `None` and no `logdir` is\r\n provided.\r\n \"\"\"\r\n\r\n def train_step_with_pruning_fn(sess, train_op, global_step,\r\n train_step_kwargs):\r\n total_loss, should_stop = train_step_fn(sess, train_op, global_step,\r\n train_step_kwargs)\r\n sess.run(mask_update_op)\r\n return total_loss, should_stop\r\n\r\n total_loss, _ = _slim.learning.train(\r\n train_op,\r\n logdir,\r\n train_step_fn=train_step_with_pruning_fn,\r\n train_step_kwargs=train_step_kwargs,\r\n log_every_n_steps=log_every_n_steps,\r\n graph=graph,\r\n master=master,\r\n is_chief=is_chief,\r\n global_step=global_step,\r\n number_of_steps=number_of_steps,\r\n init_op=init_op,\r\n init_feed_dict=init_feed_dict,\r\n local_init_op=local_init_op,\r\n init_fn=init_fn,\r\n ready_op=ready_op,\r\n summary_op=summary_op,\r\n save_summaries_secs=save_summaries_secs,\r\n summary_writer=summary_writer,\r\n startup_delay_steps=startup_delay_steps,\r\n saver=saver,\r\n save_interval_secs=save_interval_secs,\r\n sync_optimizer=sync_optimizer,\r\n session_config=session_config,\r\n trace_every_n_steps=trace_every_n_steps)\r\n\r\n return total_loss\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Functions to create a Markov Chain Monte Carlo Metropolis step.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n# go/tf-wildcard-import\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.contrib.bayesflow.python.ops.metropolis_hastings_impl import *\r\n# pylint: enable=wildcard-import\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\n_allowed_symbols = [\r\n 'kernel',\r\n 'evolve',\r\n 'proposal_uniform',\r\n 'proposal_normal',\r\n]\r\n\r\nremove_undocumented(__name__, _allowed_symbols)\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"The Normal (Gaussian) distribution class.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\n\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import check_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops.distributions import distribution\r\nfrom tensorflow.python.ops.distributions import kullback_leibler\r\nfrom tensorflow.python.ops.distributions import special_math\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n__all__ = [\r\n \"Normal\",\r\n \"NormalWithSoftplusScale\",\r\n]\r\n\r\n\r\n@tf_export(\"distributions.Normal\")\r\nclass Normal(distribution.Distribution):\r\n \"\"\"The Normal distribution with location `loc` and `scale` parameters.\r\n\r\n #### Mathematical details\r\n\r\n The probability density function (pdf) is,\r\n\r\n ```none\r\n pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z\r\n Z = (2 pi sigma**2)**0.5\r\n ```\r\n\r\n where `loc = mu` is the mean, `scale = sigma` is the std. 
deviation, and `Z`\r\n  is the normalization constant.\r\n\r\n  The Normal distribution is a member of the [location-scale family](\r\n  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\r\n  constructed as,\r\n\r\n  ```none\r\n  X ~ Normal(loc=0, scale=1)\r\n  Y = loc + scale * X\r\n  ```\r\n\r\n  #### Examples\r\n\r\n  Examples of initialization of one or a batch of distributions.\r\n\r\n  ```python\r\n  # Define a single scalar Normal distribution.\r\n  dist = tf.distributions.Normal(loc=0., scale=3.)\r\n\r\n  # Evaluate the cdf at 1, returning a scalar.\r\n  dist.cdf(1.)\r\n\r\n  # Define a batch of two scalar valued Normals.\r\n  # The first has mean 1 and standard deviation 11, the second 2 and 22.\r\n  dist = tf.distributions.Normal(loc=[1, 2.], scale=[11, 22.])\r\n\r\n  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,\r\n  # returning a length two tensor.\r\n  dist.prob([0, 1.5])\r\n\r\n  # Get 3 samples, returning a 3 x 2 tensor.\r\n  dist.sample([3])\r\n  ```\r\n\r\n  Arguments are broadcast when possible.\r\n\r\n  ```python\r\n  # Define a batch of two scalar valued Normals.\r\n  # Both have mean 1, but different standard deviations.\r\n  dist = tf.distributions.Normal(loc=1., scale=[11, 22.])\r\n\r\n  # Evaluate the pdf of both distributions on the same point, 3.0,\r\n  # returning a length 2 tensor.\r\n  dist.prob(3.0)\r\n  ```\r\n\r\n  \"\"\"\r\n\r\n  def __init__(self,\r\n               loc,\r\n               scale,\r\n               validate_args=False,\r\n               allow_nan_stats=True,\r\n               name=\"Normal\"):\r\n    \"\"\"Construct Normal distributions with mean and stddev `loc` and `scale`.\r\n\r\n    The parameters `loc` and `scale` must be shaped in a way that supports\r\n    broadcasting (e.g. `loc + scale` is a valid operation).\r\n\r\n    Args:\r\n      loc: Floating point tensor; the means of the distribution(s).\r\n      scale: Floating point tensor; the stddevs of the distribution(s).\r\n        Must contain only positive values.\r\n      validate_args: Python `bool`, default `False`. When `True` distribution\r\n        parameters are checked for validity despite possibly degrading runtime\r\n        performance. When `False` invalid inputs may silently render incorrect\r\n        outputs.\r\n      allow_nan_stats: Python `bool`, default `True`. When `True`,\r\n        statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\r\n        indicate the result is undefined. 
When `False`, an exception is raised\r\n if one or more of the statistic's batch members are undefined.\r\n name: Python `str` name prefixed to Ops created by this class.\r\n\r\n Raises:\r\n TypeError: if `loc` and `scale` have different `dtype`.\r\n \"\"\"\r\n parameters = locals()\r\n with ops.name_scope(name, values=[loc, scale]):\r\n with ops.control_dependencies([check_ops.assert_positive(scale)] if\r\n validate_args else []):\r\n self._loc = array_ops.identity(loc, name=\"loc\")\r\n self._scale = array_ops.identity(scale, name=\"scale\")\r\n check_ops.assert_same_float_dtype([self._loc, self._scale])\r\n super(Normal, self).__init__(\r\n dtype=self._scale.dtype,\r\n reparameterization_type=distribution.FULLY_REPARAMETERIZED,\r\n validate_args=validate_args,\r\n allow_nan_stats=allow_nan_stats,\r\n parameters=parameters,\r\n graph_parents=[self._loc, self._scale],\r\n name=name)\r\n\r\n @staticmethod\r\n def _param_shapes(sample_shape):\r\n return dict(\r\n zip((\"loc\", \"scale\"), ([ops.convert_to_tensor(\r\n sample_shape, dtype=dtypes.int32)] * 2)))\r\n\r\n @property\r\n def loc(self):\r\n \"\"\"Distribution parameter for the mean.\"\"\"\r\n return self._loc\r\n\r\n @property\r\n def scale(self):\r\n \"\"\"Distribution parameter for standard deviation.\"\"\"\r\n return self._scale\r\n\r\n def _batch_shape_tensor(self):\r\n return array_ops.broadcast_dynamic_shape(\r\n array_ops.shape(self.loc),\r\n array_ops.shape(self.scale))\r\n\r\n def _batch_shape(self):\r\n return array_ops.broadcast_static_shape(\r\n self.loc.get_shape(),\r\n self.scale.get_shape())\r\n\r\n def _event_shape_tensor(self):\r\n return constant_op.constant([], dtype=dtypes.int32)\r\n\r\n def _event_shape(self):\r\n return tensor_shape.scalar()\r\n\r\n def _sample_n(self, n, seed=None):\r\n shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)\r\n sampled = random_ops.random_normal(\r\n shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)\r\n return sampled * self.scale + self.loc\r\n\r\n def _log_prob(self, x):\r\n return self._log_unnormalized_prob(x) - self._log_normalization()\r\n\r\n def _log_cdf(self, x):\r\n return special_math.log_ndtr(self._z(x))\r\n\r\n def _cdf(self, x):\r\n return special_math.ndtr(self._z(x))\r\n\r\n def _log_survival_function(self, x):\r\n return special_math.log_ndtr(-self._z(x))\r\n\r\n def _survival_function(self, x):\r\n return special_math.ndtr(-self._z(x))\r\n\r\n def _log_unnormalized_prob(self, x):\r\n return -0.5 * math_ops.square(self._z(x))\r\n\r\n def _log_normalization(self):\r\n return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)\r\n\r\n def _entropy(self):\r\n # Use broadcasting rules to calculate the full broadcast scale.\r\n scale = self.scale * array_ops.ones_like(self.loc)\r\n return 0.5 * math.log(2. 
* math.pi * math.e) + math_ops.log(scale)\r\n\r\n  def _mean(self):\r\n    return self.loc * array_ops.ones_like(self.scale)\r\n\r\n  def _quantile(self, p):\r\n    return self._inv_z(special_math.ndtri(p))\r\n\r\n  def _stddev(self):\r\n    return self.scale * array_ops.ones_like(self.loc)\r\n\r\n  def _mode(self):\r\n    return self._mean()\r\n\r\n  def _z(self, x):\r\n    \"\"\"Standardize input `x` to a unit normal.\"\"\"\r\n    with ops.name_scope(\"standardize\", values=[x]):\r\n      return (x - self.loc) / self.scale\r\n\r\n  def _inv_z(self, z):\r\n    \"\"\"Reconstruct input `x` from its normalized version.\"\"\"\r\n    with ops.name_scope(\"reconstruct\", values=[z]):\r\n      return z * self.scale + self.loc\r\n\r\n\r\nclass NormalWithSoftplusScale(Normal):\r\n  \"\"\"Normal with softplus applied to `scale`.\"\"\"\r\n\r\n  def __init__(self,\r\n               loc,\r\n               scale,\r\n               validate_args=False,\r\n               allow_nan_stats=True,\r\n               name=\"NormalWithSoftplusScale\"):\r\n    parameters = locals()\r\n    with ops.name_scope(name, values=[scale]):\r\n      super(NormalWithSoftplusScale, self).__init__(\r\n          loc=loc,\r\n          scale=nn.softplus(scale, name=\"softplus_scale\"),\r\n          validate_args=validate_args,\r\n          allow_nan_stats=allow_nan_stats,\r\n          name=name)\r\n    self._parameters = parameters\r\n\r\n\r\n@kullback_leibler.RegisterKL(Normal, Normal)\r\ndef _kl_normal_normal(n_a, n_b, name=None):\r\n  \"\"\"Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.\r\n\r\n  Args:\r\n    n_a: instance of a Normal distribution object.\r\n    n_b: instance of a Normal distribution object.\r\n    name: (optional) Name to use for created operations.\r\n      default is \"kl_normal_normal\".\r\n\r\n  Returns:\r\n    Batchwise KL(n_a || n_b)\r\n  \"\"\"\r\n  with ops.name_scope(name, \"kl_normal_normal\", [n_a.loc, n_b.loc]):\r\n    one = constant_op.constant(1, dtype=n_a.dtype)\r\n    two = constant_op.constant(2, dtype=n_a.dtype)\r\n    half = constant_op.constant(0.5, dtype=n_a.dtype)\r\n    s_a_squared = math_ops.square(n_a.scale)\r\n    s_b_squared = math_ops.square(n_b.scale)\r\n    ratio = s_a_squared / s_b_squared\r\n    return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) +\r\n            half * (ratio - one - math_ops.log(ratio)))\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"The Half Normal distribution class.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import check_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops.distributions import distribution\r\nfrom tensorflow.python.ops.distributions import special_math\r\n\r\n\r\n__all__ = [\r\n    \"HalfNormal\",\r\n]\r\n\r\n\r\nclass HalfNormal(distribution.Distribution):\r\n  \"\"\"The Half Normal distribution with scale `scale`.\r\n\r\n  #### Mathematical details\r\n\r\n  The half normal is a transformation of a centered normal distribution.\r\n  If some random variable `X` has a normal distribution,\r\n  ```none\r\n  X ~ Normal(0.0, scale)\r\n  Y = |X|\r\n  ```\r\n  Then `Y` will have a half normal distribution. The probability density\r\n  function (pdf) is:\r\n\r\n  ```none\r\n  pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) *\r\n                         exp(- 1/2 * (x / scale) ** 2)\r\n  ```\r\n  where `scale = sigma` is the standard deviation of the underlying normal\r\n  distribution.\r\n\r\n  #### Examples\r\n\r\n  Examples of initialization of one or a batch of distributions.\r\n\r\n  ```python\r\n  # Define a single scalar HalfNormal distribution.\r\n  dist = tf.contrib.distributions.HalfNormal(scale=3.0)\r\n\r\n  # Evaluate the cdf at 1, returning a scalar.\r\n  dist.cdf(1.)\r\n\r\n  # Define a batch of two scalar valued HalfNormals.\r\n  # The first has scale 11.0, the second 22.0\r\n  dist = tf.contrib.distributions.HalfNormal(scale=[11.0, 22.0])\r\n\r\n  # Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,\r\n  # returning a length two tensor.\r\n  dist.prob([1.0, 1.5])\r\n\r\n  # Get 3 samples, returning a 3 x 2 tensor.\r\n  dist.sample([3])\r\n  ```\r\n\r\n  \"\"\"\r\n\r\n  def __init__(self,\r\n               scale,\r\n               validate_args=False,\r\n               allow_nan_stats=True,\r\n               name=\"HalfNormal\"):\r\n    \"\"\"Construct HalfNormals with scale `scale`.\r\n\r\n    Args:\r\n      scale: Floating point tensor; the scales of the distribution(s).\r\n        Must contain only positive values.\r\n      validate_args: Python `bool`, default `False`. When `True` distribution\r\n        parameters are checked for validity despite possibly degrading runtime\r\n        performance. When `False` invalid inputs may silently render incorrect\r\n        outputs.\r\n      allow_nan_stats: Python `bool`, default `True`. 
When `True`,\r\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\r\n indicate the result is undefined. When `False`, an exception is raised\r\n if one or more of the statistic's batch members are undefined.\r\n name: Python `str` name prefixed to Ops created by this class.\r\n \"\"\"\r\n parameters = locals()\r\n with ops.name_scope(name, values=[scale]):\r\n with ops.control_dependencies([check_ops.assert_positive(scale)] if\r\n validate_args else []):\r\n self._scale = array_ops.identity(scale, name=\"scale\")\r\n super(HalfNormal, self).__init__(\r\n dtype=self._scale.dtype,\r\n reparameterization_type=distribution.FULLY_REPARAMETERIZED,\r\n validate_args=validate_args,\r\n allow_nan_stats=allow_nan_stats,\r\n parameters=parameters,\r\n graph_parents=[self._scale],\r\n name=name)\r\n\r\n @staticmethod\r\n def _param_shapes(sample_shape):\r\n return {\"scale\": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}\r\n\r\n @property\r\n def scale(self):\r\n \"\"\"Distribution parameter for the scale.\"\"\"\r\n return self._scale\r\n\r\n def _batch_shape_tensor(self):\r\n return array_ops.shape(self.scale)\r\n\r\n def _batch_shape(self):\r\n return self.scale.shape\r\n\r\n def _event_shape_tensor(self):\r\n return constant_op.constant([], dtype=dtypes.int32)\r\n\r\n def _event_shape(self):\r\n return tensor_shape.scalar()\r\n\r\n def _sample_n(self, n, seed=None):\r\n shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)\r\n sampled = random_ops.random_normal(\r\n shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)\r\n return math_ops.abs(sampled * self.scale)\r\n\r\n def _prob(self, x):\r\n coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi)\r\n pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2)\r\n return pdf * math_ops.cast(x >= 0, self.dtype)\r\n\r\n def _cdf(self, x):\r\n truncated_x = nn.relu(x)\r\n return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0))\r\n\r\n def _entropy(self):\r\n return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5\r\n\r\n def _mean(self):\r\n return self.scale * np.sqrt(2.0) / np.sqrt(np.pi)\r\n\r\n def _quantile(self, p):\r\n return np.sqrt(2.0) * self.scale * special_math.erfinv(p)\r\n\r\n def _mode(self):\r\n return array_ops.zeros(self.batch_shape_tensor())\r\n\r\n def _variance(self):\r\n return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)\r\n", "\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! 
Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\ndef eager_py_func(input, token, Tout, name=None):\r\n r\"\"\"Eagerly executes a python function to compute func(input)->output. The\r\n\r\n semantics of the input, output, and attributes are the same as those for\r\r\n PyFunc.\r\n\r\n Args:\r\n input: A list of `Tensor` objects.\r\n token: A `string`.\r\n Tout: A list of `tf.DTypes`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `Tensor` objects of type `Tout`.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'Tout' argument to \"\r\n \"'eager_py_func' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"EagerPyFunc\", input=input, token=token, Tout=Tout, name=name)\r\n _result = _op.outputs[:]\r\n if not _result:\r\n return _op\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"token\", _op.get_attr(\"token\"), \"Tin\", _op.get_attr(\"Tin\"),\r\n \"Tout\", _op.get_attr(\"Tout\"))\r\n _execute.record_gradient(\r\n \"EagerPyFunc\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"EagerPyFunc\", name,\r\n _ctx._post_execution_callbacks, input, \"token\", token, \"Tout\", Tout)\r\n return _result\r\n except _core._FallbackException:\r\n return eager_py_func_eager_fallback(\r\n input, token=token, Tout=Tout, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef eager_py_func_eager_fallback(input, token, Tout, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function eager_py_func\r\n \"\"\"\r\n _ctx = _context.context()\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'Tout' argument to \"\r\n \"'eager_py_func' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\r\n _inputs_flat = list(input)\r\n _attrs = (\"token\", token, \"Tin\", _attr_Tin, \"Tout\", Tout)\r\n _result = _execute.execute(b\"EagerPyFunc\", len(Tout), 
inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"EagerPyFunc\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef py_func(input, token, Tout, name=None):\r\n r\"\"\"Invokes a python function to compute func(input)->output.\r\n\r\n This operation is considered stateful. For a stateless version, see\r\r\n PyFuncStateless.\r\n\r\n Args:\r\n input: A list of `Tensor` objects.\r\n List of Tensors that will provide input to the Op.\r\n token: A `string`.\r\n A token representing a registered python function in this address space.\r\n Tout: A list of `tf.DTypes`. Data types of the outputs from the op.\r\r\n The length of the list specifies the number of outputs.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `Tensor` objects of type `Tout`.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'Tout' argument to \"\r\n \"'py_func' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PyFunc\", input=input, token=token, Tout=Tout, name=name)\r\n _result = _op.outputs[:]\r\n if not _result:\r\n return _op\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"token\", _op.get_attr(\"token\"), \"Tin\", _op.get_attr(\"Tin\"),\r\n \"Tout\", _op.get_attr(\"Tout\"))\r\n _execute.record_gradient(\r\n \"PyFunc\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"PyFunc\", name,\r\n _ctx._post_execution_callbacks, input, \"token\", token, \"Tout\", Tout)\r\n return _result\r\n except _core._FallbackException:\r\n return py_func_eager_fallback(\r\n input, token=token, Tout=Tout, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef py_func_eager_fallback(input, token, Tout, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function py_func\r\n \"\"\"\r\n _ctx = _context.context()\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'Tout' argument to \"\r\n \"'py_func' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\r\n _inputs_flat = list(input)\r\n _attrs = (\"token\", token, \"Tin\", _attr_Tin, \"Tout\", Tout)\r\n _result = _execute.execute(b\"PyFunc\", len(Tout), inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"PyFunc\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef py_func_stateless(input, token, Tout, name=None):\r\n r\"\"\"A stateless version of PyFunc.\r\n\r\n Args:\r\n input: A list of `Tensor` objects.\r\n token: A `string`.\r\n Tout: A list of `tf.DTypes`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `Tensor` objects of type `Tout`.\r\n \"\"\"\r\n _ctx = _context.context()\r\n if not _ctx.executing_eagerly():\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n 
\"Expected list for 'Tout' argument to \"\r\n \"'py_func_stateless' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PyFuncStateless\", input=input, token=token, Tout=Tout, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"token\", _op.get_attr(\"token\"), \"Tin\", _op.get_attr(\"Tin\"),\r\n \"Tout\", _op.get_attr(\"Tout\"))\r\n _execute.record_gradient(\r\n \"PyFuncStateless\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._handle, _ctx.device_name, \"PyFuncStateless\", name,\r\n _ctx._post_execution_callbacks, input, \"token\", token, \"Tout\", Tout)\r\n return _result\r\n except _core._FallbackException:\r\n return py_func_stateless_eager_fallback(\r\n input, token=token, Tout=Tout, name=name)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef py_func_stateless_eager_fallback(input, token, Tout, name=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function py_func_stateless\r\n \"\"\"\r\n _ctx = _context.context()\r\n token = _execute.make_str(token, \"token\")\r\n if not isinstance(Tout, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'Tout' argument to \"\r\n \"'py_func_stateless' Op, not %r.\" % Tout)\r\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\r\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\r\n _inputs_flat = list(input)\r\n _attrs = (\"token\", token, \"Tin\", _attr_Tin, \"Tout\", Tout)\r\n _result = _execute.execute(b\"PyFuncStateless\", len(Tout),\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"PyFuncStateless\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"EagerPyFunc\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_list_attr: \"Tin\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_list_attr: \"Tout\"\r\n# }\r\n# attr {\r\n# name: \"token\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"Tin\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# attr {\r\n# name: \"Tout\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"PyFunc\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_list_attr: \"Tin\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_list_attr: \"Tout\"\r\n# }\r\n# attr {\r\n# name: \"token\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"Tin\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# attr {\r\n# name: \"Tout\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"PyFuncStateless\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_list_attr: \"Tin\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_list_attr: \"Tout\"\r\n# }\r\n# attr {\r\n# name: \"token\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"Tin\"\r\n# type: 
\"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# attr {\r\n# name: \"Tout\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# }\r\n_op_def_lib = _InitOpDefLibrary(b\"\\nj\\n\\013EagerPyFunc\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\017\\n\\005token\\022\\006string\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\210\\001\\001\\ne\\n\\006PyFunc\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\017\\n\\005token\\022\\006string\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\210\\001\\001\\nk\\n\\017PyFuncStateless\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\017\\n\\005token\\022\\006string\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\")\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"BigQuery reading support for TensorFlow.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.cloud.python.ops import gen_bigquery_reader_ops\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import io_ops\r\n\r\n\r\nclass BigQueryReader(io_ops.ReaderBase):\r\n \"\"\"A Reader that outputs keys and tf.Example values from a BigQuery table.\r\n\r\n Example use:\r\n ```python\r\n # Assume a BigQuery has the following schema,\r\n # name STRING,\r\n # age INT,\r\n # state STRING\r\n\r\n # Create the parse_examples list of features.\r\n features = dict(\r\n name=tf.FixedLenFeature([1], tf.string),\r\n age=tf.FixedLenFeature([1], tf.int32),\r\n state=tf.FixedLenFeature([1], dtype=tf.string, default_value=\"UNK\"))\r\n\r\n # Create a Reader.\r\n reader = bigquery_reader_ops.BigQueryReader(project_id=PROJECT,\r\n dataset_id=DATASET,\r\n table_id=TABLE,\r\n timestamp_millis=TIME,\r\n num_partitions=NUM_PARTITIONS,\r\n features=features)\r\n\r\n # Populate a queue with the BigQuery Table partitions.\r\n queue = tf.train.string_input_producer(reader.partitions())\r\n\r\n # Read and parse examples.\r\n row_id, examples_serialized = reader.read(queue)\r\n examples = tf.parse_example(examples_serialized, features=features)\r\n\r\n # Process the Tensors examples[\"name\"], examples[\"age\"], etc...\r\n ```\r\n\r\n Note that to create a reader a snapshot timestamp is necessary. 
This\r\n will enable the reader to look at a consistent snapshot of the table.\r\n For more information, see 'Table Decorators' in BigQuery docs.\r\n\r\n See ReaderBase for supported methods.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n project_id,\r\n dataset_id,\r\n table_id,\r\n timestamp_millis,\r\n num_partitions,\r\n features=None,\r\n columns=None,\r\n test_end_point=None,\r\n name=None):\r\n \"\"\"Creates a BigQueryReader.\r\n\r\n Args:\r\n project_id: GCP project ID.\r\n dataset_id: BigQuery dataset ID.\r\n table_id: BigQuery table ID.\r\n timestamp_millis: timestamp to snapshot the table in milliseconds since\r\n the epoch. Relative (negative or zero) snapshot times are not allowed.\r\n For more details, see 'Table Decorators' in BigQuery docs.\r\n num_partitions: Number of non-overlapping partitions to read from.\r\n features: parse_example compatible dict from keys to `VarLenFeature` and\r\n `FixedLenFeature` objects. Keys are read as columns from the db.\r\n columns: list of columns to read, can be set iff features is None.\r\n test_end_point: Used only for testing purposes (optional).\r\n name: a name for the operation (optional).\r\n\r\n Raises:\r\n TypeError: - If features is neither None nor a dict or\r\n - If columns is neither None nor a list or\r\n - If both features and columns are None or set.\r\n \"\"\"\r\n if (features is None) == (columns is None):\r\n raise TypeError(\"exactly one of features and columns must be set.\")\r\n\r\n if features is not None:\r\n if not isinstance(features, dict):\r\n raise TypeError(\"features must be a dict.\")\r\n self._columns = list(features.keys())\r\n elif columns is not None:\r\n if not isinstance(columns, list):\r\n raise TypeError(\"columns must be a list.\")\r\n self._columns = columns\r\n\r\n self._project_id = project_id\r\n self._dataset_id = dataset_id\r\n self._table_id = table_id\r\n self._timestamp_millis = timestamp_millis\r\n self._num_partitions = num_partitions\r\n self._test_end_point = test_end_point\r\n\r\n reader = gen_bigquery_reader_ops.big_query_reader(\r\n name=name,\r\n project_id=self._project_id,\r\n dataset_id=self._dataset_id,\r\n table_id=self._table_id,\r\n timestamp_millis=self._timestamp_millis,\r\n columns=self._columns,\r\n test_end_point=self._test_end_point)\r\n super(BigQueryReader, self).__init__(reader)\r\n\r\n def partitions(self, name=None):\r\n \"\"\"Returns serialized BigQueryTablePartition messages.\r\n\r\n These messages represent a non-overlapping division of a table for a\r\n bulk read.\r\n\r\n Args:\r\n name: a name for the operation (optional).\r\n\r\n Returns:\r\n `1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.\r\n \"\"\"\r\n return gen_bigquery_reader_ops.generate_big_query_reader_partitions(\r\n name=name,\r\n project_id=self._project_id,\r\n dataset_id=self._dataset_id,\r\n table_id=self._table_id,\r\n timestamp_millis=self._timestamp_millis,\r\n num_partitions=self._num_partitions,\r\n test_end_point=self._test_end_point,\r\n columns=self._columns)\r\n\r\n\r\nops.NotDifferentiable(\"BigQueryReader\")\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A Network is a composition of Layers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport os\r\nimport weakref\r\n\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.estimator import util as estimator_util\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.layers import base\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.training import checkpoint_utils\r\nfrom tensorflow.python.training import saver as saver_lib\r\nfrom tensorflow.python.training import training_util\r\n\r\n# pylint: disable=protected-access\r\n# Explanation for protected-access disable: Network has lots of same-class and\r\n# parent-class references across different objects, and some to private\r\n# functions in base.py which should be reused.\r\n\r\n\r\ndef _network_name_scope_naming(current_variable_scope):\r\n \"\"\"Name scope naming to match operation names to variable names.\r\n\r\n Used in Networks and also applied to non-Network Layers which are added to\r\n Networks before being built.\r\n\r\n Args:\r\n current_variable_scope: A VariableScope object.\r\n Returns:\r\n A name scope name.\r\n \"\"\"\r\n return current_variable_scope.name + \"/\"\r\n\r\n\r\nclass Network(base.Layer):\r\n \"\"\"Represents the composition of a set of Layers.\r\n\r\n `Network` implements the `Layer` interface and adds convenience methods for\r\n managing sub-`Layer`s, such as listing variables.\r\n\r\n `Layer`s (including other `Network`s) should be added via `track_layer`. 
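`track_layer` returns the\r\n  layer it is passed, so a sub-layer can be tracked and assigned to an\r\n  attribute in a single statement. 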
They\r\n can then be used when overriding the `Network.call` method:\r\n\r\n ```python\r\n class TwoLayerNetwork(tfe.Network):\r\n\r\n def __init__(self, name):\r\n super(TwoLayerNetwork, self).__init__(name=name)\r\n self.layer_one = self.track_layer(tf.layers.Dense(16, input_shape=(8,)))\r\n self.layer_two = self.track_layer(tf.layers.Dense(1, input_shape=(16,)))\r\n\r\n def call(self, inputs):\r\n return self.layer_two(self.layer_one(inputs))\r\n ```\r\n\r\n After constructing an object and calling the `Network`, a list of variables\r\n created by tracked `Layer`s is available via `Network.variables`:\r\n\r\n ```python\r\n net = TwoLayerNetwork(name=\"net\")\r\n output = net(tf.ones([1, 8]))\r\n print([v.name for v in net.variables])\r\n ```\r\n\r\n This example prints variable names, one kernel and one bias per\r\n `tf.layers.Dense` layer:\r\n\r\n ```\r\n ['net/dense/kernel:0',\r\n 'net/dense/bias:0',\r\n 'net/dense_1/kernel:0',\r\n 'net/dense_1/bias:0']\r\n ```\r\n\r\n These variables can be passed to a `Saver` (`tf.train.Saver`, or\r\n `tf.contrib.eager.Saver` when executing eagerly) to save or restore the\r\n `Network`, typically alongside a global step and `tf.train.Optimizer`\r\n variables when checkpointing during training.\r\n\r\n Note that the semantics of calling a `Network` with graph execution (i.e. not\r\n executing eagerly) may change slightly in the future. Currently stateful ops\r\n are pruned from the graph unless they or something that depends on them is\r\n executed in a session, but this behavior is not consistent with eager\r\n execution (where stateful ops are executed eagerly). `Layer`s from `tf.layers`\r\n do not depend on this pruning and so will not be affected, but `Network`s\r\n which rely on stateful ops being added to the graph but not executed (e.g. via\r\n custom `Layer`s which manage stateful ops) may break with this change.\r\n \"\"\"\r\n # TODO(josh11b,ashankar,allenl):\r\n # - Should 'trainable' be changeable on the Network object?\r\n # - Do we allow add_variable in Network?\r\n # - Detect layers used in __call__ that weren't registered with track_layer.\r\n # - Convert inputs to __call__ to tensors.\r\n\r\n def __init__(self, name=None):\r\n \"\"\"Configure the `Network`.\r\n\r\n Args:\r\n name: The name to use for this `Network`. If specified, it must be unique\r\n in the context where this `Network` is first\r\n (1) added to another `Network` (in which case it must not share a name\r\n with other `Layers` added to that `Network`), or\r\n (2) built/called (in which case no other 'top-level' `Network`s may\r\n share this name).\r\n If unspecified or None, the `Network` will be named using its class\r\n name, with a number appended if necessary for uniqueness (e.g. MyNetwork\r\n -> 'my_network_1').\r\n\r\n Raises:\r\n ValueError: If `name` is not valid. 
Note that some naming errors will\r\n instead be raised when the `Network` is called.\r\n \"\"\"\r\n if isinstance(name, variable_scope.VariableScope):\r\n raise ValueError(\"VariableScopes are not valid Network names.\")\r\n if name is not None and \"/\" in name:\r\n raise ValueError(\r\n \"Forward slashes ('/') are not allowed in Network names.\")\r\n super(Network, self).__init__(name=name)\r\n self._layers = []\r\n self._sub_layer_name_uids = collections.defaultdict(int)\r\n # Initially None, but set to False for networks which are first built as\r\n # top-level.\r\n self._first_parent = None # A weak reference to our first parent.\r\n self._non_network_sublayers = []\r\n self._owned_layers = {}\r\n # The scope to use if we end up without a parent.\r\n self._default_parent_variable_scope = variable_scope.get_variable_scope()\r\n # Hold on to the variable scope counts from init to check whether a scope\r\n # with the name we want was ever created in our parent scope. Without this\r\n # check we might have name collisions if the parent scope on init gets\r\n # closed before build is called.\r\n self._variable_scope_counts_on_init = (\r\n variable_scope._get_default_variable_store().variable_scopes_count)\r\n\r\n def _name_scope_name(self, current_variable_scope):\r\n \"\"\"Overrides Layer op naming to match variable naming.\"\"\"\r\n return _network_name_scope_naming(\r\n current_variable_scope=current_variable_scope)\r\n\r\n def _init_set_name(self, name):\r\n # Anonymous Networks (name=None) defer setting a final name until they are\r\n # (1) added to another Network, or (2) built/called (where (2) is only used\r\n # for a \"top level\" network).\r\n #\r\n # However, if we were provided an explicit name (name is not None), that\r\n # will always be the final name of the Network; if it turns out not to be\r\n # unique or if variable names can't be prefixed by it we will throw an\r\n # error.\r\n self._name = name\r\n self._base_name = None\r\n\r\n def _finalize_name(self, parent_network):\r\n if not self._name:\r\n # We were not passed a name explicitly (or it was blank), so this is an\r\n # anonymous Network. 
We make up a unique name.\r\n if parent_network:\r\n avoid_names = parent_network._owned_layers\r\n name_uid_map = parent_network._sub_layer_name_uids\r\n else:\r\n name_uid_map = base._get_default_graph_uid_map()\r\n # Figure out which names we have to avoid based on which variable scope\r\n # we're nested in.\r\n strip_name = self._default_parent_variable_scope.name\r\n if strip_name:\r\n strip_name += \"/\"\r\n def _strip_on_init_scope(name):\r\n if name.startswith(strip_name):\r\n return name[len(strip_name):]\r\n else:\r\n return None\r\n avoid_names = set(\r\n _strip_on_init_scope(name)\r\n for name in self._variable_scope_counts_on_init.keys() if name)\r\n self._name, self._base_name = self._make_unique_name(\r\n name_uid_map=name_uid_map, avoid_names=avoid_names,\r\n namespace=self._default_parent_variable_scope.name,\r\n zero_based=True)\r\n if self._first_parent is None or (self._first_parent # False = no parent\r\n and self._first_parent() is None):\r\n # Save a pointer to the parent Network so that we can later check that the\r\n # scope name we get is correct.\r\n if not parent_network:\r\n self._first_parent = parent_network\r\n else:\r\n self._first_parent = weakref.ref(parent_network)\r\n\r\n def _set_scope(self, scope=None):\r\n if self._scope is None:\r\n if not self._first_parent:\r\n first_parent = self._first_parent\r\n else:\r\n first_parent = self._first_parent()\r\n if first_parent is None:\r\n # If we were never added to another Network, or that Network has been\r\n # garbage collected before being called, then we're a top-level Network.\r\n self._finalize_name(\r\n # Use False to make sure the value sticks and we don't inherit a\r\n # parent if we're added to a network later.\r\n parent_network=False)\r\n if scope is not None:\r\n raise ValueError(\"Networks may not be created with explicit scopes.\")\r\n if first_parent:\r\n first_parent._set_scope()\r\n parent_scope = first_parent._scope\r\n else:\r\n parent_scope = self._default_parent_variable_scope\r\n with variable_scope.variable_scope(parent_scope) as parent_vs:\r\n expected_scope_name = parent_vs.name + \"/\" + self._name\r\n if expected_scope_name in self._variable_scope_counts_on_init:\r\n raise ValueError(\r\n (\"A Network named '%s' already exists (or a variable_scope was \"\r\n \"created with this name). Names must be unique.\") % (\r\n self._name,))\r\n # Make sure variables with this prefix will be unique.\r\n with variable_scope.variable_scope(\r\n None, use_resource=True, default_name=self._name) as scope:\r\n self._scope = scope\r\n scope_name = scope.name\r\n suffix_start = scope_name.rfind(\"/\") + 1\r\n # rfind is -1 if there is no slash in the string, in which case the\r\n # suffix starts at the beginning of the string (there is no prefix).\r\n scope_suffix = scope_name[suffix_start:]\r\n scope_prefix = scope_name[:suffix_start]\r\n if scope_suffix != self._name:\r\n raise ValueError(\r\n (\"A Network named '%s' already exists (or a variable_scope was \"\r\n \"created with this name). Names must be unique.\") % (\r\n self._name,))\r\n if (first_parent\r\n and scope_prefix[:-1] != first_parent.scope_name):\r\n raise ValueError(\r\n (\"Network variable names must match a nesting of sub-Network \"\r\n \"names. Expected prefix '%s' from parent network, but got \"\r\n \"'%s' when attempting to create a variable_scope for Network \"\r\n \"'%s'. 
Likely an explicit variable_scope was inserted into \"\r\n \"the nesting.\") % (\r\n first_parent.scope_name,\r\n scope_prefix[:-1],\r\n self._name))\r\n elif not first_parent and scope_prefix:\r\n # For the case when this Network is not nested inside any other\r\n # Network, but is in a variable_scope. This Network's name takes on\r\n # the full variable scope prefix.\r\n self._name = scope_name\r\n\r\n for non_network_sublayer in self._non_network_sublayers:\r\n self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)\r\n\r\n def _set_scope_for_nonnetwork_sublayer(self, sublayer):\r\n if sublayer._scope is None:\r\n if sublayer._first_parent is None:\r\n constituent_first_parent = None\r\n else:\r\n constituent_first_parent = sublayer._first_parent()\r\n if constituent_first_parent:\r\n constituent_first_parent._set_scope()\r\n parent_scope = constituent_first_parent._scope\r\n else:\r\n self._finalize_name(False)\r\n raise ValueError(\r\n (\"The parent of a Layer added to Network %s was garbage collected \"\r\n \"before the Layer was built. If this limitation bothers you \"\r\n \"please file a feature request.\") %\r\n (self.name,))\r\n with variable_scope.variable_scope(parent_scope):\r\n # Horrid hack to make Layer variable names which are direct\r\n # sub-layers of Networks conform to the Network variable naming\r\n # conventions.\r\n with variable_scope.variable_scope(\r\n None, use_resource=True,\r\n default_name=sublayer.name) as sub_scope:\r\n sublayer._scope = sub_scope\r\n # Also switch op naming for this Layer to match Network conventions,\r\n # i.e. op naming matching variable naming.\r\n sublayer._name_scope_name = _network_name_scope_naming\r\n\r\n @base.Layer.name.getter\r\n def name(self):\r\n if self._name is None:\r\n raise ValueError(\r\n \"The network does not yet have a final name, but a name was \"\r\n \"requested for it. Networks get a name when they are added to \"\r\n \"another Network via track_layer, or when they are first \"\r\n \"called/built.\")\r\n return self._name\r\n\r\n def track_layer(self, layer):\r\n \"\"\"Track a Layer in this Network.\r\n\r\n `Network` requires that all `Layer`s used in `call()` be tracked so that the\r\n `Network` can export a complete list of variables.\r\n\r\n Args:\r\n layer: A `tf.layers.Layer` object.\r\n\r\n Returns:\r\n The passed in `layer`.\r\n\r\n Raises:\r\n RuntimeError: If __init__ has not been called.\r\n TypeError: If `layer` is the wrong type.\r\n ValueError: If a `Layer` with the same name has already been added.\r\n \"\"\"\r\n if not hasattr(self, \"_layers\"):\r\n raise RuntimeError(\"Need to call Network.__init__ before adding layers\")\r\n if not isinstance(layer, base.Layer):\r\n raise TypeError(\r\n \"Network.track_layer() passed type %s, not a tf.layers.Layer\" %\r\n (type(layer),))\r\n if isinstance(layer, Network):\r\n layer._finalize_name(parent_network=self)\r\n else:\r\n # `layer` is a non-Network, so it hasn't been named to follow Network\r\n # conventions for contained Layers (i.e. the same conventions as for\r\n # sub-Networks). 
This renaming is necessary to isolate Network variable\r\n # naming from Layers constructed outside the Network and never added to it\r\n # (because Layers are named globally).\r\n if not layer.built:\r\n if not hasattr(layer, \"_first_parent\"):\r\n dereferenced_layer_first_parent = None\r\n else:\r\n dereferenced_layer_first_parent = layer._first_parent()\r\n if dereferenced_layer_first_parent is None:\r\n if layer._name != layer._base_name:\r\n # If name and base_name do not match, then this Layer used anonymous\r\n # naming and we have to rename it. Otherwise there's an explicit\r\n # name, and we should respect it (subject to error checking).\r\n layer._name, layer._base_name = layer._make_unique_name(\r\n name_uid_map=self._sub_layer_name_uids,\r\n avoid_names=self._owned_layers,\r\n zero_based=True\r\n # No namespace required, since we've specified our own UID map.\r\n )\r\n layer._first_parent = weakref.ref(self)\r\n self._non_network_sublayers.append(layer)\r\n if (not layer.built\r\n and layer._first_parent\r\n and self is layer._first_parent()):\r\n if layer.name in self._owned_layers:\r\n if self._owned_layers[layer.name] is layer:\r\n return layer\r\n raise ValueError(\r\n \"Attempt to add two Layers with the name '%s' to the same Network.\"\r\n % (layer.name))\r\n self._owned_layers[layer.name] = layer\r\n self._layers.append(layer)\r\n return layer\r\n\r\n def get_layer(self, name=None, index=None):\r\n \"\"\"Get a contained `tf.layers.Layer` either by name or index.\r\n\r\n Args:\r\n name: String matching one of the names of a contained `Layer`. Note that\r\n the names of `Layer`s added to `Network`s may not be unique when doing\r\n layer sharing (i.e. adding a `Layer` to this `Network` which was already\r\n added to another `Network`). The lowest index `Layer` with a matching\r\n name will be returned.\r\n index: Integer in [0, number of layers). 
Layers are assigned an index\r\n by the order they are added.\r\n\r\n Returns:\r\n A `tf.layers.Layer` object.\r\n\r\n Raises:\r\n ValueError: If neither or both of 'index' or 'name' is specified, or the\r\n lookup failed.\r\n \"\"\"\r\n if index is not None:\r\n if name is not None:\r\n raise ValueError(\"Exactly one of 'index' or 'name' must be provided\")\r\n if len(self._layers) <= index:\r\n raise ValueError(\"Was asked to retrieve layer at index \" + str(index) +\r\n \" but model only has \" + str(len(self._layers)) +\r\n \" layers.\")\r\n else:\r\n return self._layers[index]\r\n else:\r\n if not name:\r\n raise ValueError(\"Provide either a layer name or layer index.\")\r\n for layer in self._layers:\r\n if layer.name == name:\r\n return layer\r\n raise ValueError(\"No such layer: \" + name)\r\n\r\n # The following methods are for implementing the Layer interface.\r\n\r\n @property\r\n def weights(self):\r\n # TODO(josh11b): Should this return a set or perform de-duplication of\r\n # variables in the case of shared layers/variables that appear in\r\n # multiple places in the Network?\r\n weights = []\r\n for layer in self._layers:\r\n weights += layer.weights\r\n return weights\r\n\r\n @property\r\n def trainable_weights(self):\r\n weights = []\r\n for layer in self._layers:\r\n weights += layer.trainable_weights\r\n return weights\r\n\r\n @property\r\n def non_trainable_weights(self):\r\n weights = []\r\n for layer in self._layers:\r\n weights += layer.non_trainable_weights\r\n return weights\r\n\r\n @property\r\n def trainable(self):\r\n return True\r\n\r\n @trainable.setter\r\n def trainable(self, value):\r\n if not value:\r\n # We believe it better to decide which layers & networks are trainable\r\n # at the Trainer level than here. Otherwise you can run into trouble if a\r\n # layer/network is shared between two models, but is trainable in one\r\n # but not the other (like with adversarial networks).\r\n raise AttributeError(\"cannot mark Network as not trainable\")\r\n\r\n @property\r\n def layers(self):\r\n return self._layers\r\n\r\n def add_variable(self, name, shape, dtype=None, initializer=None,\r\n regularizer=None, trainable=True, constraint=None):\r\n raise RuntimeError(\r\n \"add_variable not supported in Network class yet. Please file an issue \"\r\n \"at https://github.com/tensorflow/tensorflow/issues/new if this is \"\r\n \"important to you\")\r\n\r\n def add_loss(self, losses, inputs=None):\r\n raise RuntimeError(\r\n \"add_loss is not supported in Network class yet. Please file an issue \"\r\n \"at https://github.com/tensorflow/tensorflow/issues/new if this is \"\r\n \"important to you\")\r\n\r\n @property\r\n def losses(self):\r\n \"\"\"Gather losses from `Layer`s in the `Network`.\r\n\r\n Note that when executing eagerly, `Layer.losses` evaluates\r\n regularizers. 
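As a minimal sketch (assuming eager execution and a built `Network` called\r\n `net`), the gathered losses can be summed into a single regularization term:\r\n\r\n ```python\r\n regularization_loss = tf.add_n(net.losses)\r\n ```\r\n\r\n 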
When using graph execution, variable regularization ops have\r\n already been created and are simply returned here.\r\n\r\n Returns:\r\n A list of tensors.\r\n \"\"\"\r\n layer_losses = []\r\n for layer in self.layers:\r\n layer_losses.extend(layer.losses)\r\n return layer_losses\r\n\r\n # TODO(allenl): Support other Layer methods needed for graph mode, such as for\r\n # updates\r\n\r\n\r\nclass Sequential(Network):\r\n \"\"\"Represents a linear sequence of Layers or functions.\r\n\r\n The output of each layer/function is provided as the input to the next.\r\n The inputs passed to `__call__` are passed to the inputs of the first\r\n Layer, and it returns the outputs of the last Layer.\r\n\r\n Args:\r\n layers_funcs: An optional sequence where each element is either a\r\n tf.layers.Layer object or a callable.\r\n name: An optional string name to use for this Network.\r\n \"\"\"\r\n\r\n def __init__(self, layers_funcs=None, name=None):\r\n super(Sequential, self).__init__(name=name)\r\n self._layers_funcs = []\r\n if layers_funcs:\r\n for l in layers_funcs:\r\n self.add(l)\r\n\r\n def add(self, layer_func):\r\n if isinstance(layer_func, base.Layer):\r\n args = estimator_util.fn_args(layer_func.call)\r\n self.track_layer(layer_func)\r\n elif callable(layer_func):\r\n args = estimator_util.fn_args(layer_func)\r\n else:\r\n raise TypeError(\r\n \"Sequential.add() takes only tf.layers.Layer objects or callables; \"\r\n \"not '%s' of type '%s'.\" % (layer_func, type(layer_func)))\r\n self._layers_funcs.append(((\"training\" in args), layer_func))\r\n\r\n def call(self, inputs, training=None):\r\n \"\"\"Call each Layer in the order they were added.\"\"\"\r\n # TODO(josh11b): Support \"mode\" and maybe other arguments\r\n if training is None:\r\n for _, l in self._layers_funcs:\r\n inputs = l(inputs)\r\n else:\r\n for has_training_arg, l in self._layers_funcs:\r\n if has_training_arg:\r\n inputs = l(inputs, training)\r\n else:\r\n inputs = l(inputs)\r\n return inputs\r\n\r\n\r\n_DeferredRestoration = collections.namedtuple(\r\n\r\n \"_DeferredRestoration\",\r\n [\r\n # The map_func to use (either user-specified or the default).\r\n \"map_func\",\r\n # Boolean, True if the user specified an explicit map_func, for error\r\n # messages.\r\n \"map_func_is_user\",\r\n # A mapping from checkpoint names to initial values of not-yet-created\r\n # variables which should be restored. These values come from parsing a\r\n # checkpoint.\r\n \"checkpointed_variables_to_restore\",\r\n # A mapping from checkpoint name to variable objects of variables which\r\n # have already been restored, for error checking.\r\n \"restored_variables\",\r\n # The session to restore with (if in graph mode).\r\n \"session\",\r\n # Names of the Network where the restore was requested, for error\r\n # messages.\r\n \"network_name\",\r\n \"network_scope_name\"\r\n ])\r\n\r\n\r\ndef _default_naming_conflict_error_message(\r\n mapped_name, first_variable, second_variable,\r\n network_name, network_scope_name):\r\n return (\r\n (\"The default checkpoint variable name mapping strategy for Network \"\r\n \"'%s' resulted in a naming conflict. We attempted to strip off the \"\r\n \"variable prefix for the Network ('%s'), but this resulted in two \"\r\n \"variables named '%s' (originally '%s' and '%s'). This should only \"\r\n \"happen when using variable sharing (i.e. the Network contains Networks \"\r\n \"or Layers which were first added to another Network, and therefore \"\r\n \"have that Network's variable prefix). 
One solution is to pass \"\r\n \"`map_func=lambda n: n` to save and restore to use fully qualified \"\r\n \"variable names in the checkpoint, although this will require that the \"\r\n \"variable prefix of the Network being restored into is also '%s'. You \"\r\n \"may alternatively write an arbitrary mapping.\")\r\n % (\r\n network_name, network_scope_name, mapped_name,\r\n first_variable._shared_name,\r\n second_variable._shared_name, network_scope_name\r\n ))\r\n\r\n\r\ndef _restore_custom_map_func_error_message(\r\n mapped_name, first_variable, second_variable,\r\n network_name, network_scope_name):\r\n return (\r\n (\"The map_func passed to restore_network_checkpoint for the Network '%s' \"\r\n \"resulted in two variables named '%s' (originally '%s' and '%s'). Since \"\r\n \"this is also an error when saving, this Network was \"\r\n \"probably not saved with this map_func. Note that map_func \"\r\n \"always maps from full variable names to checkpoint names; \"\r\n \"there is no need to specify an inverse mapping.\\n\\n\"\r\n \"Try stripping less from the variable names, or renaming parts \"\r\n \"of the Network. For reference, variables created by sub-Layers \"\r\n \"of this Network are prefixed with '%s', but if they are \"\r\n \"re-used after being added to another Network they will have \"\r\n \"that Network's full variable prefix instead.\") % (\r\n network_name, mapped_name,\r\n first_variable._shared_name,\r\n second_variable._shared_name,\r\n network_scope_name))\r\n\r\n\r\ndef _make_custom_getter_for_deferred_restorations():\r\n \"\"\"Returns a custom getter which searches `deferred_restorations`.\r\n\r\n Returns: A tuple of (_custom_getter, deferred_restorations)\r\n _custom_getter: The getter which should be added to variable_scopes where\r\n variables will be created.\r\n deferred_restorations: A list for _DeferredRestoration objects. Typically\r\n empty when the getter is set, and expanded as deferred restorations are\r\n requested. All new deferred restorations should be appended to the end of\r\n the list, where they will have priority over older deferred restorations.\r\n \"\"\"\r\n deferred_restorations = []\r\n\r\n def _custom_getter(getter, name, shape=None, dtype=None,\r\n initializer=None,\r\n *args, **kwargs):\r\n \"\"\"A custom getter which processes deferred restorations.\"\"\"\r\n # Iterate over restorations, newest first (newer restorations will take\r\n # precedence over older restorations, just like with immediate restorations\r\n # into existing variables).\r\n delayed_restoration = None\r\n found_value = False\r\n value_to_restore = None\r\n for delayed_restoration in reversed(\r\n deferred_restorations):\r\n checkpoint_name = delayed_restoration.map_func(name)\r\n if (checkpoint_name\r\n in delayed_restoration.checkpointed_variables_to_restore):\r\n found_value = True\r\n value_to_restore = (\r\n delayed_restoration.checkpointed_variables_to_restore[\r\n checkpoint_name])\r\n if found_value:\r\n break\r\n # value_to_restore may be False because this variable is not in any\r\n # checkpoint we are restoring, or None because we have explicitly set it to\r\n # None when it was previously fetched. 
In either case, we don't need to\r\n # set an initializer.\r\n if found_value and value_to_restore is not None:\r\n initializer = value_to_restore\r\n shape = None\r\n variable = getter(name, shape=shape, dtype=dtype, initializer=initializer,\r\n *args, **kwargs)\r\n if found_value and value_to_restore is not None:\r\n # Mark as already restored from this checkpoint.\r\n delayed_restoration.checkpointed_variables_to_restore[\r\n checkpoint_name] = None\r\n if not context.executing_eagerly():\r\n delayed_restoration.session.run(variable.initializer)\r\n if found_value:\r\n # Error checking should run even if we've already restored a value.\r\n if delayed_restoration.restored_variables.setdefault(\r\n checkpoint_name, variable) is not variable:\r\n # Naming conflict. We've tried to initialize two variables with the\r\n # same value from the checkpoint.\r\n if delayed_restoration.map_func_is_user:\r\n raise ValueError(\r\n _restore_custom_map_func_error_message(\r\n mapped_name=checkpoint_name,\r\n first_variable=delayed_restoration.restored_variables[\r\n checkpoint_name],\r\n second_variable=variable,\r\n network_name=delayed_restoration.network_name,\r\n network_scope_name=delayed_restoration.network_scope_name))\r\n else:\r\n raise ValueError(\r\n _default_naming_conflict_error_message(\r\n mapped_name=checkpoint_name,\r\n first_variable=delayed_restoration.restored_variables[\r\n checkpoint_name],\r\n second_variable=variable,\r\n network_name=delayed_restoration.network_name,\r\n network_scope_name=delayed_restoration.network_scope_name))\r\n return variable\r\n return _custom_getter, deferred_restorations\r\n\r\n\r\ndef _make_prefix_stripping_map_fn(scope_name):\r\n \"\"\"Closure for stripping the scope name of a Network.\r\n\r\n Implemented as a closure rather than a member function to avoid reference\r\n cycles in deferred restorations (this function should not have a reference to\r\n the Network which created it).\r\n\r\n Args:\r\n scope_name: The Network.scope_name to strip from variables.\r\n Returns:\r\n A scope_name-stripping default `map_fn` for the Network.\r\n \"\"\"\r\n\r\n def _strip_variable_prefix(original_variable_name):\r\n \"\"\"The default map_func for saving or restoring variables.\r\n\r\n Strips the variable prefix for the Network on which save/restore was called,\r\n and leaves other variable names fully qualified in the checkpoint.\r\n\r\n Args:\r\n original_variable_name: The _shared_name of the variable (no :0\r\n suffix) to map.\r\n Returns:\r\n The checkpoint name of the variable.\r\n \"\"\"\r\n scope_name_with_slash = scope_name + \"/\"\r\n if original_variable_name.startswith(scope_name_with_slash):\r\n return original_variable_name[len(scope_name_with_slash):]\r\n else:\r\n return original_variable_name\r\n\r\n return _strip_variable_prefix\r\n\r\n\r\ndef save_network_checkpoint(\r\n network, save_path, global_step=None, map_func=None):\r\n \"\"\"Save variables from the Network to a checkpoint.\r\n\r\n Args:\r\n network: A Network object to save.\r\n save_path: Either a checkpoint prefix or the name of a directory to save\r\n the checkpoint in (in which case the checkpoint will be named based on\r\n the Network name).\r\n global_step: The global step to use when naming the checkpoint. If None\r\n (default), we will first try to get the default global step. If that\r\n fails because no default global step exists, then the checkpoint is\r\n created without a global step suffix.\r\n map_func: A function mapping fully qualified variable names\r\n (e.g. 
'my_network_1/dense_1/kernel') to names in the checkpoint. By\r\n default (if `map_func=None`), the variable prefix for the network being\r\n restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped\r\n and all other variable names (shared with other Networks) are left\r\n unchanged.\r\n Returns:\r\n The checkpoint prefix for the saved checkpoint, which may be passed to\r\n `Network.restore`.\r\n Raises:\r\n ValueError: If the Network has not yet been called, or if map_func results\r\n in a name collision.\r\n \"\"\"\r\n if not network.built:\r\n raise ValueError(\r\n \"Attempt to save the Network before it was first called. This means \"\r\n \"variables have not yet been created, so there is nothing to save.\")\r\n network._set_scope() # scope_name should be available to map_funcs\r\n if global_step is None:\r\n global_step = training_util.get_global_step()\r\n if os.path.isdir(save_path):\r\n # If we were passed a directory, default to naming based on the Network\r\n # name.\r\n save_path = os.path.join(save_path, network.name.replace(\"/\", \"_\"))\r\n user_map_func = map_func\r\n if map_func is None:\r\n map_func = _make_prefix_stripping_map_fn(network.scope_name)\r\n variable_map = {}\r\n for variable in network.variables:\r\n mapped_name = map_func(variable._shared_name)\r\n if variable_map.setdefault(mapped_name, variable) is not variable:\r\n if user_map_func is None:\r\n # Instead of erroring out, we could just re-try and silently use the\r\n # full variable names in the checkpoint. This could be odd for deeply\r\n # nested sub-Networks (since the full prefix from the nesting would\r\n # get added), so for now we'll let the user deal with this case.\r\n raise ValueError(_default_naming_conflict_error_message(\r\n mapped_name=mapped_name,\r\n first_variable=variable_map[mapped_name],\r\n second_variable=variable,\r\n network_name=network.name,\r\n network_scope_name=network.scope_name))\r\n else:\r\n # The user passed their own problematic map_func.\r\n raise ValueError(\r\n (\"The map_func passed to save_network_checkpoint for the Network \"\r\n \"'%s' resulted in two variables named '%s' ('%s' and '%s'). Try \"\r\n \"stripping less from the variable names, or renaming parts of \"\r\n \"the Network. For reference, variables created by sub-Layers of \"\r\n \"this Network are prefixed with '%s', but if they are re-used \"\r\n \"after being added to another Network, they will have that \"\r\n \"Network's full variable prefix instead.\") % (\r\n network.name, mapped_name,\r\n variable_map[mapped_name]._shared_name,\r\n variable._shared_name,\r\n network.scope_name))\r\n if context.executing_eagerly():\r\n sess = None\r\n else:\r\n sess = ops.get_default_session()\r\n return saver_lib.Saver(variable_map).save(\r\n sess=sess, save_path=save_path, write_meta_graph=False,\r\n global_step=global_step)\r\n\r\n\r\ndef _add_deferred_restoration(layer, deferred_restoration):\r\n \"\"\"Add a deferred restoration to this Layer and all children.\r\n\r\n Restorations which are requested later have higher priority, and the highest\r\n priority matching restoration is applied to a variable when it is created.\r\n\r\n Args:\r\n layer: The Layer (may not be a Network) to operate on.\r\n deferred_restoration: A _DeferredRestoration object.\r\n \"\"\"\r\n # Networks don't create variables at the moment, so this append isn't strictly\r\n # necessary. 
We could get by with only adding deferred restorations to\r\n # non-Network Layers.\r\n if isinstance(layer, Network):\r\n layer._set_scope()\r\n # Make sure this Layer has a deferred restoration queue and a custom getter,\r\n # then add our request to it.\r\n if not hasattr(layer, \"_custom_getter\"):\r\n assert not hasattr(layer, \"_deferred_restorations\")\r\n layer._custom_getter, layer._deferred_restorations = (\r\n _make_custom_getter_for_deferred_restorations())\r\n # We use set_custom_getter because it avoids recursively calling up the\r\n # variable_scope tree. We've done the tree traversal ourselves and have added\r\n # the request to each Layer which needs it.\r\n layer._scope.set_custom_getter(layer._custom_getter)\r\n layer._deferred_restorations.append(deferred_restoration)\r\n if isinstance(layer, Network):\r\n for sublayer in layer.layers:\r\n if not isinstance(sublayer, Network):\r\n layer._set_scope_for_nonnetwork_sublayer(sublayer)\r\n _add_deferred_restoration(sublayer, deferred_restoration)\r\n\r\n\r\ndef _restore_existing_variables(network, save_path, map_func, user_map_func):\r\n \"\"\"Use a standard Saver to restore existing variables from a checkpoint.\r\n\r\n Args:\r\n network: A Network object to restore.\r\n save_path: The checkpoint prefix or directory to read from.\r\n map_func: The function to use when mapping from variable names to\r\n checkpoint names.\r\n user_map_func: The original map_func passed by the user, for error\r\n checking.\r\n Returns:\r\n A dictionary mapping from checkpoint names to variable objects which have\r\n been restored (for bookkeeping to avoid deferred restorations on these\r\n variables).\r\n Raises:\r\n ValueError: If there is a name collision.\r\n \"\"\"\r\n existing_variables_by_checkpoint_name = {}\r\n for variable in network.variables:\r\n checkpoint_name = map_func(variable._shared_name)\r\n if existing_variables_by_checkpoint_name.setdefault(\r\n checkpoint_name, variable) is not variable:\r\n if user_map_func is None:\r\n raise ValueError(_default_naming_conflict_error_message(\r\n mapped_name=checkpoint_name,\r\n first_variable=existing_variables_by_checkpoint_name[\r\n checkpoint_name],\r\n second_variable=variable,\r\n network_name=network.name,\r\n network_scope_name=network.scope_name))\r\n else:\r\n raise ValueError(_restore_custom_map_func_error_message(\r\n mapped_name=checkpoint_name,\r\n first_variable=existing_variables_by_checkpoint_name[\r\n checkpoint_name],\r\n second_variable=variable,\r\n network_name=network.name,\r\n network_scope_name=network.scope_name))\r\n if existing_variables_by_checkpoint_name:\r\n if context.executing_eagerly():\r\n sess = None\r\n else:\r\n sess = ops.get_default_session()\r\n saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(\r\n sess=sess, save_path=save_path)\r\n return existing_variables_by_checkpoint_name\r\n\r\n\r\ndef _set_restore_on_create(network, save_path, map_func, user_map_func,\r\n existing_variables_by_checkpoint_name):\r\n \"\"\"If necessary, request deferred restorations of variables.\"\"\"\r\n checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)\r\n checkpointed_variables_to_restore = {}\r\n for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):\r\n if checkpoint_name in existing_variables_by_checkpoint_name:\r\n # This variable was already created and restored.\r\n continue\r\n # Save the variable for later restoration in a custom getter.\r\n checkpointed_variables_to_restore[checkpoint_name] = (\r\n 
checkpoint_reader.get_tensor(checkpoint_name))\r\n # Only set a deferred restoration if there are checkpoint variables which\r\n # have not been assigned to existing variables. Note that this loses out on\r\n # some opportunity for error checking, but avoids creating\r\n # _DeferredRestoration objects once a Network has been built (so that\r\n # restoring in a loop does not take increasing amounts of memory).\r\n if checkpointed_variables_to_restore:\r\n if context.executing_eagerly():\r\n sess = None\r\n else:\r\n sess = ops.get_default_session()\r\n # We need a name for error messages. If we haven't been added to another\r\n # Network yet, we're top-level.\r\n network._finalize_name(False)\r\n network._set_scope()\r\n # Save a record of this restoration for use in the custom getter.\r\n deferred_restoration = _DeferredRestoration(\r\n map_func=map_func,\r\n map_func_is_user=(user_map_func is not None),\r\n checkpointed_variables_to_restore=checkpointed_variables_to_restore,\r\n restored_variables={},\r\n session=sess,\r\n network_name=network.name,\r\n network_scope_name=network.scope_name)\r\n # Add the deferred registration to non-Network children, and request that\r\n # Networks propagate the request to their children.\r\n _add_deferred_restoration(network, deferred_restoration)\r\n\r\n\r\ndef restore_network_checkpoint(network, save_path, map_func=None):\r\n \"\"\"Restore the Network from a checkpoint.\r\n\r\n If variables have already been created (typically when some or all of the\r\n `Network` is built), they are assigned values from the checkpoint immediately,\r\n overwriting any existing values (in graph mode the default session is used for\r\n the assignments).\r\n\r\n If there are checkpoint entries which do not correspond to any existing\r\n variables in the `Network`, these values are saved for deferred restoration;\r\n their initial values will be the checkpointed values once they are\r\n created. Requests for multiple deferred restorations behave the same way as\r\n immediate restorations, in that later requests will take priority over earlier\r\n requests relevant to the same variable.\r\n\r\n If this `Network` shares `Layer`s with another network, those `Layer`s will\r\n also have their variables restored from the checkpoint.\r\n\r\n Args:\r\n network: A Network object to restore.\r\n save_path: The return value of `tfe.save_network_checkpoint`, or a directory\r\n to search for a checkpoint.\r\n map_func: A function mapping fully qualified variable names\r\n (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By\r\n default (if `map_func=None`), the variable prefix for the network being\r\n restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped\r\n and all other variable names (shared with other Networks) are left\r\n unchanged. 
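As a minimal sketch (the checkpoint path and the built `Network` `net` are\r\n illustrative), a save/restore round trip might look like:\r\n\r\n ```python\r\n prefix = tfe.save_network_checkpoint(net, \"/tmp/net_ckpt\")\r\n tfe.restore_network_checkpoint(net, prefix)\r\n ```\r\n\r\n 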
Note that this is the _same_ map_func as\r\n `tfe.save_network_checkpoint`, not an inverse mapping.\r\n \"\"\"\r\n network._finalize_name(parent_network=False)\r\n network._set_scope() # scope_name should be available to map_funcs\r\n if os.path.isdir(save_path):\r\n # If we don't have a name yet, set no parent.\r\n save_path = os.path.join(save_path, network.name.replace(\"/\", \"_\"))\r\n user_map_func = map_func\r\n if map_func is None:\r\n map_func = _make_prefix_stripping_map_fn(network.scope_name)\r\n # Step one is to restore any existing variables from the checkpoint.\r\n existing_variables_by_checkpoint_name = _restore_existing_variables(\r\n network=network,\r\n save_path=save_path,\r\n map_func=map_func,\r\n user_map_func=user_map_func)\r\n # Step two is to set a custom getter which restores variables on creation,\r\n # for those variables which have not been added to sub-Layers yet.\r\n _set_restore_on_create(\r\n network=network,\r\n save_path=save_path,\r\n map_func=map_func,\r\n user_map_func=user_map_func,\r\n existing_variables_by_checkpoint_name=(\r\n existing_variables_by_checkpoint_name))\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Multivariate autoregressive model (vector autoregression).\r\n\r\nImplements the following model (num_blocks = max(ar_order, ma_order + 1)):\r\n\r\n y(t, 1) = \\sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)\r\n y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks\r\n y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)\r\n\r\nWhere e(t) are Gaussian with zero mean and learned covariance.\r\n\r\nEach element of ar_coefs and ma_coefs is a [num_features x num_features]\r\nmatrix. Each y(t, i) is a vector of length num_features. Indices in the above\r\nequations are one-based. Initial conditions y(0, i) come from prior state (which\r\nmay either be learned or left as a constant with high prior covariance).\r\n\r\nIf ar_order > ma_order, the observation model is:\r\n y(t, 1) + observation_noise(t)\r\n\r\nIf ma_order >= ar_order, it is (to observe the moving average component):\r\n y(t, 1) + y(t, num_blocks) + observation_noise(t)\r\n\r\nWhere observation_noise(t) are Gaussian with zero mean and learned covariance.\r\n\r\nThis implementation uses a formulation which puts all of the autoregressive\r\ncoefficients in the transition equation for the observed component, which\r\nenables learning using truncated backpropagation. 
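As an\r\nillustrative sketch, a pure vector autoregression of order 2 (with no moving\r\naverage component) might be constructed as:\r\n\r\n model = VARMA(autoregressive_order=2, moving_average_order=0)\r\n\r\n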
Noise is not applied directly\r\nto the observed component (with the exception of standard observation noise),\r\nwhich further aids learning of the autoregressive coefficients when VARMA is in\r\nan ensemble with other models (in which case having an observation noise term is\r\nusually unavoidable).\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import math_utils\r\nfrom tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import linalg_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variable_scope\r\n\r\n\r\nclass VARMA(state_space_model.StateSpaceModel):\r\n \"\"\"A VARMA model implementation as a special case of the state space model.\"\"\"\r\n\r\n def __init__(self,\r\n autoregressive_order,\r\n moving_average_order,\r\n configuration=state_space_model.StateSpaceModelConfiguration()):\r\n \"\"\"Construct a VARMA model.\r\n\r\n The size of the latent state for this model is:\r\n num_features * max(autoregressive_order, moving_average_order + 1)\r\n Square matrices of this size are constructed and multiplied.\r\n\r\n Args:\r\n autoregressive_order: The maximum autoregressive lag.\r\n moving_average_order: The maximum moving average lag, after which\r\n transient deviations are expected to return to their long-term mean.\r\n configuration: A StateSpaceModelConfiguration object.\r\n \"\"\"\r\n self.ar_order = autoregressive_order\r\n self.ma_order = moving_average_order\r\n self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)\r\n super(VARMA, self).__init__(configuration=configuration)\r\n self.state_dimension = self.state_num_blocks * self.num_features\r\n\r\n def _define_parameters(self, observation_transition_tradeoff_log=None):\r\n with variable_scope.variable_scope(self._variable_scope):\r\n # TODO(allenl): Evaluate parameter transformations for AR/MA coefficients\r\n # which improve interpretability/stability.\r\n self.ar_coefs = variable_scope.get_variable(\r\n name=\"ar_coefs\",\r\n shape=[self.num_features, self.num_features, self.ar_order],\r\n dtype=self.dtype,\r\n initializer=init_ops.zeros_initializer())\r\n self.ma_coefs = variable_scope.get_variable(\r\n name=\"ma_coefs\",\r\n initializer=array_ops.tile(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],\r\n [self.ma_order, 1, 1]),\r\n dtype=self.dtype)\r\n super(VARMA, self)._define_parameters(\r\n observation_transition_tradeoff_log=observation_transition_tradeoff_log)\r\n\r\n def get_state_transition(self):\r\n \"\"\"Construct state transition matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state transition matrix. It has shape\r\n [self.state_dimension, self.state_dimension].\r\n \"\"\"\r\n # Pad any unused AR blocks with zeros. 
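As an illustrative sketch (not computed\r\n # here): with num_features=1, ar_order=2 and state_num_blocks=2, the matrix\r\n # assembled below has the companion form [[a1, a2], [1., 0.]], where a1 and\r\n # a2 are the lag-1 and lag-2 AR coefficients.\r\n # 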
The extra state is necessary if\r\n # ma_order >= ar_order.\r\n ar_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ar_coefs,\r\n [[0, 0], [0, 0],\r\n [0, self.state_num_blocks - self.ar_order]]),\r\n [self.num_features, self.state_dimension])\r\n shift_matrix = array_ops.pad(\r\n linalg_ops.eye(\r\n (self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features]])\r\n return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)\r\n\r\n def get_noise_transform(self):\r\n \"\"\"Construct state noise transform matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state noise transform matrix. It has shape\r\n [self.state_dimension, self.num_features].\r\n \"\"\"\r\n # Noise is broadcast, through the moving average coefficients, to\r\n # un-observed parts of the latent state.\r\n ma_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ma_coefs,\r\n [[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],\r\n [0, 0]]),\r\n [(self.state_num_blocks - 1) * self.num_features, self.num_features],\r\n name=\"noise_transform\")\r\n # Deterministically apply noise to the oldest component.\r\n return array_ops.concat(\r\n [ma_coefs_padded,\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)],\r\n axis=0)\r\n\r\n def get_observation_model(self, times):\r\n \"\"\"Construct observation model matrix from VARMA parameters.\r\n\r\n Args:\r\n times: A [batch size] vector indicating the times observation models are\r\n requested for. Unused.\r\n Returns:\r\n the observation model matrix. It has shape\r\n [self.num_features, self.state_dimension].\r\n \"\"\"\r\n del times # StateSpaceModel will broadcast along the batch dimension\r\n if self.ar_order > self.ma_order or self.state_num_blocks < 2:\r\n return array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],\r\n name=\"observation_model\")\r\n else:\r\n # Add a second observed component which \"catches\" the accumulated moving\r\n # average errors as they reach the end of the state. If ar_order >\r\n # ma_order, this is unnecessary, since accumulated errors cycle naturally.\r\n return array_ops.concat(\r\n [\r\n array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0,\r\n self.num_features * (self.state_num_blocks - 2)]]),\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)\r\n ],\r\n axis=1,\r\n name=\"observation_model\")\r\n\r\n def get_state_transition_noise_covariance(\r\n self, minimum_initial_variance=1e-5):\r\n # Most state space models use only an explicit observation noise term to\r\n # model deviations from expectations, and so a low initial transition noise\r\n # parameter is helpful there. 
Since deviations from expectations are also\r\n # modeled as transition noise in VARMA, we set its initial value based on a\r\n # slight over-estimate of the empirical observation noise.\r\n if self._input_statistics is not None:\r\n feature_variance = self._scale_variance(\r\n self._input_statistics.series_start_moments.variance)\r\n initial_transition_noise_scale = math_ops.log(\r\n math_ops.maximum(\r\n math_ops.reduce_mean(feature_variance), minimum_initial_variance))\r\n else:\r\n initial_transition_noise_scale = 0.\r\n state_noise_transform = ops.convert_to_tensor(\r\n self.get_noise_transform(), dtype=self.dtype)\r\n state_noise_dimension = state_noise_transform.get_shape()[1].value\r\n return math_utils.variable_covariance_matrix(\r\n state_noise_dimension, \"state_transition_noise\",\r\n dtype=self.dtype,\r\n initial_overall_scale_log=initial_transition_noise_scale)\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"TensorFlow Eager execution prototype.\r\n\r\nEXPERIMENTAL: APIs here are unstable and likely to change without notice.\r\n\r\nTo use, at program startup, call `tfe.enable_eager_execution()`.\r\n\r\n@@metrics\r\n\r\n@@list_devices\r\n@@num_gpus\r\n\r\n@@py_func\r\n@@defun\r\n@@make_template\r\n@@implicit_gradients\r\n@@implicit_value_and_gradients\r\n@@gradients_function\r\n@@value_and_gradients_function\r\n@@GradientTape\r\n\r\n@@run\r\n@@enable_eager_execution\r\n\r\n@@custom_gradient\r\n\r\n@@add_execution_callback\r\n@@clear_execution_callbacks\r\n@@inf_callback\r\n@@inf_nan_callback\r\n@@nan_callback\r\n@@seterr\r\n\r\n@@Iterator\r\n@@Saver\r\n@@restore_variables_on_create\r\n@@Variable\r\n@@get_optimizer_variables\r\n@@EagerVariableStore\r\n\r\n@@Network\r\n@@Sequential\r\n@@save_network_checkpoint\r\n@@restore_network_checkpoint\r\n\r\n@@Checkpoint\r\n@@Checkpointable\r\n@@CheckpointableSaver\r\n\r\n@@executing_eagerly\r\n@@in_eager_mode\r\n\r\n@@run_test_in_graph_and_eager_modes\r\n\r\n@@DEVICE_PLACEMENT_EXPLICIT\r\n@@DEVICE_PLACEMENT_WARN\r\n@@DEVICE_PLACEMENT_SILENT\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n\r\n# pylint:disable=g-bad-import-order,g-import-not-at-top,unused-import\r\n#\r\nfrom tensorflow.contrib.eager.python import metrics\r\nfrom tensorflow.contrib.eager.python.checkpointable_utils import CheckpointableSaver\r\nfrom tensorflow.contrib.eager.python.checkpointable_utils import Checkpoint\r\nfrom tensorflow.contrib.eager.python.datasets import Iterator\r\nfrom tensorflow.contrib.eager.python.network import Network\r\nfrom tensorflow.contrib.eager.python.network import Sequential\r\nfrom tensorflow.contrib.eager.python.network import save_network_checkpoint\r\nfrom tensorflow.contrib.eager.python.network import restore_network_checkpoint\r\nfrom 
tensorflow.contrib.eager.python.saver import get_optimizer_variables\r\nfrom tensorflow.contrib.eager.python.saver import restore_variables_on_create\r\nfrom tensorflow.contrib.eager.python.saver import Saver\r\nfrom tensorflow.python.eager import backprop\r\nfrom tensorflow.python.eager import function\r\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_EXPLICIT\r\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_WARN\r\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_SILENT\r\nfrom tensorflow.python.eager.context import executing_eagerly\r\nfrom tensorflow.python.eager.context import list_devices\r\nfrom tensorflow.python.eager.context import num_gpus\r\nfrom tensorflow.python.eager.execution_callbacks import add_execution_callback\r\nfrom tensorflow.python.eager.execution_callbacks import clear_execution_callbacks\r\nfrom tensorflow.python.eager.execution_callbacks import inf_callback\r\nfrom tensorflow.python.eager.execution_callbacks import inf_nan_callback\r\nfrom tensorflow.python.eager.execution_callbacks import nan_callback\r\nfrom tensorflow.python.eager.execution_callbacks import seterr\r\nfrom tensorflow.python.framework.ops import enable_eager_execution\r\nfrom tensorflow.python.framework.ops import eager_run as run\r\nfrom tensorflow.python.framework.test_util import run_in_graph_and_eager_modes as run_test_in_graph_and_eager_modes\r\nfrom tensorflow.python.ops.custom_gradient import custom_gradient\r\nfrom tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable\r\nfrom tensorflow.python.ops.variable_scope import EagerVariableStore\r\nfrom tensorflow.python.ops import script_ops\r\nfrom tensorflow.python.ops import template\r\nfrom tensorflow.python.training.checkpointable import Checkpointable\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\npy_func = script_ops.eager_py_func\r\ndefun = function.defun\r\nmake_template = template.make_template_internal\r\nimplicit_gradients = backprop.implicit_grad\r\nimplicit_value_and_gradients = backprop.implicit_val_and_grad\r\ngradients_function = backprop.gradients_function\r\nvalue_and_gradients_function = backprop.val_and_grad_function\r\nGradientTape = backprop.GradientTape # pylint: disable=invalid-name\r\nin_eager_mode = executing_eagerly\r\n\r\nremove_undocumented(__name__)\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Python wrappers for Datasets.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\nimport collections\r\nimport threading\r\n\r\nimport numpy as np\r\nimport six\r\n\r\nfrom tensorflow.python.data.ops import iterator_ops\r\nfrom tensorflow.python.data.util import nest\r\nfrom tensorflow.python.data.util import random_seed\r\nfrom tensorflow.python.data.util import sparse\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import function\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_dataset_ops\r\nfrom tensorflow.python.ops import gen_io_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import script_ops\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export(\"data.Dataset\")\r\nclass Dataset(object):\r\n \"\"\"Represents a potentially large set of elements.\r\n\r\n A `Dataset` can be used to represent an input pipeline as a\r\n collection of elements (nested structures of tensors) and a \"logical\r\n plan\" of transformations that act on those elements.\r\n \"\"\"\r\n __metaclass__ = abc.ABCMeta\r\n\r\n def __init__(self):\r\n pass\r\n\r\n @abc.abstractmethod\r\n def _as_variant_tensor(self):\r\n \"\"\"Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.\r\n\r\n Returns:\r\n A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.\r\n \"\"\"\r\n raise NotImplementedError(\"Dataset._as_variant_tensor\")\r\n\r\n def make_initializable_iterator(self, shared_name=None):\r\n \"\"\"Creates an `Iterator` for enumerating the elements of this dataset.\r\n\r\n Note: The returned iterator will be in an uninitialized state,\r\n and you must run the `iterator.initializer` operation before using it:\r\n\r\n ```python\r\n dataset = ...\r\n iterator = dataset.make_initializable_iterator()\r\n # ...\r\n sess.run(iterator.initializer)\r\n ```\r\n\r\n Args:\r\n shared_name: (Optional.) If non-empty, the returned iterator will be\r\n shared under the given name across multiple sessions that share the\r\n same devices (e.g. 
when using a remote server).\r\n\r\n Returns:\r\n An `Iterator` over the elements of this dataset.\r\n\r\n Raises:\r\n RuntimeError: If eager execution is enabled.\r\n \"\"\"\r\n if context.executing_eagerly():\r\n raise RuntimeError(\r\n \"dataset.make_initializable_iterator is not supported when eager \"\r\n \"execution is enabled.\")\r\n if shared_name is None:\r\n shared_name = \"\"\r\n iterator_resource = gen_dataset_ops.iterator(\r\n container=\"\",\r\n shared_name=shared_name,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n with ops.colocate_with(iterator_resource):\r\n initializer = gen_dataset_ops.make_iterator(self._as_variant_tensor(),\r\n iterator_resource)\r\n return iterator_ops.Iterator(iterator_resource, initializer,\r\n self.output_types, self.output_shapes,\r\n self.output_classes)\r\n\r\n def make_one_shot_iterator(self):\r\n \"\"\"Creates an `Iterator` for enumerating the elements of this dataset.\r\n\r\n Note: The returned iterator will be initialized automatically.\r\n A \"one-shot\" iterator does not currently support re-initialization.\r\n\r\n Returns:\r\n An `Iterator` over the elements of this dataset.\r\n\r\n Raises:\r\n RuntimeError: If eager execution is enabled.\r\n \"\"\"\r\n if context.executing_eagerly():\r\n raise RuntimeError(\r\n \"dataset.make_one_shot_iterator is not supported when eager \"\r\n \"execution is enabled.\")\r\n # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is\r\n # a 0-argument function.\r\n @function.Defun(capture_by_value=True)\r\n def _make_dataset():\r\n return self._as_variant_tensor() # pylint: disable=protected-access\r\n\r\n try:\r\n _make_dataset.add_to_graph(ops.get_default_graph())\r\n except ValueError as err:\r\n if \"Cannot capture a stateful node\" in str(err):\r\n raise ValueError(\r\n \"Failed to create a one-shot iterator for a dataset. \"\r\n \"`Dataset.make_one_shot_iterator()` does not support datasets that \"\r\n \"capture stateful objects, such as a `Variable` or `LookupTable`. \"\r\n \"In these cases, use `Dataset.make_initializable_iterator()`. 
\"\r\n \"(Original error: %s)\" % err)\r\n else:\r\n six.reraise(ValueError, err)\r\n\r\n return iterator_ops.Iterator(\r\n gen_dataset_ops.one_shot_iterator(\r\n dataset_factory=_make_dataset,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes,\r\n self.output_classes))), None,\r\n self.output_types, self.output_shapes, self.output_classes)\r\n\r\n @abc.abstractproperty\r\n def output_classes(self):\r\n \"\"\"Returns the class of each component of an element of this dataset.\r\n\r\n The expected values are `tf.Tensor` and `tf.SparseTensor`.\r\n\r\n Returns:\r\n A nested structure of Python `type` objects corresponding to each\r\n component of an element of this dataset.\r\n \"\"\"\r\n raise NotImplementedError(\"Dataset.output_classes\")\r\n\r\n @abc.abstractproperty\r\n def output_shapes(self):\r\n \"\"\"Returns the shape of each component of an element of this dataset.\r\n\r\n Returns:\r\n A nested structure of `tf.TensorShape` objects corresponding to each\r\n component of an element of this dataset.\r\n \"\"\"\r\n raise NotImplementedError(\"Dataset.output_shapes\")\r\n\r\n @abc.abstractproperty\r\n def output_types(self):\r\n \"\"\"Returns the type of each component of an element of this dataset.\r\n\r\n Returns:\r\n A nested structure of `tf.DType` objects corresponding to each component\r\n of an element of this dataset.\r\n \"\"\"\r\n raise NotImplementedError(\"Dataset.output_types\")\r\n\r\n def __repr__(self):\r\n output_shapes = nest.map_structure(str, self.output_shapes)\r\n output_shapes = str(output_shapes).replace(\"'\", \"\")\r\n output_types = nest.map_structure(repr, self.output_types)\r\n output_types = str(output_types).replace(\"'\", \"\")\r\n return (\"<%s shapes: %s, types: %s>\" % (type(self).__name__, output_shapes,\r\n output_types))\r\n\r\n @staticmethod\r\n def from_tensors(tensors):\r\n \"\"\"Creates a `Dataset` with a single element, comprising the given tensors.\r\n\r\n Args:\r\n tensors: A nested structure of tensors.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return TensorDataset(tensors)\r\n\r\n @staticmethod\r\n def from_tensor_slices(tensors):\r\n \"\"\"Creates a `Dataset` whose elements are slices of the given tensors.\r\n\r\n Args:\r\n tensors: A nested structure of tensors, each having the same size in the\r\n 0th dimension.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return TensorSliceDataset(tensors)\r\n\r\n @staticmethod\r\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_tensor_slices()`.\")\r\n def from_sparse_tensor_slices(sparse_tensor):\r\n \"\"\"Splits each rank-N `tf.SparseTensor` in this dataset row-wise.\r\n\r\n Args:\r\n sparse_tensor: A `tf.SparseTensor`.\r\n\r\n Returns:\r\n Dataset: A `Dataset` of rank-(N-1) sparse tensors.\r\n \"\"\"\r\n return SparseTensorSliceDataset(sparse_tensor)\r\n\r\n class _GeneratorState(object):\r\n \"\"\"Stores outstanding iterators created from a Python generator.\r\n\r\n This class keeps track of potentially multiple iterators that may have\r\n been created from a generator, e.g. 
in the case that the dataset is\r\n repeated, or nested within a parallel computation.\r\n \"\"\"\r\n\r\n def __init__(self, generator):\r\n self._generator = generator\r\n self._lock = threading.Lock()\r\n self._next_id = 0 # GUARDED_BY(self._lock)\r\n self._iterators = collections.defaultdict(lambda: iter(generator()))\r\n\r\n def get_next_id(self):\r\n with self._lock:\r\n ret = self._next_id\r\n self._next_id += 1\r\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\r\n # casting in `py_func()` will create an array of `np.int32` on Windows,\r\n # leading to a runtime error.\r\n return np.array(ret, dtype=np.int64)\r\n\r\n def get_iterator(self, iterator_id):\r\n return self._iterators[iterator_id]\r\n\r\n def iterator_completed(self, iterator_id):\r\n del self._iterators[iterator_id]\r\n\r\n @staticmethod\r\n def from_generator(generator, output_types, output_shapes=None):\r\n \"\"\"Creates a `Dataset` whose elements are generated by `generator`.\r\n\r\n The `generator` argument must be a callable object that returns\r\n an object that supports the `iter()` protocol (e.g. a generator function).\r\n The elements generated by `generator` must be compatible with the given\r\n `output_types` and (optional) `output_shapes` arguments.\r\n\r\n For example:\r\n\r\n ```python\r\n import itertools\r\n\r\n def gen():\r\n for i in itertools.count(1):\r\n yield (i, [1] * i)\r\n\r\n ds = Dataset.from_generator(\r\n gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))\r\n value = ds.make_one_shot_iterator().get_next()\r\n\r\n sess.run(value) # (1, array([1]))\r\n sess.run(value) # (2, array([1, 1]))\r\n ```\r\n\r\n NOTE: The current implementation of `Dataset.from_generator()` uses\r\n @{tf.py_func} and inherits the same constraints. In particular, it\r\n requires the `Dataset`- and `Iterator`-related operations to be placed\r\n on a device in the same process as the Python program that called\r\n `Dataset.from_generator()`. The body of `generator` will not be\r\n serialized in a `GraphDef`, and you should not use this method if you\r\n need to serialize your model and restore it in a different environment.\r\n\r\n NOTE: If `generator` depends on mutable global variables or other external\r\n state, be aware that the runtime may invoke `generator` multiple times\r\n (in order to support repeating the `Dataset`) and at any time\r\n between the call to `Dataset.from_generator()` and the production of the\r\n first element from the generator. Mutating global variables or external\r\n state can cause undefined behavior, and we recommend that you explicitly\r\n cache any external state in `generator` before calling\r\n `Dataset.from_generator()`.\r\n\r\n Args:\r\n generator: A callable object that takes no arguments and returns an\r\n object that supports the `iter()` protocol.\r\n output_types: A nested structure of `tf.DType` objects corresponding to\r\n each component of an element yielded by `generator`.\r\n output_shapes: (Optional.) 
A nested structure of `tf.TensorShape`\r\n objects corresponding to each component of an element yielded by\r\n `generator`.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n if not callable(generator):\r\n raise TypeError(\"`generator` must be callable.\")\r\n if output_shapes is None:\r\n output_shapes = nest.map_structure(\r\n lambda _: tensor_shape.TensorShape(None), output_types)\r\n else:\r\n output_shapes = nest.map_structure_up_to(\r\n output_types, tensor_shape.as_shape, output_shapes)\r\n\r\n flattened_types = nest.flatten(output_types)\r\n flattened_shapes = nest.flatten(output_shapes)\r\n\r\n generator_state = Dataset._GeneratorState(generator)\r\n\r\n def get_iterator_id_fn(unused_dummy):\r\n \"\"\"Creates a unique `iterator_id` for each pass over the dataset.\r\n\r\n The returned `iterator_id` disambiguates between multiple concurrently\r\n existing iterators.\r\n\r\n Args:\r\n unused_dummy: Ignored value.\r\n\r\n Returns:\r\n A `tf.int64` tensor whose value uniquely identifies an iterator in\r\n `generator_state`.\r\n \"\"\"\r\n return script_ops.py_func(\r\n generator_state.get_next_id, [], dtypes.int64, stateful=True)\r\n\r\n def generator_next_fn(iterator_id_t):\r\n \"\"\"Generates the next element from iterator with ID `iterator_id_t`.\r\n\r\n We map this function across an infinite repetition of the\r\n `iterator_id_t`, and raise `StopIteration` to terminate the iteration.\r\n\r\n Args:\r\n iterator_id_t: A `tf.int64` tensor whose value uniquely identifies\r\n the iterator in `generator_state` from which to generate an element.\r\n\r\n Returns:\r\n A nested structure of tensors representing an element from the iterator.\r\n \"\"\"\r\n\r\n def generator_py_func(iterator_id):\r\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\r\n # `next()` raises `StopIteration` when there are no more\r\n # elements remaining to be generated.\r\n values = next(generator_state.get_iterator(iterator_id))\r\n\r\n # Use the same _convert function from the py_func() implementation to\r\n # convert the returned values to arrays early, so that we can inspect\r\n # their values.\r\n # pylint: disable=protected-access\r\n ret_arrays = [\r\n script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype)\r\n for ret, dtype in zip(\r\n nest.flatten_up_to(output_types, values), flattened_types)\r\n ]\r\n # pylint: enable=protected-access\r\n\r\n # Additional type and shape checking to ensure that the components\r\n # of the generated element match the `output_types` and `output_shapes`\r\n # arguments.\r\n for (ret_array, expected_dtype, expected_shape) in zip(\r\n ret_arrays, flattened_types, flattened_shapes):\r\n if ret_array.dtype != expected_dtype.as_numpy_dtype:\r\n raise TypeError(\r\n \"`generator` yielded an element of type %s where an element \"\r\n \"of type %s was expected.\" % (ret_array.dtype,\r\n expected_dtype.as_numpy_dtype))\r\n if not expected_shape.is_compatible_with(ret_array.shape):\r\n raise ValueError(\r\n \"`generator` yielded an element of shape %s where an element \"\r\n \"of shape %s was expected.\" % (ret_array.shape, expected_shape))\r\n\r\n return ret_arrays\r\n\r\n flat_values = script_ops.py_func(\r\n generator_py_func, [iterator_id_t], flattened_types, stateful=True)\r\n\r\n # The `py_func()` op drops the inferred shapes, so we add them back in\r\n # here.\r\n if output_shapes is not None:\r\n for ret_t, shape in zip(flat_values, flattened_shapes):\r\n ret_t.set_shape(shape)\r\n\r\n return nest.pack_sequence_as(output_types, 
flat_values)\r\n\r\n def finalize_fn(iterator_id_t):\r\n \"\"\"Releases host-side state for the iterator with ID `iterator_id_t`.\"\"\"\r\n\r\n def finalize_py_func(iterator_id):\r\n generator_state.iterator_completed(iterator_id)\r\n # We return a dummy value so that the `finalize_fn` has a valid\r\n # signature.\r\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\r\n # casting in `py_func()` will create an array of `np.int32` on Windows,\r\n # leading to a runtime error.\r\n return np.array(0, dtype=np.int64)\r\n\r\n return script_ops.py_func(\r\n finalize_py_func, [iterator_id_t], dtypes.int64, stateful=True)\r\n\r\n # This function associates each traversal of `generator` with a unique\r\n # iterator ID.\r\n def flat_map_fn(dummy_arg):\r\n # The `get_iterator_id_fn` gets a unique ID for the current instance\r\n # of the generator.\r\n # The `generator_next_fn` gets the next element from the iterator with the\r\n # given ID, and raises StopIteration when that iterator contains no\r\n # more elements.\r\n return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,\r\n finalize_fn)\r\n\r\n # A single-element dataset that, each time it is evaluated, contains a\r\n # freshly-generated and unique (for the returned dataset) int64\r\n # ID that will be used to identify the appropriate Python state, which\r\n # is encapsulated in `generator_state`, and captured in\r\n # `get_iterator_id_map_fn`.\r\n dummy = 0\r\n id_dataset = Dataset.from_tensors(dummy)\r\n\r\n # A dataset that contains all of the elements generated by a\r\n # single iterator created from `generator`, identified by the\r\n # iterator ID contained in `id_dataset`. Lifting the iteration\r\n # into a flat_map here enables multiple repetitions and/or nested\r\n # versions of the returned dataset to be created, because it forces\r\n # the generation of a new ID for each version.\r\n return id_dataset.flat_map(flat_map_fn)\r\n\r\n @staticmethod\r\n def range(*args):\r\n \"\"\"Creates a `Dataset` of a step-separated range of values.\r\n\r\n For example:\r\n\r\n ```python\r\n Dataset.range(5) == [0, 1, 2, 3, 4]\r\n Dataset.range(2, 5) == [2, 3, 4]\r\n Dataset.range(1, 5, 2) == [1, 3]\r\n Dataset.range(1, 5, -2) == []\r\n Dataset.range(5, 1) == []\r\n Dataset.range(5, 1, -2) == [5, 3]\r\n ```\r\n\r\n Args:\r\n *args: follows the same semantics as Python's xrange.\r\n len(args) == 1 -> start = 0, stop = args[0], step = 1\r\n len(args) == 2 -> start = args[0], stop = args[1], step = 1\r\n len(args) == 3 -> start = args[0], stop = args[1], step = args[2]\r\n\r\n Returns:\r\n Dataset: A `RangeDataset`.\r\n\r\n Raises:\r\n ValueError: if len(args) == 0.\r\n \"\"\"\r\n return RangeDataset(*args)\r\n\r\n @staticmethod\r\n def zip(datasets):\r\n \"\"\"Creates a `Dataset` by zipping together the given datasets.\r\n\r\n This method has similar semantics to the built-in `zip()` function\r\n in Python, with the main difference being that the `datasets`\r\n argument can be an arbitrary nested structure of `Dataset` objects.\r\n For example:\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... 
}` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3 }\r\n b = { 4, 5, 6 }\r\n c = { (7, 8), (9, 10), (11, 12) }\r\n d = { 13, 14 }\r\n\r\n # The nested structure of the `datasets` argument determines the\r\n # structure of elements in the resulting dataset.\r\n Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) }\r\n Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) }\r\n\r\n # The `datasets` argument may contain an arbitrary number of\r\n # datasets.\r\n Dataset.zip((a, b, c)) == { (1, 4, (7, 8)),\r\n (2, 5, (9, 10)),\r\n (3, 6, (11, 12)) }\r\n\r\n # The number of elements in the resulting dataset is the same as\r\n # the size of the smallest dataset in `datasets`.\r\n Dataset.zip((a, d)) == { (1, 13), (2, 14) }\r\n ```\r\n\r\n Args:\r\n datasets: A nested structure of datasets.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return ZipDataset(datasets)\r\n\r\n def concatenate(self, dataset):\r\n \"\"\"Creates a `Dataset` by concatenating the given dataset with this dataset.\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3 }\r\n b = { 4, 5, 6, 7 }\r\n\r\n # Input dataset and dataset to be concatenated should have the same\r\n # nested structures and output types.\r\n # c = { (8, 9), (10, 11), (12, 13) }\r\n # d = { 14.0, 15.0, 16.0 }\r\n # a.concatenate(c) and a.concatenate(d) would result in error.\r\n\r\n a.concatenate(b) == { 1, 2, 3, 4, 5, 6, 7 }\r\n ```\r\n\r\n Args:\r\n dataset: `Dataset` to be concatenated.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return ConcatenateDataset(self, dataset)\r\n\r\n def prefetch(self, buffer_size):\r\n \"\"\"Creates a `Dataset` that prefetches elements from this dataset.\r\n\r\n
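For example, adding a prefetch at the end of an input pipeline lets\r\n preprocessing of upcoming elements overlap with downstream work (an\r\n illustrative sketch; `filenames` and `parse_fn` are hypothetical):\r\n\r\n ```python\r\n dataset = (tf.data.TFRecordDataset(filenames)\r\n .map(parse_fn)\r\n .batch(32)\r\n .prefetch(1)) # keep one batch ready at all times\r\n ```\r\n\r\n Args:\r\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\r\n maximum number of elements that will be buffered when prefetching.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return PrefetchDataset(self, buffer_size)\r\n\r\n @staticmethod\r\n def list_files(file_pattern, shuffle=None):\r\n \"\"\"A dataset of all files matching a pattern.\r\n\r\n Example:\r\n If we had the following files on our filesystem:\r\n - /path/to/dir/a.txt\r\n - /path/to/dir/b.py\r\n - /path/to/dir/c.py\r\n If we pass \"/path/to/dir/*.py\" as the `file_pattern`, the dataset would\r\n produce:\r\n - /path/to/dir/b.py\r\n - /path/to/dir/c.py\r\n\r\n NOTE: The order of the file names returned can be non-deterministic even\r\n when `shuffle` is `False`.\r\n\r\n Args:\r\n file_pattern: A string or scalar string `tf.Tensor`, representing\r\n the filename pattern that will be matched.\r\n shuffle: (Optional.) If `True`, the file names will be shuffled randomly.\r\n Defaults to `True`.\r\n\r\n Returns:\r\n Dataset: A `Dataset` of strings corresponding to file names.\r\n \"\"\"\r\n # TODO(b/73959787): Add a `seed` argument and make the `shuffle=False`\r\n # behavior deterministic (e.g. 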
by sorting the filenames).\r\n if shuffle is None:\r\n shuffle = True\r\n matching_files = gen_io_ops.matching_files(file_pattern)\r\n dataset = Dataset.from_tensor_slices(matching_files)\r\n if shuffle:\r\n # NOTE(mrry): The shuffle buffer size must be greater than zero, but the\r\n # list of files might be empty.\r\n buffer_size = math_ops.maximum(\r\n array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)\r\n dataset = dataset.shuffle(buffer_size)\r\n return dataset\r\n\r\n def repeat(self, count=None):\r\n \"\"\"Repeats this dataset `count` times.\r\n\r\n NOTE: If this dataset is a function of global state (e.g. a random number\r\n generator), then different repetitions may produce different elements.\r\n\r\n Args:\r\n count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\r\n number of times the dataset should be repeated. The default behavior\r\n (if `count` is `None` or `-1`) is for the dataset to be repeated\r\n indefinitely.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return RepeatDataset(self, count)\r\n\r\n def _enumerate(self, start=0):\r\n\r\n max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max\r\n return Dataset.zip((Dataset.range(start, max_value), self))\r\n\r\n def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):\r\n \"\"\"Randomly shuffles the elements of this dataset.\r\n\r\n
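For example (an illustrative sketch; the output order is random, so one\r\n possible result is shown):\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3, 4, 5 }\r\n\r\n # With `buffer_size=5` the whole dataset fits in the buffer, so the\r\n # shuffle is uniform over all elements.\r\n a.shuffle(5) == { 3, 1, 5, 2, 4 }\r\n ```\r\n\r\n Args:\r\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\r\n number of elements from this dataset from which the new\r\n dataset will sample.\r\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\r\n random seed that will be used to create the distribution. See\r\n @{tf.set_random_seed} for behavior.\r\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\r\n that the dataset should be pseudorandomly reshuffled each time it is\r\n iterated over. (Defaults to `True`.)\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)\r\n\r\n def cache(self, filename=\"\"):\r\n \"\"\"Caches the elements in this dataset.\r\n\r\n Args:\r\n filename: A `tf.string` scalar `tf.Tensor`, representing the name of a\r\n directory on the filesystem to use for caching tensors in this Dataset.\r\n If a filename is not provided, the dataset will be cached in memory.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return CacheDataset(self, filename)\r\n\r\n def take(self, count):\r\n \"\"\"Creates a `Dataset` with at most `count` elements from this dataset.\r\n\r\n Args:\r\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\r\n elements of this dataset that should be taken to form the new dataset.\r\n If `count` is -1, or if `count` is greater than the size of this\r\n dataset, the new dataset will contain all elements of this dataset.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return TakeDataset(self, count)\r\n\r\n def skip(self, count):\r\n \"\"\"Creates a `Dataset` that skips `count` elements from this dataset.\r\n\r\n Args:\r\n count: A `tf.int64` scalar `tf.Tensor`, representing the number\r\n of elements of this dataset that should be skipped to form the\r\n new dataset. If `count` is greater than the size of this\r\n dataset, the new dataset will contain no elements. 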
If `count`\r\n is -1, skips the entire dataset.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return SkipDataset(self, count)\r\n\r\n def shard(self, num_shards, index):\r\n \"\"\"Creates a `Dataset` that includes only 1/`num_shards` of this dataset.\r\n\r\n This dataset operator is very useful when running distributed training, as\r\n it allows each worker to read a unique subset.\r\n\r\n When reading a single input file, you can skip elements as follows:\r\n\r\n ```python\r\n d = tf.data.TFRecordDataset(FLAGS.input_file)\r\n d = d.shard(FLAGS.num_workers, FLAGS.worker_index)\r\n d = d.repeat(FLAGS.num_epochs)\r\n d = d.shuffle(FLAGS.shuffle_buffer_size)\r\n d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)\r\n ```\r\n\r\n Important caveats:\r\n\r\n - Be sure to shard before you use any randomizing operator (such as\r\n shuffle).\r\n - Generally it is best if the shard operator is used early in the dataset\r\n pipeline. For example, when reading from a set of TFRecord files, shard\r\n before converting the dataset to input samples. This avoids reading every\r\n file on every worker. The following is an example of an efficient\r\n sharding strategy within a complete pipeline:\r\n\r\n ```python\r\n d = Dataset.list_files(FLAGS.pattern)\r\n d = d.shard(FLAGS.num_workers, FLAGS.worker_index)\r\n d = d.repeat(FLAGS.num_epochs)\r\n d = d.shuffle(FLAGS.shuffle_buffer_size)\r\n d = d.repeat()\r\n d = d.interleave(tf.data.TFRecordDataset,\r\n cycle_length=FLAGS.num_readers, block_length=1)\r\n d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)\r\n ```\r\n\r\n Args:\r\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\r\n shards operating in parallel.\r\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n\r\n Raises:\r\n ValueError: if `num_shards` or `index` are illegal values. Note: error\r\n checking is done on a best-effort basis, and errors aren't guaranteed\r\n to be caught upon dataset creation. (e.g. providing a placeholder\r\n tensor bypasses the early checking, and will instead result in an error\r\n during a session.run call.)\r\n \"\"\"\r\n num_shards = ops.convert_to_tensor(\r\n num_shards, name=\"num_shards\", dtype=dtypes.int64)\r\n num_shards_static = tensor_util.constant_value(num_shards)\r\n index = ops.convert_to_tensor(index, name=\"index\", dtype=dtypes.int64)\r\n index_static = tensor_util.constant_value(index)\r\n\r\n if num_shards_static is not None and num_shards_static < 1:\r\n raise ValueError(\"num_shards must be >= 1; got: %s\" % num_shards_static)\r\n if index_static is not None and index_static < 0:\r\n raise ValueError(\"index must be >= 0; got: %s\" % index_static)\r\n if (index_static is not None and num_shards_static is not None and\r\n index_static >= num_shards_static):\r\n raise ValueError(\"index must be < num_shards; %s is not < %s\" %\r\n (index_static, num_shards_static))\r\n\r\n def filter_fn(elem_index, _):\r\n mod_result = math_ops.mod(elem_index, num_shards)\r\n return math_ops.equal(mod_result, index)\r\n\r\n return self._enumerate().filter(filter_fn).map(lambda _, elem: elem)\r\n\r\n def batch(self, batch_size):\r\n \"\"\"Combines consecutive elements of this dataset into batches.\r\n\r\n
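For example (an illustrative sketch):\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3, 4, 5 }\r\n\r\n # 5 is not a multiple of 2, so the final batch is smaller (see the\r\n # note below).\r\n a.batch(2) == { [1, 2], [3, 4], [5] }\r\n ```\r\n\r\n NOTE: If the number of elements (`N`) in this dataset is not an exact\r\n multiple of `batch_size`, the final batch may contain smaller tensors with\r\n shape `N % batch_size` in the batch dimension. 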
If your program depends on\r\n the batches having the same shape, consider using the\r\n @{tf.contrib.data.batch_and_drop_remainder} transformation instead.\r\n\r\n Args:\r\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\r\n consecutive elements of this dataset to combine in a single batch.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return BatchDataset(self, batch_size)\r\n\r\n def padded_batch(self, batch_size, padded_shapes, padding_values=None):\r\n \"\"\"Combines consecutive elements of this dataset into padded batches.\r\n\r\n This transformation combines multiple consecutive elements of the input\r\n dataset into a single element. Like @{tf.data.Dataset.batch}, the tensors\r\n in the resulting element have an additional outer dimension, which will be\r\n `batch_size` for all but the last element, and `N % batch_size` for the\r\n last element (where `N` is the number of elements in this dataset). Unlike\r\n @{tf.data.Dataset.batch}, the elements may have different shapes for some\r\n of their components, and this transformation will pad each component to\r\n the respective shape in `padded_shapes`. The `padded_shapes` argument\r\n determines the resulting shape for each dimension of each component in an\r\n output element:\r\n\r\n * If the dimension is a constant (e.g. `tf.Dimension(37)`), the component\r\n will be padded out to that length in that dimension.\r\n * If the dimension is unknown (e.g. `tf.Dimension(None)`), the component\r\n will be padded out to the maximum length of all elements in that\r\n dimension.\r\n\r\n
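For example (an illustrative sketch of the second rule above):\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { [1], [2, 3], [4, 5, 6] }\r\n\r\n # An unknown dimension pads each component to the longest element in\r\n # its batch, using the default padding value 0.\r\n a.padded_batch(2, padded_shapes=[None]) == { [[1, 0], [2, 3]],\r\n [[4, 5, 6]] }\r\n ```\r\n\r\n NOTE: If the number of elements (`N`) in this dataset is not an exact\r\n multiple of `batch_size`, the final batch may contain smaller tensors with\r\n shape `N % batch_size` in the batch dimension. If your program depends on\r\n the batches having the same shape, consider using the\r\n @{tf.contrib.data.padded_batch_and_drop_remainder} transformation instead.\r\n\r\n See also @{tf.contrib.data.dense_to_sparse_batch}, which combines elements\r\n that may have different shapes into a @{tf.SparseTensor}.\r\n\r\n Args:\r\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\r\n consecutive elements of this dataset to combine in a single batch.\r\n padded_shapes: A nested structure of `tf.TensorShape` or\r\n `tf.int64` vector tensor-like objects representing the shape\r\n to which the respective component of each input element should\r\n be padded prior to batching. Any unknown dimensions\r\n (e.g. `tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a\r\n tensor-like object) will be padded to the maximum size of that\r\n dimension in each batch.\r\n padding_values: (Optional.) A nested structure of scalar-shaped\r\n `tf.Tensor`, representing the padding values to use for the\r\n respective components. Defaults are `0` for numeric types and\r\n the empty string for string types.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values)\r\n\r\n def map(self, map_func, num_parallel_calls=None):\r\n \"\"\"Maps `map_func` across this dataset.\r\n\r\n Args:\r\n map_func: A function mapping a nested structure of tensors (having\r\n shapes and types defined by `self.output_shapes` and\r\n `self.output_types`) to another nested structure of tensors.\r\n num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,\r\n representing the number of elements to process in parallel. 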
If not\r\n specified, elements will be processed sequentially.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n if num_parallel_calls is None:\r\n return MapDataset(self, map_func)\r\n else:\r\n return ParallelMapDataset(self, map_func, num_parallel_calls)\r\n\r\n def flat_map(self, map_func):\r\n \"\"\"Maps `map_func` across this dataset and flattens the result.\r\n\r\n
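For example (an illustrative sketch):\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { [1, 2, 3], [4, 5, 6] }\r\n\r\n # `map_func` returns a `Dataset` for each element; the results are\r\n # concatenated in order into a single flat dataset.\r\n a.flat_map(lambda x: Dataset.from_tensor_slices(x)) == { 1, 2, 3, 4, 5, 6 }\r\n ```\r\n\r\n Args:\r\n map_func: A function mapping a nested structure of tensors (having shapes\r\n and types defined by `self.output_shapes` and `self.output_types`) to a\r\n `Dataset`.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return FlatMapDataset(self, map_func)\r\n\r\n def interleave(self, map_func, cycle_length, block_length=1):\r\n \"\"\"Maps `map_func` across this dataset, and interleaves the results.\r\n\r\n For example, you can use `Dataset.interleave()` to process many input files\r\n concurrently:\r\n\r\n ```python\r\n # Preprocess 4 files concurrently, and interleave blocks of 16 records from\r\n # each file.\r\n filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\", ...]\r\n dataset = (Dataset.from_tensor_slices(filenames)\r\n .interleave(lambda x:\r\n TextLineDataset(x).map(parse_fn, num_parallel_calls=1),\r\n cycle_length=4, block_length=16))\r\n ```\r\n\r\n The `cycle_length` and `block_length` arguments control the order in which\r\n elements are produced. `cycle_length` controls the number of input elements\r\n that are processed concurrently. If you set `cycle_length` to 1, this\r\n transformation will handle one input element at a time, and will produce\r\n identical results to @{tf.data.Dataset.flat_map}. In general,\r\n this transformation will apply `map_func` to `cycle_length` input elements,\r\n open iterators on the returned `Dataset` objects, and cycle through them\r\n producing `block_length` consecutive elements from each iterator, and\r\n consuming the next input element each time it reaches the end of an\r\n iterator.\r\n\r\n For example:\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3, 4, 5 }\r\n\r\n # NOTE: New lines indicate \"block\" boundaries.\r\n a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),\r\n cycle_length=2, block_length=4) == {\r\n 1, 1, 1, 1,\r\n 2, 2, 2, 2,\r\n 1, 1,\r\n 2, 2,\r\n 3, 3, 3, 3,\r\n 4, 4, 4, 4,\r\n 3, 3,\r\n 4, 4,\r\n 5, 5, 5, 5,\r\n 5, 5,\r\n }\r\n ```\r\n\r\n NOTE: The order of elements yielded by this transformation is\r\n deterministic, as long as `map_func` is a pure function. 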
If\r\n `map_func` contains any stateful operations, the order in which\r\n that state is accessed is undefined.\r\n\r\n Args:\r\n map_func: A function mapping a nested structure of tensors (having shapes\r\n and types defined by `self.output_shapes` and `self.output_types`) to a\r\n `Dataset`.\r\n cycle_length: The number of elements from this dataset that will be\r\n processed concurrently.\r\n block_length: The number of consecutive elements to produce from each\r\n input element before cycling to another input element.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return InterleaveDataset(self, map_func, cycle_length, block_length)\r\n\r\n def filter(self, predicate):\r\n \"\"\"Filters this dataset according to `predicate`.\r\n\r\n
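For example (an illustrative sketch):\r\n\r\n ```python\r\n # NOTE: The following examples use `{ ... }` to represent the\r\n # contents of a dataset.\r\n a = { 1, 2, 3, 4, 5 }\r\n\r\n # `predicate` must return a scalar `tf.bool` tensor.\r\n a.filter(lambda x: x > 3) == { 4, 5 }\r\n ```\r\n\r\n Args:\r\n predicate: A function mapping a nested structure of tensors (having shapes\r\n and types defined by `self.output_shapes` and `self.output_types`) to a\r\n scalar `tf.bool` tensor.\r\n\r\n Returns:\r\n Dataset: A `Dataset`.\r\n \"\"\"\r\n return FilterDataset(self, predicate)\r\n\r\n def apply(self, transformation_func):\r\n \"\"\"Applies a transformation function to this dataset.\r\n\r\n `apply` enables chaining of custom `Dataset` transformations, which are\r\n represented as functions that take one `Dataset` argument and return a\r\n transformed `Dataset`.\r\n\r\n For example:\r\n\r\n ```python\r\n dataset = (dataset.map(lambda x: x ** 2)\r\n .apply(group_by_window(key_func, reduce_func, window_size))\r\n .map(lambda x: x ** 3))\r\n ```\r\n\r\n Args:\r\n transformation_func: A function that takes one `Dataset` argument and\r\n returns a `Dataset`.\r\n\r\n Returns:\r\n Dataset: The `Dataset` returned by applying `transformation_func` to this\r\n dataset.\r\n \"\"\"\r\n dataset = transformation_func(self)\r\n if not isinstance(dataset, Dataset):\r\n raise TypeError(\"`transformation_func` must return a Dataset.\")\r\n return dataset\r\n\r\n\r\nclass TensorDataset(Dataset):\r\n \"\"\"A `Dataset` with a single element, viz. 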
a nested structure of tensors.\"\"\"\r\n\r\n def __init__(self, tensors):\r\n \"\"\"See `Dataset.from_tensors()` for details.\"\"\"\r\n super(TensorDataset, self).__init__()\r\n with ops.name_scope(\"tensors\"):\r\n tensors = nest.pack_sequence_as(tensors, [\r\n sparse_tensor_lib.SparseTensor.from_value(t)\r\n if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(\r\n t, name=\"component_%d\" % i)\r\n for i, t in enumerate(nest.flatten(tensors))\r\n ])\r\n\r\n self._tensors = sparse.serialize_sparse_tensors(tensors)\r\n self._output_classes = sparse.get_classes(tensors)\r\n self._output_shapes = nest.pack_sequence_as(\r\n tensors, [t.get_shape() for t in nest.flatten(tensors)])\r\n self._output_types = nest.pack_sequence_as(\r\n tensors, [t.dtype for t in nest.flatten(tensors)])\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.tensor_dataset(\r\n nest.flatten(self._tensors),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass TensorSliceDataset(Dataset):\r\n \"\"\"A `Dataset` of slices from a nested structure of tensors.\"\"\"\r\n\r\n def __init__(self, tensors):\r\n \"\"\"See `Dataset.from_tensor_slices()` for details.\"\"\"\r\n super(TensorSliceDataset, self).__init__()\r\n with ops.name_scope(\"tensors\"):\r\n tensors = nest.pack_sequence_as(tensors, [\r\n sparse_tensor_lib.SparseTensor.from_value(t)\r\n if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(\r\n t, name=\"component_%d\" % i)\r\n for i, t in enumerate(nest.flatten(tensors))\r\n ])\r\n flat_tensors = nest.flatten(tensors)\r\n\r\n batch_dim = flat_tensors[0].get_shape()[0]\r\n for t in flat_tensors[1:]:\r\n batch_dim.assert_is_compatible_with(t.get_shape()[0])\r\n self._tensors = sparse.serialize_many_sparse_tensors(tensors)\r\n self._output_classes = sparse.get_classes(tensors)\r\n self._output_shapes = nest.pack_sequence_as(\r\n tensors, [t.get_shape()[1:] for t in nest.flatten(tensors)])\r\n self._output_types = nest.pack_sequence_as(\r\n tensors, [t.dtype for t in nest.flatten(tensors)])\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.tensor_slice_dataset(\r\n nest.flatten(self._tensors),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass SparseTensorSliceDataset(Dataset):\r\n \"\"\"A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows.\"\"\"\r\n\r\n def __init__(self, sparse_tensor):\r\n \"\"\"See `Dataset.from_sparse_tensor_slices()` for details.\"\"\"\r\n super(SparseTensorSliceDataset, self).__init__()\r\n if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):\r\n raise TypeError(\"`sparse_tensor` must be a `tf.SparseTensor` object.\")\r\n self._sparse_tensor = sparse_tensor\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.sparse_tensor_slice_dataset(\r\n self._sparse_tensor.indices, self._sparse_tensor.values,\r\n self._sparse_tensor.dense_shape)\r\n\r\n @property\r\n def output_classes(self):\r\n return (ops.Tensor, ops.Tensor, 
ops.Tensor)\r\n\r\n @property\r\n def output_shapes(self):\r\n indices_shape = self._sparse_tensor.indices.get_shape()\r\n shape_shape = self._sparse_tensor.dense_shape.get_shape()\r\n rank = (indices_shape[1] - 1).merge_with(shape_shape[0] - 1)\r\n num_values = tensor_shape.Dimension(None)\r\n return (tensor_shape.TensorShape([num_values, rank]),\r\n tensor_shape.TensorShape([num_values]),\r\n tensor_shape.TensorShape([rank]))\r\n\r\n @property\r\n def output_types(self):\r\n return (dtypes.int64, self._sparse_tensor.dtype, dtypes.int64)\r\n\r\n\r\nclass _GeneratorDataset(Dataset):\r\n \"\"\"A `Dataset` that generates elements by invoking a function.\"\"\"\r\n\r\n def __init__(self, init_args, init_func, next_func, finalize_func):\r\n \"\"\"Constructs a `_GeneratorDataset`.\r\n\r\n Args:\r\n init_args: A nested structure representing the arguments to `init_func`.\r\n init_func: A TensorFlow function that will be called on `init_args` each\r\n time a C++ iterator over this dataset is constructed. Returns a nested\r\n structure representing the \"state\" of the dataset.\r\n next_func: A TensorFlow function that will be called on the result of\r\n `init_func` to produce each element, and that raises `OutOfRangeError`\r\n to terminate iteration.\r\n finalize_func: A TensorFlow function that will be called on the result of\r\n `init_func` immediately before a C++ iterator over this dataset is\r\n destroyed. The return value is ignored.\r\n \"\"\"\r\n super(_GeneratorDataset, self).__init__()\r\n # These members will be initialized by `tf_init_func`.\r\n self._state_classes = None\r\n self._state_shapes = None\r\n self._state_types = None\r\n\r\n self._init_args = init_args\r\n\r\n init_args_classes = sparse.get_classes(init_args)\r\n init_args_shapes = nest.pack_sequence_as(\r\n init_args, [t.get_shape() for t in nest.flatten(init_args)])\r\n init_args_types = nest.pack_sequence_as(\r\n init_args, [t.dtype for t in nest.flatten(init_args)])\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(init_args_types, init_args_classes)))\r\n def tf_init_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n dense_shapes = sparse.as_dense_shapes(init_args_shapes, init_args_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(init_args_classes, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, init_args_types, init_args_shapes, init_args_classes)\r\n if _should_unpack_args(nested_args):\r\n ret = init_func(*nested_args)\r\n else:\r\n ret = init_func(nested_args)\r\n\r\n # If `init_func` returns a list of tensors, `nest.flatten()` and\r\n # `ops.convert_to_tensor()` would conspire to attempt to stack\r\n # those tensors into a single tensor, because the customized\r\n # version of `nest.flatten()` does not recurse into lists. Since\r\n # it is more likely that the list arose from returning the\r\n # result of an operation (such as `tf.py_func()`) that returns a\r\n # list of not-necessarily-stackable tensors, we treat the\r\n # returned value as a `tuple` instead. 
A user wishing to pack\r\n # the return value into a single tensor can use an explicit\r\n # `tf.stack()` before returning.\r\n if isinstance(ret, list):\r\n ret = tuple(ret)\r\n\r\n # Convert any `SparseTensorValue`s to `SparseTensor`s.\r\n ret = nest.pack_sequence_as(ret, [\r\n sparse_tensor_lib.SparseTensor.from_value(t)\r\n if sparse_tensor_lib.is_sparse(t) else t for t in nest.flatten(ret)\r\n ])\r\n\r\n self._state_classes = sparse.get_classes(ret)\r\n self._state_shapes = nest.pack_sequence_as(\r\n ret, [t.get_shape() for t in nest.flatten(ret)])\r\n self._state_types = nest.pack_sequence_as(\r\n ret, [t.dtype for t in nest.flatten(ret)])\r\n\r\n # Serialize any sparse tensors and convert result to tensors.\r\n ret = nest.pack_sequence_as(ret, [\r\n ops.convert_to_tensor(t)\r\n for t in nest.flatten(sparse.serialize_sparse_tensors(ret))\r\n ])\r\n return nest.flatten(ret)\r\n\r\n self._init_func = tf_init_func\r\n self._init_func.add_to_graph(ops.get_default_graph())\r\n\r\n # These members will be initialized by `tf_next_func`.\r\n self._output_classes = None\r\n self._output_shapes = None\r\n self._output_types = None\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(self._state_types, self._state_classes)))\r\n def tf_next_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in shape information from the input_dataset.\r\n dense_shapes = sparse.as_dense_shapes(self._state_shapes,\r\n self._state_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(self._state_classes, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, self._state_types, self._state_shapes,\r\n self._state_classes)\r\n if _should_unpack_args(nested_args):\r\n ret = next_func(*nested_args)\r\n else:\r\n ret = next_func(nested_args)\r\n\r\n # If `next_func` returns a list of tensors, `nest.flatten()` and\r\n # `ops.convert_to_tensor()` would conspire to attempt to stack\r\n # those tensors into a single tensor, because the customized\r\n # version of `nest.flatten()` does not recurse into lists. Since\r\n # it is more likely that the list arose from returning the\r\n # result of an operation (such as `tf.py_func()`) that returns a\r\n # list of not-necessarily-stackable tensors, we treat the\r\n # returned value as a `tuple` instead. 
A user wishing to pack\r\n # the return value into a single tensor can use an explicit\r\n # `tf.stack()` before returning.\r\n if isinstance(ret, list):\r\n ret = tuple(ret)\r\n\r\n # Convert any `SparseTensorValue`s to `SparseTensor`s.\r\n ret = nest.pack_sequence_as(ret, [\r\n sparse_tensor_lib.SparseTensor.from_value(t)\r\n if sparse_tensor_lib.is_sparse(t) else t for t in nest.flatten(ret)\r\n ])\r\n\r\n self._output_classes = sparse.get_classes(ret)\r\n self._output_shapes = nest.pack_sequence_as(\r\n ret, [t.get_shape() for t in nest.flatten(ret)])\r\n self._output_types = nest.pack_sequence_as(\r\n ret, [t.dtype for t in nest.flatten(ret)])\r\n\r\n # Serialize any sparse tensors and convert result to tensors.\r\n ret = nest.pack_sequence_as(ret, [\r\n ops.convert_to_tensor(t)\r\n for t in nest.flatten(sparse.serialize_sparse_tensors(ret))\r\n ])\r\n return nest.flatten(ret)\r\n\r\n self._next_func = tf_next_func\r\n self._next_func.add_to_graph(ops.get_default_graph())\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(self._state_types, self._state_classes)))\r\n def tf_finalize_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in shape information from the state.\r\n dense_shapes = sparse.as_dense_shapes(self._state_shapes,\r\n self._state_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(self._state_classes, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, self._state_types, self._state_shapes,\r\n self._state_classes)\r\n if _should_unpack_args(nested_args):\r\n return finalize_func(*nested_args)\r\n else:\r\n return finalize_func(nested_args)\r\n\r\n self._finalize_func = tf_finalize_func\r\n self._finalize_func.add_to_graph(ops.get_default_graph())\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.generator_dataset(\r\n nest.flatten(self._init_args) + self._init_func.captured_inputs,\r\n self._next_func.captured_inputs,\r\n self._finalize_func.captured_inputs,\r\n init_func=self._init_func,\r\n next_func=self._next_func,\r\n finalize_func=self._finalize_func,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass ZipDataset(Dataset):\r\n \"\"\"A `Dataset` that zips its inputs together.\"\"\"\r\n\r\n def __init__(self, datasets):\r\n \"\"\"See `Dataset.zip()` for details.\"\"\"\r\n super(ZipDataset, self).__init__()\r\n for ds in nest.flatten(datasets):\r\n if not isinstance(ds, Dataset):\r\n if isinstance(ds, list):\r\n message = (\"The argument to `Dataset.zip()` must be a nested \"\r\n \"structure of `Dataset` objects. 
Nested structures do not \"\r\n \"support Python lists; please use a tuple instead.\")\r\n else:\r\n message = (\"The argument to `Dataset.zip()` must be a nested \"\r\n \"structure of `Dataset` objects.\")\r\n raise TypeError(message)\r\n self._datasets = datasets\r\n\r\n def _as_variant_tensor(self):\r\n # pylint: disable=protected-access\r\n return gen_dataset_ops.zip_dataset(\r\n [ds._as_variant_tensor() for ds in nest.flatten(self._datasets)],\r\n output_shapes=[\r\n s\r\n for ds in nest.flatten(self._datasets)\r\n for s in nest.flatten(ds.output_shapes)\r\n ],\r\n output_types=[\r\n t\r\n for ds in nest.flatten(self._datasets)\r\n for t in nest.flatten(ds.output_types)\r\n ])\r\n # pylint: enable=protected-access\r\n\r\n @property\r\n def output_classes(self):\r\n return nest.pack_sequence_as(\r\n self._datasets,\r\n [ds.output_classes for ds in nest.flatten(self._datasets)])\r\n\r\n @property\r\n def output_shapes(self):\r\n return nest.pack_sequence_as(\r\n self._datasets,\r\n [ds.output_shapes for ds in nest.flatten(self._datasets)])\r\n\r\n @property\r\n def output_types(self):\r\n return nest.pack_sequence_as(\r\n self._datasets,\r\n [ds.output_types for ds in nest.flatten(self._datasets)])\r\n\r\n\r\nclass ConcatenateDataset(Dataset):\r\n \"\"\"A `Dataset` that concatenates its input with given dataset.\"\"\"\r\n\r\n def __init__(self, input_dataset, dataset_to_concatenate):\r\n \"\"\"See `Dataset.concatenate()` for details.\"\"\"\r\n super(ConcatenateDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._dataset_to_concatenate = dataset_to_concatenate\r\n nest.assert_same_structure(input_dataset.output_types,\r\n dataset_to_concatenate.output_types)\r\n for a, b in zip(\r\n nest.flatten(input_dataset.output_types),\r\n nest.flatten(dataset_to_concatenate.output_types)):\r\n if a != b:\r\n raise TypeError(\r\n \"Two datasets to concatenate have different types %s and %s\" %\r\n (input_dataset.output_types, dataset_to_concatenate.output_types))\r\n\r\n def _as_variant_tensor(self):\r\n # pylint: disable=protected-access\r\n return gen_dataset_ops.concatenate_dataset(\r\n self._input_dataset._as_variant_tensor(),\r\n self._dataset_to_concatenate._as_variant_tensor(),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n # pylint: enable=protected-access\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return nest.pack_sequence_as(self._input_dataset.output_shapes, [\r\n ts1.most_specific_compatible_shape(ts2)\r\n for (ts1, ts2) in zip(\r\n nest.flatten(self._input_dataset.output_shapes),\r\n nest.flatten(self._dataset_to_concatenate.output_shapes))\r\n ])\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass RepeatDataset(Dataset):\r\n \"\"\"A `Dataset` that repeats its input several times.\"\"\"\r\n\r\n def __init__(self, input_dataset, count):\r\n \"\"\"See `Dataset.repeat()` for details.\"\"\"\r\n super(RepeatDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n if count is None:\r\n self._count = constant_op.constant(-1, dtype=dtypes.int64, name=\"count\")\r\n else:\r\n self._count = ops.convert_to_tensor(\r\n count, dtype=dtypes.int64, name=\"count\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.repeat_dataset(\r\n 
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n count=self._count,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass RangeDataset(Dataset):\r\n \"\"\"A `Dataset` of a step separated range of values.\"\"\"\r\n\r\n def __init__(self, *args):\r\n \"\"\"See `Dataset.range()` for details.\"\"\"\r\n super(RangeDataset, self).__init__()\r\n self._parse_args(*args)\r\n\r\n def _parse_args(self, *args):\r\n if len(args) == 1:\r\n self._start = self._build_tensor(0, \"start\")\r\n self._stop = self._build_tensor(args[0], \"stop\")\r\n self._step = self._build_tensor(1, \"step\")\r\n elif len(args) == 2:\r\n self._start = self._build_tensor(args[0], \"start\")\r\n self._stop = self._build_tensor(args[1], \"stop\")\r\n self._step = self._build_tensor(1, \"step\")\r\n elif len(args) == 3:\r\n self._start = self._build_tensor(args[0], \"start\")\r\n self._stop = self._build_tensor(args[1], \"stop\")\r\n self._step = self._build_tensor(args[2], \"step\")\r\n else:\r\n raise ValueError(\"Invalid arguments to RangeDataset: %s\" % str(args))\r\n\r\n def _build_tensor(self, int64_value, name):\r\n return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.range_dataset(\r\n start=self._start,\r\n stop=self._stop,\r\n step=self._step,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return ops.Tensor\r\n\r\n @property\r\n def output_shapes(self):\r\n return tensor_shape.scalar()\r\n\r\n @property\r\n def output_types(self):\r\n return dtypes.int64\r\n\r\n\r\nclass CacheDataset(Dataset):\r\n \"\"\"A `Dataset` that caches elements of its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, filename):\r\n \"\"\"See `Dataset.cache()` for details.\"\"\"\r\n super(CacheDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._filename = ops.convert_to_tensor(\r\n filename, dtype=dtypes.string, name=\"filename\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.cache_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n filename=self._filename,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass ShuffleDataset(Dataset):\r\n \"\"\"A `Dataset` that randomly shuffles the elements of its input.\"\"\"\r\n\r\n def __init__(self,\r\n input_dataset,\r\n buffer_size,\r\n seed=None,\r\n reshuffle_each_iteration=None):\r\n \"\"\"Randomly shuffles the elements of this 
dataset.\r\n\r\n Args:\r\n input_dataset: The input dataset.\r\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\r\n number of elements from this dataset from which the new\r\n dataset will sample.\r\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\r\n random seed that will be used to create the distribution. See\r\n @{tf.set_random_seed} for behavior.\r\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\r\n that the dataset should be pseudorandomly reshuffled each time it is\r\n iterated over. (Defaults to `True`.)\r\n\r\n Returns:\r\n A `Dataset`.\r\n\r\n Raises:\r\n ValueError: if invalid arguments are provided.\r\n \"\"\"\r\n super(ShuffleDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._buffer_size = ops.convert_to_tensor(\r\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\r\n self._seed, self._seed2 = random_seed.get_seed(seed)\r\n if reshuffle_each_iteration is None:\r\n self._reshuffle_each_iteration = True\r\n else:\r\n self._reshuffle_each_iteration = reshuffle_each_iteration\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.shuffle_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n buffer_size=self._buffer_size,\r\n seed=self._seed,\r\n seed2=self._seed2,\r\n reshuffle_each_iteration=self._reshuffle_each_iteration,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass TakeDataset(Dataset):\r\n \"\"\"A `Dataset` containing the first `count` elements from its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, count):\r\n \"\"\"See `Dataset.take()` for details.\"\"\"\r\n super(TakeDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.take_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n count=self._count,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass SkipDataset(Dataset):\r\n \"\"\"A `Dataset` skipping the first `count` elements from its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, count):\r\n \"\"\"See `Dataset.skip()` for details.\"\"\"\r\n super(SkipDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.skip_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n count=self._count,\r\n output_shapes=nest.flatten(\r\n 
sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass BatchDataset(Dataset):\r\n \"\"\"A `Dataset` that batches contiguous elements from its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, batch_size):\r\n \"\"\"See `Dataset.batch()` for details.\"\"\"\r\n super(BatchDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._batch_size = ops.convert_to_tensor(\r\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.batch_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n batch_size=self._batch_size,\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n input_shapes = self._input_dataset.output_shapes\r\n return nest.pack_sequence_as(input_shapes, [\r\n tensor_shape.vector(None).concatenate(s)\r\n for s in nest.flatten(self._input_dataset.output_shapes)\r\n ])\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\ndef _partial_shape_to_tensor(shape_like):\r\n try:\r\n # First attempt to convert the input to a shape, and return the\r\n # \"canonical\" tensor representation, which uses `-1` in place of\r\n # `None`.\r\n shape_like = tensor_shape.as_shape(shape_like)\r\n return ops.convert_to_tensor(\r\n [dim if dim is not None else -1 for dim in shape_like.as_list()],\r\n dtype=dtypes.int64)\r\n except (TypeError, ValueError):\r\n # The argument was not trivially convertible to a\r\n # `tf.TensorShape`, so fall back on the conversion to tensor\r\n # machinery.\r\n return ops.convert_to_tensor(shape_like, dtype=dtypes.int64)\r\n\r\n\r\ndef _padding_value_to_tensor(value, output_type):\r\n \"\"\"Converts the padding value to a tensor.\r\n\r\n Args:\r\n value: The padding value.\r\n output_type: Its expected dtype.\r\n\r\n Returns:\r\n A scalar `Tensor`.\r\n\r\n Raises:\r\n ValueError: if the padding value is not a scalar.\r\n TypeError: if the padding value's type does not match `output_type`.\r\n \"\"\"\r\n value = ops.convert_to_tensor(value, name=\"padding_value\")\r\n if not value.shape.is_compatible_with(tensor_shape.scalar()):\r\n raise ValueError(\"Padding value should be a scalar, but is not: %s\" % value)\r\n if value.dtype != output_type:\r\n raise TypeError(\"Padding value tensor (%s) does not match output type: %s\" %\r\n (value, output_type))\r\n return value\r\n\r\n\r\ndef _default_padding(input_dataset):\r\n\r\n def make_zero(t):\r\n if t.base_dtype == dtypes.string:\r\n return \"\"\r\n elif t.base_dtype == dtypes.variant:\r\n raise TypeError(\"Unable to create padding for field of type 'variant'\")\r\n else:\r\n return np.zeros_like(t.as_numpy_dtype())\r\n\r\n return nest.map_structure(make_zero, input_dataset.output_types)\r\n\r\n\r\nclass PaddedBatchDataset(Dataset):\r\n \"\"\"A `Dataset` that batches and pads 
contiguous elements from its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, batch_size, padded_shapes, padding_values):\r\n \"\"\"See `Dataset.batch()` for details.\"\"\"\r\n super(PaddedBatchDataset, self).__init__()\r\n if sparse.any_sparse(input_dataset.output_classes):\r\n # TODO(b/63669786): support batching of sparse tensors\r\n raise TypeError(\r\n \"Batching of padded sparse tensors is not currently supported\")\r\n self._input_dataset = input_dataset\r\n self._batch_size = ops.convert_to_tensor(\r\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\r\n padding_values = (\r\n padding_values\r\n if padding_values is not None else _default_padding(input_dataset))\r\n self._padded_shapes = nest.map_structure_up_to(\r\n input_dataset.output_shapes, _partial_shape_to_tensor, padded_shapes)\r\n self._padding_values = nest.map_structure_up_to(\r\n input_dataset.output_shapes, _padding_value_to_tensor, padding_values,\r\n input_dataset.output_types)\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.padded_batch_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n batch_size=self._batch_size,\r\n padded_shapes=[\r\n ops.convert_to_tensor(s, dtype=dtypes.int64)\r\n for s in nest.flatten(self._padded_shapes)\r\n ],\r\n padding_values=nest.flatten(self._padding_values),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n\r\n def _padded_shape_to_batch_shape(s):\r\n return tensor_shape.vector(None).concatenate(\r\n tensor_util.constant_value_as_shape(s))\r\n\r\n return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes)\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\ndef _should_unpack_args(args):\r\n \"\"\"Returns `True` if `args` should be `*args` when passed to a callable.\"\"\"\r\n return type(args) is tuple # pylint: disable=unidiomatic-typecheck\r\n\r\n\r\nclass MapDataset(Dataset):\r\n \"\"\"A `Dataset` that maps a function over elements in its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, map_func):\r\n \"\"\"See `Dataset.map()` for details.\"\"\"\r\n super(MapDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n\r\n self._output_classes = None\r\n self._output_shapes = None\r\n self._output_types = None\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(input_dataset.output_types,\r\n input_dataset.output_classes)))\r\n def tf_map_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in shape information from the input_dataset.\r\n dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, input_dataset.output_types, input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n if _should_unpack_args(nested_args):\r\n ret = map_func(*nested_args)\r\n else:\r\n ret = map_func(nested_args)\r\n\r\n # If `map_func` returns a list of tensors, `nest.flatten()` and\r\n # `ops.convert_to_tensor()` would conspire to attempt to stack\r\n # those tensors into a single tensor, because the customized\r\n # version of 
`nest.flatten()` does not recurse into lists. Since\r\n # it is more likely that the list arose from returning the\r\n # result of an operation (such as `tf.py_func()`) that returns a\r\n # list of not-necessarily-stackable tensors, we treat the\r\n # returned value is a `tuple` instead. A user wishing to pack\r\n # the return value into a single tensor can use an explicit\r\n # `tf.stack()` before returning.\r\n if isinstance(ret, list):\r\n ret = tuple(ret)\r\n\r\n # Convert any `SparseTensorValue`s to `SparseTensor`s.\r\n ret = nest.pack_sequence_as(ret, [\r\n sparse_tensor_lib.SparseTensor.from_value(t)\r\n if sparse_tensor_lib.is_sparse(t) else t for t in nest.flatten(ret)\r\n ])\r\n\r\n self._output_classes = sparse.get_classes(ret)\r\n self._output_shapes = nest.pack_sequence_as(\r\n ret, [t.get_shape() for t in nest.flatten(ret)])\r\n self._output_types = nest.pack_sequence_as(\r\n ret, [t.dtype for t in nest.flatten(ret)])\r\n\r\n # Serialize any sparse tensors and convert result to tensors.\r\n ret = nest.pack_sequence_as(ret, [\r\n ops.convert_to_tensor(t)\r\n for t in nest.flatten(sparse.serialize_sparse_tensors(ret))\r\n ])\r\n return nest.flatten(ret)\r\n\r\n self._map_func = tf_map_func\r\n self._map_func.add_to_graph(ops.get_default_graph())\r\n\r\n def _as_variant_tensor(self):\r\n input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access\r\n return gen_dataset_ops.map_dataset(\r\n input_t,\r\n self._map_func.captured_inputs,\r\n f=self._map_func,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass ParallelMapDataset(MapDataset):\r\n \"\"\"A `Dataset` that maps a function over elements in its input in parallel.\"\"\"\r\n\r\n def __init__(self, input_dataset, map_func, num_parallel_calls):\r\n \"\"\"See `Dataset.map()` for details.\"\"\"\r\n super(ParallelMapDataset, self).__init__(input_dataset, map_func)\r\n\r\n self._num_parallel_calls = ops.convert_to_tensor(\r\n num_parallel_calls, dtype=dtypes.int32, name=\"num_parallel_calls\")\r\n\r\n def _as_variant_tensor(self):\r\n input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access\r\n # pylint: disable=protected-access\r\n return gen_dataset_ops.parallel_map_dataset(\r\n input_t,\r\n self._map_func.captured_inputs,\r\n f=self._map_func,\r\n num_parallel_calls=self._num_parallel_calls,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n # pylint: enable=protected-access\r\n\r\n\r\nclass FlatMapDataset(Dataset):\r\n \"\"\"A `Dataset` that maps a function over its input and flattens the result.\"\"\"\r\n\r\n def __init__(self, input_dataset, map_func):\r\n \"\"\"See `Dataset.flat_map()` for details.\"\"\"\r\n super(FlatMapDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(input_dataset.output_types,\r\n input_dataset.output_classes)))\r\n def tf_map_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in 
shape information from the input_dataset.\r\n dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, input_dataset.output_types, input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n if _should_unpack_args(nested_args):\r\n dataset = map_func(*nested_args)\r\n else:\r\n dataset = map_func(nested_args)\r\n\r\n if not isinstance(dataset, Dataset):\r\n raise TypeError(\"`map_func` must return a `Dataset` object.\")\r\n\r\n self._output_classes = dataset.output_classes\r\n self._output_types = dataset.output_types\r\n self._output_shapes = dataset.output_shapes\r\n\r\n return dataset._as_variant_tensor() # pylint: disable=protected-access\r\n\r\n self._map_func = tf_map_func\r\n self._map_func.add_to_graph(ops.get_default_graph())\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.flat_map_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n self._map_func.captured_inputs,\r\n f=self._map_func,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass InterleaveDataset(Dataset):\r\n \"\"\"A `Dataset` that maps a function over its input and interleaves the result.\r\n \"\"\"\r\n\r\n def __init__(self, input_dataset, map_func, cycle_length, block_length):\r\n \"\"\"See `Dataset.interleave()` for details.\"\"\"\r\n super(InterleaveDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(input_dataset.output_types,\r\n input_dataset.output_classes)))\r\n def tf_map_func(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in shape information from the input_dataset.\r\n dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, input_dataset.output_types, input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n if _should_unpack_args(nested_args):\r\n dataset = map_func(*nested_args)\r\n else:\r\n dataset = map_func(nested_args)\r\n\r\n if not isinstance(dataset, Dataset):\r\n raise TypeError(\"`map_func` must return a `Dataset` object.\")\r\n\r\n self._output_classes = dataset.output_classes\r\n self._output_types = dataset.output_types\r\n self._output_shapes = dataset.output_shapes\r\n\r\n return dataset._as_variant_tensor() # pylint: disable=protected-access\r\n\r\n self._map_func = tf_map_func\r\n self._map_func.add_to_graph(ops.get_default_graph())\r\n\r\n self._cycle_length = ops.convert_to_tensor(\r\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\r\n self._block_length = ops.convert_to_tensor(\r\n block_length, dtype=dtypes.int64, name=\"block_length\")\r\n\r\n 
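# Editor's note: a minimal usage sketch, not part of the original file and\r\n  # assuming only the public `tf.data` namespace of this TF release. It shows\r\n  # how `cycle_length` and `block_length` shape the order produced by\r\n  # `Dataset.interleave()`:\r\n  #\r\n  #   ds = tf.data.Dataset.range(3).interleave(\r\n  #       lambda x: tf.data.Dataset.from_tensors(x).repeat(4),\r\n  #       cycle_length=2, block_length=2)\r\n  #   # Yields 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2: up to `block_length`\r\n  #   # consecutive elements are taken from each of `cycle_length` open\r\n  #   # input datasets in turn.\r\n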
def _as_variant_tensor(self):\r\n return gen_dataset_ops.interleave_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n self._map_func.captured_inputs,\r\n self._cycle_length,\r\n self._block_length,\r\n f=self._map_func,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._output_types\r\n\r\n\r\nclass FilterDataset(Dataset):\r\n \"\"\"A `Dataset` that filters its input according to a predicate function.\"\"\"\r\n\r\n def __init__(self, input_dataset, predicate):\r\n \"\"\"See `Dataset.filter()` for details.\"\"\"\r\n super(FilterDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n\r\n @function.Defun(*nest.flatten(\r\n sparse.as_dense_types(input_dataset.output_types,\r\n input_dataset.output_classes)))\r\n def tf_predicate(*args):\r\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\r\n # Pass in shape information from the input_dataset.\r\n dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n for arg, shape in zip(args, nest.flatten(dense_shapes)):\r\n arg.set_shape(shape)\r\n\r\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\r\n nested_args = sparse.deserialize_sparse_tensors(\r\n nested_args, input_dataset.output_types, input_dataset.output_shapes,\r\n input_dataset.output_classes)\r\n if _should_unpack_args(nested_args):\r\n ret = predicate(*nested_args)\r\n else:\r\n ret = predicate(nested_args)\r\n\r\n ret = ops.convert_to_tensor(ret, dtype=dtypes.bool)\r\n if not (ret.dtype == dtypes.bool and\r\n ret.shape.is_compatible_with(tensor_shape.scalar())):\r\n raise ValueError(\"`predicate` must return a scalar boolean tensor.\")\r\n\r\n return ret\r\n\r\n self._predicate = tf_predicate\r\n self._predicate.add_to_graph(ops.get_default_graph())\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.filter_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n other_arguments=self._predicate.captured_inputs,\r\n predicate=self._predicate,\r\n output_types=nest.flatten(\r\n sparse.as_dense_types(self.output_types, self.output_classes)),\r\n output_shapes=nest.flatten(\r\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\r\n\r\n @property\r\n def output_classes(self):\r\n return self._input_dataset.output_classes\r\n\r\n @property\r\n def output_shapes(self):\r\n return self._input_dataset.output_shapes\r\n\r\n @property\r\n def output_types(self):\r\n return self._input_dataset.output_types\r\n\r\n\r\nclass PrefetchDataset(Dataset):\r\n \"\"\"A `Dataset` that asynchronously prefetches its input.\"\"\"\r\n\r\n def __init__(self, input_dataset, buffer_size):\r\n \"\"\"See `Dataset.prefetch()` for details.\"\"\"\r\n super(PrefetchDataset, self).__init__()\r\n self._input_dataset = input_dataset\r\n self._buffer_size = ops.convert_to_tensor(\r\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\r\n\r\n def _as_variant_tensor(self):\r\n return gen_dataset_ops.prefetch_dataset(\r\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\r\n buffer_size=self._buffer_size,\r\n 
output_shapes=nest.flatten(\r\n            sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\r\n        output_types=nest.flatten(\r\n            sparse.as_dense_types(self.output_types, self.output_classes)))\r\n\r\n  @property\r\n  def output_classes(self):\r\n    return self._input_dataset.output_classes\r\n\r\n  @property\r\n  def output_shapes(self):\r\n    return self._input_dataset.output_shapes\r\n\r\n  @property\r\n  def output_types(self):\r\n    return self._input_dataset.output_types\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Real NVP bijector.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.layers import core as layers\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.ops import template as template_ops\r\nfrom tensorflow.python.ops.distributions import bijector as bijector_lib\r\n\r\n\r\n__all__ = [\r\n    \"RealNVP\",\r\n    \"real_nvp_default_template\"\r\n]\r\n\r\n\r\nclass RealNVP(bijector_lib.Bijector):\r\n  \"\"\"RealNVP \"affine coupling layer\" for vector-valued events.\r\n\r\n  Real NVP models a normalizing flow on a `D`-dimensional distribution via a\r\n  single `D-d`-dimensional conditional distribution [1]:\r\n\r\n  `y[d:D] = x[d:D] * math_ops.exp(log_scale_fn(x[0:d])) + shift_fn(x[0:d])`\r\n  `y[0:d] = x[0:d]`\r\n\r\n  The last `D-d` units are scaled and shifted based on the first `d` units only,\r\n  while the first `d` units are 'masked' and left unchanged. Real NVP's\r\n  `shift_and_log_scale_fn` computes vector-valued quantities. For\r\n  scale-and-shift transforms that do not depend on any masked units, i.e.\r\n  `d=0`, use the `tfb.Affine` bijector with learned parameters instead.\r\n\r\n  Masking is currently only supported for base distributions with\r\n  `event_ndims=1`. For more sophisticated masking schemes like checkerboard or\r\n  channel-wise masking [2], use the `tfb.Permute` bijector to re-order desired\r\n  masked units into the first `d` units. For base distributions with\r\n  `event_ndims > 1`, use the `tfb.Reshape` bijector to flatten the event shape.\r\n\r\n  Recall that the MAF bijector [2] implements a normalizing flow via an\r\n  autoregressive transformation. MAF and IAF have opposite computational\r\n  tradeoffs - MAF can train all units in parallel but must sample units\r\n  sequentially, while IAF must train units sequentially but can sample in\r\n  parallel. In contrast, Real NVP can perform both forward and inverse\r\n  computations in parallel. 
However, the lack of autoregressive\r\n  transformations makes it less expressive on a per-bijector basis.\r\n\r\n  A \"valid\" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or\r\n  \"mu\" [2]) and `log(scale)` (aka \"alpha\" [2]) such that each is broadcastable\r\n  with the arguments to `forward` and `inverse`, i.e., such that the\r\n  calculations in `forward`, `inverse` [below] are possible. For convenience,\r\n  `real_nvp_default_template` is offered as a possible `shift_and_log_scale_fn`\r\n  function.\r\n\r\n  NICE [3] is a special case of the Real NVP bijector which discards the scale\r\n  transformation, resulting in a constant-time inverse-log-determinant-Jacobian.\r\n  To use a NICE bijector instead of Real NVP, `shift_and_log_scale_fn` should\r\n  return `(shift, None)`, and `is_constant_jacobian` should be set to `True` in\r\n  the `RealNVP` constructor. Calling `real_nvp_default_template` with\r\n  `shift_only=True` returns one such NICE-compatible `shift_and_log_scale_fn`.\r\n\r\n  Caching: the scalar input depth `D` of the base distribution is not known at\r\n  construction time. The first call to any of `forward(x)`, `inverse(x)`,\r\n  `inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes\r\n  `D`, which is re-used in subsequent calls. This shape must be known prior to\r\n  graph execution (which is the case if using tf.layers).\r\n\r\n  #### Example Use\r\n\r\n  ```python\r\n  tfd = tf.contrib.distributions\r\n  tfb = tfd.bijectors\r\n\r\n  # A common choice for a normalizing flow is to use a Gaussian for the base\r\n  # distribution. (However, any continuous distribution would work.) E.g.,\r\n  nvp = tfd.TransformedDistribution(\r\n      distribution=tfd.MultivariateNormalDiag(loc=[0., 0., 0.]),\r\n      bijector=tfb.RealNVP(\r\n          num_masked=2,\r\n          shift_and_log_scale_fn=tfb.real_nvp_default_template(\r\n              hidden_layers=[512, 512])))\r\n\r\n  x = nvp.sample()\r\n  nvp.log_prob(x)\r\n  nvp.log_prob(0.)\r\n  ```\r\n\r\n  For more examples, see [4].\r\n\r\n  [1]: \"Density Estimation using Real NVP.\"\r\n       Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio. ICLR. 2017.\r\n       https://arxiv.org/abs/1605.08803\r\n\r\n  [2]: \"Masked Autoregressive Flow for Density Estimation.\"\r\n       George Papamakarios, Theo Pavlakou, Iain Murray. Arxiv. 2017.\r\n       https://arxiv.org/abs/1705.07057\r\n\r\n  [3]: \"NICE: Non-linear Independent Components Estimation.\"\r\n       Laurent Dinh, David Krueger, Yoshua Bengio. ICLR. 2015.\r\n       https://arxiv.org/abs/1410.8516\r\n\r\n  [4]: \"Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows.\"\r\n       Eric Jang. Blog post. January 2018.\r\n       http://blog.evjang.com/2018/01/nf2.html\r\n  \"\"\"\r\n\r\n  def __init__(self,\r\n               num_masked,\r\n               shift_and_log_scale_fn,\r\n               is_constant_jacobian=False,\r\n               validate_args=False,\r\n               name=None):\r\n    \"\"\"Creates the Real NVP or NICE bijector.\r\n\r\n    Args:\r\n      num_masked: Python `int` indicating that the first `d` units of the event\r\n        should be masked. Must be in the closed interval `[1, D-1]`, where `D`\r\n        is the event size of the base distribution.\r\n      shift_and_log_scale_fn: Python `callable` which computes `shift` and\r\n        `log_scale` from both the forward domain (`x`) and the inverse domain\r\n        (`y`). Calculation must respect the \"autoregressive property\" (see class\r\n        docstring). Suggested default\r\n        `real_nvp_default_template(hidden_layers=...)`.\r\n        Typically the function contains `tf.Variables` and is wrapped using\r\n        `tf.make_template`. 
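As an editor-added sketch (not part of the original docs), a minimal\r\n        compatible function, using the hypothetical name\r\n        `my_shift_and_log_scale_fn`, could be:\r\n\r\n          def my_shift_and_log_scale_fn(x0, output_units):\r\n            # `x0` holds the masked units x[0:d]; `output_units` equals D - d.\r\n            shift = tf.layers.dense(x0, output_units)\r\n            log_scale = tf.layers.dense(x0, output_units)\r\n            return shift, log_scale\r\n\r\n        wrapped with `tf.make_template` so the dense layers' variables are\r\n        created once and shared across `forward`/`inverse` calls.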
Returning `None` for either (both) `shift`,\r\n `log_scale` is equivalent to (but more efficient than) returning zero.\r\n is_constant_jacobian: Python `bool`. Default: `False`. When `True` the\r\n implementation assumes `log_scale` does not depend on the forward domain\r\n (`x`) or inverse domain (`y`) values. (No validation is made;\r\n `is_constant_jacobian=False` is always safe but possibly computationally\r\n inefficient.)\r\n validate_args: Python `bool` indicating whether arguments should be\r\n checked for correctness.\r\n name: Python `str`, name given to ops managed by this object.\r\n\r\n Raises:\r\n ValueError: If num_masked < 1.\r\n \"\"\"\r\n name = name or \"real_nvp\"\r\n if num_masked <= 0:\r\n raise ValueError(\"num_masked must be a positive integer.\")\r\n self._num_masked = num_masked\r\n # At construction time, we don't know input_depth.\r\n self._input_depth = None\r\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\r\n super(RealNVP, self).__init__(\r\n event_ndims=1,\r\n is_constant_jacobian=is_constant_jacobian,\r\n validate_args=validate_args,\r\n name=name)\r\n\r\n def _cache_input_depth(self, x):\r\n if self._input_depth is None:\r\n self._input_depth = x.shape.with_rank_at_least(1)[-1].value\r\n if self._input_depth is None:\r\n raise NotImplementedError(\r\n \"Rightmost dimension must be known prior to graph execution.\")\r\n if self._num_masked >= self._input_depth:\r\n raise ValueError(\r\n \"Number of masked units must be smaller than the event size.\")\r\n\r\n def _forward(self, x):\r\n self._cache_input_depth(x)\r\n # Performs scale and shift.\r\n x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:]\r\n shift, log_scale = self._shift_and_log_scale_fn(\r\n x0, self._input_depth - self._num_masked)\r\n y1 = x1\r\n if log_scale is not None:\r\n y1 *= math_ops.exp(log_scale)\r\n if shift is not None:\r\n y1 += shift\r\n y = array_ops.concat([x0, y1], axis=-1)\r\n return y\r\n\r\n def _inverse(self, y):\r\n self._cache_input_depth(y)\r\n # Performs un-shift and un-scale.\r\n y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:]\r\n shift, log_scale = self._shift_and_log_scale_fn(\r\n y0, self._input_depth - self._num_masked)\r\n x1 = y1\r\n if shift is not None:\r\n x1 -= shift\r\n if log_scale is not None:\r\n x1 *= math_ops.exp(-log_scale)\r\n x = array_ops.concat([y0, x1], axis=-1)\r\n return x\r\n\r\n def _inverse_log_det_jacobian(self, y):\r\n self._cache_input_depth(y)\r\n y0 = y[:, :self._num_masked]\r\n _, log_scale = self._shift_and_log_scale_fn(\r\n y0, self._input_depth - self._num_masked)\r\n if log_scale is None:\r\n return constant_op.constant(0., dtype=y.dtype, name=\"ildj\")\r\n return -math_ops.reduce_sum(log_scale, axis=-1)\r\n\r\n def _forward_log_det_jacobian(self, x):\r\n self._cache_input_depth(x)\r\n x0 = x[:, :self._num_masked]\r\n _, log_scale = self._shift_and_log_scale_fn(\r\n x0, self._input_depth - self._num_masked)\r\n if log_scale is None:\r\n return constant_op.constant(0., dtype=x.dtype, name=\"ildj\")\r\n return math_ops.reduce_sum(log_scale, axis=-1)\r\n\r\n\r\ndef real_nvp_default_template(\r\n hidden_layers,\r\n shift_only=False,\r\n activation=nn_ops.relu,\r\n name=None,\r\n *args,\r\n **kwargs):\r\n \"\"\"Build a scale-and-shift function using a multi-layer neural network.\r\n\r\n This will be wrapped in a make_template to ensure the variables are only\r\n created once. 
It takes the `d`-dimensional input x[0:d] and returns the `D-d`\r\n dimensional outputs `loc` (\"mu\") and `log_scale` (\"alpha\").\r\n\r\n Arguments:\r\n hidden_layers: Python `list`-like of non-negative integer, scalars\r\n indicating the number of units in each hidden layer. Default: `[512, 512].\r\n shift_only: Python `bool` indicating if only the `shift` term shall be\r\n computed (i.e. NICE bijector). Default: `False`.\r\n activation: Activation function (callable). Explicitly setting to `None`\r\n implies a linear activation.\r\n name: A name for ops managed by this function. Default:\r\n \"real_nvp_default_template\".\r\n *args: `tf.layers.dense` arguments.\r\n **kwargs: `tf.layers.dense` keyword arguments.\r\n\r\n Returns:\r\n shift: `Float`-like `Tensor` of shift terms (the \"mu\" in [2]).\r\n log_scale: `Float`-like `Tensor` of log(scale) terms (the \"alpha\" in [2]).\r\n\r\n Raises:\r\n NotImplementedError: if rightmost dimension of `inputs` is unknown prior to\r\n graph execution.\r\n \"\"\"\r\n\r\n with ops.name_scope(name, \"real_nvp_default_template\"):\r\n def _fn(x, output_units):\r\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\r\n for units in hidden_layers:\r\n x = layers.dense(\r\n inputs=x,\r\n units=units,\r\n activation=activation,\r\n *args,\r\n **kwargs)\r\n x = layers.dense(\r\n inputs=x,\r\n units=(1 if shift_only else 2) * output_units,\r\n activation=None,\r\n *args,\r\n **kwargs)\r\n if shift_only:\r\n return x, None\r\n shift, log_scale = array_ops.split(x, 2, axis=-1)\r\n return shift, log_scale\r\n return template_ops.make_template(\r\n \"real_nvp_default_template\", _fn)\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Run Config (deprecated, use tf.estimator.RunConfig instead).\r\n\r\nThis module and all its submodules are deprecated. See\r\n[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)\r\nfor migration instructions.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport json\r\nimport os\r\n\r\nimport six\r\n\r\nfrom tensorflow.contrib.framework.python.framework import experimental\r\nfrom tensorflow.core.protobuf import config_pb2\r\nfrom tensorflow.python.estimator import run_config as core_run_config\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import server_lib\r\nfrom tensorflow.python.util.deprecation import deprecated\r\n\r\n\r\n# A list of the property names in RunConfig user allows to change. 
They will\r\n# not affect the execution framework, so when the execution framework checks\r\n# the `uid` of the RunConfig, they should be ignored.\r\n_DEFAULT_UID_WHITE_LIST = [\r\n    'tf_random_seed',\r\n    'save_summary_steps',\r\n    'save_checkpoints_steps',\r\n    'save_checkpoints_secs',\r\n    'session_config',\r\n    'keep_checkpoint_max',\r\n    'keep_checkpoint_every_n_hours',\r\n    'log_step_count_steps',\r\n]\r\n\r\n\r\nclass Environment(object):\r\n  \"\"\"DEPRECATED CLASS.\"\"\"\r\n  # For running general distributed training.\r\n  CLOUD = 'cloud'\r\n  # For running Google-internal distributed training.\r\n  GOOGLE = 'google'\r\n  # For running on local desktop.\r\n  LOCAL = 'local'\r\n\r\n\r\nclass TaskType(object):\r\n  \"\"\"DEPRECATED CLASS.\"\"\"\r\n  MASTER = 'master'\r\n  PS = 'ps'\r\n  WORKER = 'worker'\r\n\r\n\r\nclass ClusterConfig(object):\r\n  \"\"\"This class specifies the configurations for a distributed run.\r\n\r\n  THIS CLASS IS DEPRECATED. Use tf.estimator.RunConfig instead.\r\n\r\n  If you're using an `Estimator`, you should probably use the subclass\r\n  RunConfig instead.\r\n  \"\"\"\r\n\r\n  def __init__(self, master=None, evaluation_master=None):\r\n    \"\"\"Constructor.\r\n\r\n    Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the\r\n    args), `num_ps_replicas`, `task_id`, and `task_type` based on the\r\n    `TF_CONFIG` environment variable, if the pertinent information is\r\n    present. The `TF_CONFIG` environment variable is a JSON object with\r\n    attributes: `cluster`, `environment`, and `task`.\r\n\r\n    `cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from\r\n    `server_lib.py`, mapping task types (usually one of the TaskType enums) to a\r\n    list of task addresses.\r\n\r\n    `environment` specifies the runtime environment for the job (usually one of\r\n    the `Environment` enums). Defaults to `LOCAL`.\r\n\r\n    `task` has two attributes: `type` and `index`, where `type` can be any of\r\n    the task types in `cluster`. When `TF_CONFIG` contains said information, the\r\n    following properties are set on this class:\r\n\r\n    * `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.\r\n    * `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.\r\n    * `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.\r\n    * `master` is determined by looking up `task_type` and `task_id` in the\r\n      `cluster_spec`. Defaults to ''.\r\n    * `num_ps_replicas` is set by counting the number of nodes listed\r\n      in the `ps` attribute of `cluster_spec`. Defaults to 0.\r\n    * `num_worker_replicas` is set by counting the number of nodes listed\r\n      in the `worker` attribute of `cluster_spec`. Defaults to 0.\r\n    * `is_chief` is determined based on `task_type`, `task_id`, and\r\n      `environment`.\r\n\r\n    Example:\r\n    ```\r\n      cluster = {'ps': ['host1:2222', 'host2:2222'],\r\n                 'worker': ['host3:2222', 'host4:2222', 'host5:2222']}\r\n      os.environ['TF_CONFIG'] = json.dumps(\r\n          {'cluster': cluster,\r\n           'task': {'type': 'worker', 'index': 1}})\r\n      config = ClusterConfig()\r\n      assert config.master == 'host4:2222'\r\n      assert config.task_id == 1\r\n      assert config.num_ps_replicas == 2\r\n      assert config.num_worker_replicas == 3\r\n      assert config.cluster_spec == server_lib.ClusterSpec(cluster)\r\n      assert config.task_type == 'worker'\r\n      assert not config.is_chief\r\n    ```\r\n\r\n    Args:\r\n      master: TensorFlow master. 
Defaults to empty string for local.\r\n evaluation_master: The master on which to perform evaluation.\r\n \"\"\"\r\n # If not explicitly specified in the constructor and the TF_CONFIG\r\n # environment variable is present, load cluster_spec from TF_CONFIG.\r\n config = json.loads(os.environ.get('TF_CONFIG') or '{}')\r\n\r\n # Set task_type and task_id if the TF_CONFIG environment variable is\r\n # present. Otherwise, use the respective default (None / 0).\r\n task_env = config.get('task', {})\r\n self._task_type = task_env.get('type', None)\r\n self._task_id = self.get_task_id()\r\n\r\n self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))\r\n self._master = (master if master is not None else\r\n _get_master(self._cluster_spec, self._task_type,\r\n self._task_id) or '')\r\n self._num_ps_replicas = _count_ps(self._cluster_spec) or 0\r\n self._num_worker_replicas = _count_worker(self._cluster_spec) or 0\r\n\r\n # Set is_chief.\r\n self._environment = config.get('environment', Environment.LOCAL)\r\n self._is_chief = None\r\n if self._task_type is None:\r\n self._is_chief = (self._task_id == 0)\r\n elif self._environment == Environment.CLOUD:\r\n # When the TF_CONFIG environment variable is set, we can set the\r\n # default of is_chief to 0 when task_type is \"master\" and task_id is 0.\r\n self._is_chief = (self._task_type == TaskType.MASTER and\r\n self._task_id == 0)\r\n else:\r\n # Legacy behavior is that is_chief is None if task_id == 0.\r\n self._is_chief = (self._task_type == TaskType.WORKER and\r\n self._task_id == 0)\r\n\r\n self._evaluation_master = evaluation_master or ''\r\n\r\n @property\r\n def cluster_spec(self):\r\n return self._cluster_spec\r\n\r\n @property\r\n def environment(self):\r\n return self._environment\r\n\r\n @property\r\n def evaluation_master(self):\r\n return self._evaluation_master\r\n\r\n @property\r\n def is_chief(self):\r\n return self._is_chief\r\n\r\n @property\r\n def master(self):\r\n return self._master\r\n\r\n @property\r\n def num_ps_replicas(self):\r\n return self._num_ps_replicas\r\n\r\n @property\r\n def num_worker_replicas(self):\r\n return self._num_worker_replicas\r\n\r\n @property\r\n def task_id(self):\r\n return self._task_id\r\n\r\n @property\r\n def task_type(self):\r\n return self._task_type\r\n\r\n @staticmethod\r\n def get_task_id():\r\n \"\"\"Returns task index from `TF_CONFIG` environmental variable.\r\n\r\n If you have a ClusterConfig instance, you can just access its task_id\r\n property instead of calling this function and re-parsing the environmental\r\n variable.\r\n\r\n Returns:\r\n `TF_CONFIG['task']['index']`. 
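(An editor-added illustration: a `TF_CONFIG` whose `task` entry is\r\n      `{'type': 'worker', 'index': 1}` makes this return `1`.)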
Defaults to 0.\r\n \"\"\"\r\n config = json.loads(os.environ.get('TF_CONFIG') or '{}')\r\n task_env = config.get('task', {})\r\n task_index = task_env.get('index')\r\n return int(task_index) if task_index else 0\r\n\r\n\r\nclass RunConfig(ClusterConfig, core_run_config.RunConfig):\r\n \"\"\"This class specifies the configurations for an `Estimator` run.\r\n\r\n This class is a deprecated implementation of @{tf.estimator.RunConfig}\r\n interface.\r\n \"\"\"\r\n _USE_DEFAULT = 0\r\n\r\n @deprecated(None, 'When switching to tf.estimator.Estimator, use'\r\n ' tf.estimator.RunConfig instead.')\r\n def __init__(self,\r\n master=None,\r\n num_cores=0,\r\n log_device_placement=False,\r\n gpu_memory_fraction=1,\r\n tf_random_seed=None,\r\n save_summary_steps=100,\r\n save_checkpoints_secs=_USE_DEFAULT,\r\n save_checkpoints_steps=None,\r\n keep_checkpoint_max=5,\r\n keep_checkpoint_every_n_hours=10000,\r\n log_step_count_steps=100,\r\n evaluation_master='',\r\n model_dir=None,\r\n session_config=None):\r\n \"\"\"Constructor.\r\n\r\n The superclass `ClusterConfig` may set properties like `cluster_spec`,\r\n `is_chief`, `master` (if `None` in the args), `num_ps_replicas`, `task_id`,\r\n and `task_type` based on the `TF_CONFIG` environment variable. See\r\n `ClusterConfig` for more details.\r\n\r\n N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,\r\n `keep_checkpoint_max` might need to be adjusted accordingly, especially in\r\n distributed training. For example, setting `save_checkpoints_secs` as 60\r\n without adjusting `keep_checkpoint_max` (defaults to 5) leads to situation\r\n that checkpoint would be garbage collected after 5 minutes. In distributed\r\n training, the evaluation job starts asynchronously and might fail to load or\r\n find the checkpoint due to race condition.\r\n\r\n Args:\r\n master: TensorFlow master. Defaults to empty string for local.\r\n num_cores: Number of cores to be used. If 0, the system picks an\r\n appropriate number (default: 0).\r\n log_device_placement: Log the op placement to devices (default: False).\r\n gpu_memory_fraction: Fraction of GPU memory used by the process on\r\n each GPU uniformly on the same machine.\r\n tf_random_seed: Random seed for TensorFlow initializers.\r\n Setting this value allows consistency between reruns.\r\n save_summary_steps: Save summaries every this many steps.\r\n save_checkpoints_secs: Save checkpoints every this many seconds. Can not\r\n be specified with `save_checkpoints_steps`.\r\n save_checkpoints_steps: Save checkpoints every this many steps. Can not be\r\n specified with `save_checkpoints_secs`.\r\n keep_checkpoint_max: The maximum number of recent checkpoint files to\r\n keep. As new files are created, older files are deleted. If None or 0,\r\n all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent\r\n checkpoint files are kept.)\r\n keep_checkpoint_every_n_hours: Number of hours between each checkpoint\r\n to be saved. The default value of 10,000 hours effectively disables\r\n the feature.\r\n log_step_count_steps: The frequency, in number of global steps, that the\r\n global step/sec will be logged during training.\r\n evaluation_master: the master on which to perform evaluation.\r\n model_dir: directory where model parameters, graph etc are saved. If\r\n `None`, will use `model_dir` property in `TF_CONFIG` environment\r\n variable. If both are set, must have same value. 
If both are `None`, see\r\n `Estimator` about where the model will be saved.\r\n session_config: a ConfigProto used to set session parameters, or None.\r\n Note - using this argument, it is easy to provide settings which break\r\n otherwise perfectly good models. Use with care.\r\n \"\"\"\r\n super(RunConfig, self).__init__(\r\n master=master, evaluation_master=evaluation_master)\r\n\r\n gpu_options = config_pb2.GPUOptions(\r\n per_process_gpu_memory_fraction=gpu_memory_fraction)\r\n self._tf_config = config_pb2.ConfigProto(\r\n log_device_placement=log_device_placement,\r\n inter_op_parallelism_threads=num_cores,\r\n intra_op_parallelism_threads=num_cores,\r\n gpu_options=gpu_options)\r\n\r\n self._tf_random_seed = tf_random_seed\r\n self._save_summary_steps = save_summary_steps\r\n self._save_checkpoints_secs = save_checkpoints_secs\r\n self._log_step_count_steps = log_step_count_steps\r\n self._session_config = session_config\r\n if save_checkpoints_secs == RunConfig._USE_DEFAULT:\r\n if save_checkpoints_steps is None:\r\n self._save_checkpoints_secs = 600\r\n else:\r\n self._save_checkpoints_secs = None\r\n self._save_checkpoints_steps = save_checkpoints_steps\r\n\r\n # TODO(weiho): Remove these after ModelFn refactoring, when users can\r\n # create Scaffold and Saver in their model_fn to set these.\r\n self._keep_checkpoint_max = keep_checkpoint_max\r\n self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\r\n self._model_dir = _get_model_dir(model_dir)\r\n\r\n @experimental\r\n def uid(self, whitelist=None):\r\n \"\"\"Generates a 'Unique Identifier' based on all internal fields.\r\n\r\n Caller should use the uid string to check `RunConfig` instance integrity\r\n in one session use, but should not rely on the implementation details, which\r\n is subject to change.\r\n\r\n Args:\r\n whitelist: A list of the string names of the properties uid should not\r\n include. 
If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which\r\n        includes most properties users are allowed to change.\r\n\r\n    Returns:\r\n      A uid string.\r\n    \"\"\"\r\n    if whitelist is None:\r\n      whitelist = _DEFAULT_UID_WHITE_LIST\r\n\r\n    state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}\r\n    # Pop out the keys in whitelist.\r\n    for k in whitelist:\r\n      state.pop('_' + k, None)\r\n\r\n    ordered_state = collections.OrderedDict(\r\n        sorted(state.items(), key=lambda t: t[0]))\r\n    # For class instances without __repr__, special care is required.\r\n    # Otherwise, the object address will be used.\r\n    if '_cluster_spec' in ordered_state:\r\n      ordered_state['_cluster_spec'] = collections.OrderedDict(\r\n          sorted(ordered_state['_cluster_spec'].as_dict().items(),\r\n                 key=lambda t: t[0]))\r\n    return ', '.join(\r\n        '%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))\r\n\r\n  @property\r\n  def model_dir(self):\r\n    return self._model_dir\r\n\r\n  @property\r\n  def tf_config(self):\r\n    return self._tf_config\r\n\r\n  @property\r\n  def tf_random_seed(self):\r\n    return self._tf_random_seed\r\n\r\n  @property\r\n  def save_summary_steps(self):\r\n    return self._save_summary_steps\r\n\r\n  @property\r\n  def save_checkpoints_secs(self):\r\n    return self._save_checkpoints_secs\r\n\r\n  @property\r\n  def save_checkpoints_steps(self):\r\n    return self._save_checkpoints_steps\r\n\r\n  @property\r\n  def session_config(self):\r\n    return self._session_config\r\n\r\n  @property\r\n  def keep_checkpoint_max(self):\r\n    return self._keep_checkpoint_max\r\n\r\n  @property\r\n  def keep_checkpoint_every_n_hours(self):\r\n    return self._keep_checkpoint_every_n_hours\r\n\r\n  @property\r\n  def log_step_count_steps(self):\r\n    return self._log_step_count_steps\r\n\r\n\r\ndef _count_ps(cluster_spec):\r\n  \"\"\"Counts the number of parameter servers in cluster_spec.\"\"\"\r\n  return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0\r\n\r\n\r\ndef _count_worker(cluster_spec):\r\n  \"\"\"Counts the number of workers in cluster_spec.\r\n\r\n  Workers with TaskType.WORKER and TaskType.MASTER are included in the return\r\n  value.\r\n\r\n  Args:\r\n    cluster_spec: a ClusterSpec instance that describes current deployment.\r\n\r\n  Returns:\r\n    The total number of eligible workers.\r\n\r\n    If 'cluster_spec' was None, then 0 is returned.\r\n  \"\"\"\r\n  return (len(cluster_spec.as_dict().get('worker', [])) +\r\n          len(cluster_spec.as_dict().get('master', []))) if cluster_spec else 0\r\n\r\n\r\ndef _get_master(cluster_spec, task_type, task_id):\r\n  \"\"\"Returns the appropriate string for the TensorFlow master.\"\"\"\r\n  if not cluster_spec:\r\n    return ''\r\n\r\n  # If there is only one node in the cluster, do things locally.\r\n  jobs = cluster_spec.jobs\r\n  if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:\r\n    return ''\r\n\r\n  # Look up the master in cluster_spec using task_type and task_id,\r\n  # if possible.\r\n  if task_type:\r\n    if task_type not in jobs:\r\n      raise ValueError(\r\n          '%s is not a valid task_type in the cluster_spec:\\n'\r\n          '%s\\n\\n'\r\n          'Note that these values may be coming from the TF_CONFIG environment '\r\n          'variable.' % (task_type, cluster_spec))\r\n    addresses = cluster_spec.job_tasks(task_type)\r\n    if task_id >= len(addresses) or task_id < 0:\r\n      raise ValueError(\r\n          '%d is not a valid task_id for task_type %s in the '\r\n          'cluster_spec:\\n'\r\n          '%s\\n\\n'\r\n          'Note that these values may be coming from the TF_CONFIG environment '\r\n          'variable.' 
% (task_id, task_type, cluster_spec))\r\n return 'grpc://' + addresses[task_id]\r\n\r\n # For backwards compatibility, we return empty string if task_type was\r\n # not set (task_type did not previously exist).\r\n return ''\r\n\r\n\r\ndef _get_model_dir(model_dir):\r\n \"\"\"Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`.\"\"\"\r\n\r\n model_dir_in_tf_config = json.loads(\r\n os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)\r\n if model_dir_in_tf_config is not None:\r\n if model_dir is not None and model_dir_in_tf_config != model_dir:\r\n raise ValueError(\r\n '`model_dir` provided in RunConfig construct, if set, '\r\n 'must have the same value as the model_dir in TF_CONFIG. '\r\n 'model_dir: {}\\nTF_CONFIG[\"model_dir\"]: {}.\\n'.format(\r\n model_dir, model_dir_in_tf_config))\r\n\r\n logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)\r\n\r\n return model_dir or model_dir_in_tf_config\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"TargetColumn abstract a single head in the model.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport six\r\n\r\nfrom tensorflow.contrib.framework import deprecated\r\nfrom tensorflow.contrib.losses.python.losses import loss_ops\r\nfrom tensorflow.contrib.metrics.python.ops import metric_ops\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\n\r\n\r\n@deprecated(\r\n \"2016-11-12\", \"This file will be removed after the deprecation date.\"\r\n \"Please switch to \"\r\n \"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py\")\r\ndef regression_target(label_name=None,\r\n weight_column_name=None,\r\n label_dimension=1):\r\n \"\"\"Creates a _TargetColumn for linear regression.\r\n\r\n Args:\r\n label_name: String, name of the key in label dict. Can be null if label\r\n is a tensor (single headed models).\r\n weight_column_name: A string defining feature column name representing\r\n weights. It is used to down weight or boost examples during training. 
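(An editor-added illustration: a weight of `2.0` makes an example count\r\n      twice as much in the weighted loss.)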
It\r\n will be multiplied by the loss of the example.\r\n label_dimension: dimension of the target for multilabels.\r\n\r\n Returns:\r\n An instance of _TargetColumn\r\n \"\"\"\r\n return _RegressionTargetColumn(\r\n loss_fn=_mean_squared_loss,\r\n label_name=label_name,\r\n weight_column_name=weight_column_name,\r\n label_dimension=label_dimension)\r\n\r\n\r\n# TODO(zakaria): Add logistic_regression_target\r\n\r\n\r\n@deprecated(\r\n \"2016-11-12\", \"This file will be removed after the deprecation date.\"\r\n \"Please switch to \"\r\n \"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py\")\r\ndef multi_class_target(n_classes, label_name=None, weight_column_name=None):\r\n \"\"\"Creates a _TargetColumn for multi class single label classification.\r\n\r\n The target column uses softmax cross entropy loss.\r\n\r\n Args:\r\n n_classes: Integer, number of classes, must be >= 2\r\n label_name: String, name of the key in label dict. Can be null if label\r\n is a tensor (single headed models).\r\n weight_column_name: A string defining feature column name representing\r\n weights. It is used to down weight or boost examples during training. It\r\n will be multiplied by the loss of the example.\r\n\r\n Returns:\r\n An instance of _MultiClassTargetColumn.\r\n\r\n Raises:\r\n ValueError: if n_classes is < 2\r\n \"\"\"\r\n if n_classes < 2:\r\n raise ValueError(\"n_classes must be > 1 for classification.\")\r\n if n_classes == 2:\r\n loss_fn = _log_loss_with_two_classes\r\n else:\r\n loss_fn = _softmax_cross_entropy_loss\r\n return _MultiClassTargetColumn(\r\n loss_fn=loss_fn,\r\n n_classes=n_classes,\r\n label_name=label_name,\r\n weight_column_name=weight_column_name)\r\n\r\n\r\n@deprecated(\r\n \"2016-11-12\", \"This file will be removed after the deprecation date.\"\r\n \"Please switch to \"\r\n \"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py\")\r\ndef binary_svm_target(label_name=None, weight_column_name=None):\r\n \"\"\"Creates a _TargetColumn for binary classification with SVMs.\r\n\r\n The target column uses binary hinge loss.\r\n\r\n Args:\r\n label_name: String, name of the key in label dict. Can be null if label\r\n is a tensor (single headed models).\r\n weight_column_name: A string defining feature column name representing\r\n weights. It is used to down weight or boost examples during training. It\r\n will be multiplied by the loss of the example.\r\n\r\n Returns:\r\n An instance of _TargetColumn.\r\n\r\n \"\"\"\r\n return _BinarySvmTargetColumn(\r\n label_name=label_name, weight_column_name=weight_column_name)\r\n\r\n\r\n@deprecated(\r\n \"2016-11-12\", \"This file will be removed after the deprecation date.\"\r\n \"Please switch to \"\r\n \"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py\")\r\nclass ProblemType(object):\r\n UNSPECIFIED = 0\r\n CLASSIFICATION = 1\r\n LINEAR_REGRESSION = 2\r\n LOGISTIC_REGRESSION = 3\r\n\r\n\r\nclass _TargetColumn(object):\r\n \"\"\"_TargetColumn is the abstraction for a single head in a model.\r\n\r\n Args:\r\n loss_fn: a function that returns the loss tensor.\r\n num_label_columns: Integer, number of label columns.\r\n label_name: String, name of the key in label dict. Can be null if label\r\n is a tensor (single headed models).\r\n weight_column_name: A string defining feature column name representing\r\n weights. It is used to down weight or boost examples during training. 
It\r\n      will be multiplied by the loss of the example.\r\n\r\n  Raises:\r\n    ValueError: if loss_fn or num_label_columns are missing.\r\n  \"\"\"\r\n\r\n  def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name,\r\n               problem_type):\r\n    if not loss_fn:\r\n      raise ValueError(\"loss_fn must be provided\")\r\n    if num_label_columns is None:  # n_classes can be 0\r\n      raise ValueError(\"num_label_columns must be provided\")\r\n\r\n    self._loss_fn = loss_fn\r\n    self._num_label_columns = num_label_columns\r\n    self._label_name = label_name\r\n    self._weight_column_name = weight_column_name\r\n    self._problem_type = problem_type\r\n\r\n  def logits_to_predictions(self, logits, proba=False):\r\n    # Abstract; subclasses must implement.\r\n    raise NotImplementedError()\r\n\r\n  def get_eval_ops(self, features, logits, labels, metrics=None):\r\n    \"\"\"Returns eval op.\"\"\"\r\n    raise NotImplementedError\r\n\r\n  @property\r\n  def label_name(self):\r\n    return self._label_name\r\n\r\n  @property\r\n  def weight_column_name(self):\r\n    return self._weight_column_name\r\n\r\n  @property\r\n  def num_label_columns(self):\r\n    return self._num_label_columns\r\n\r\n  def get_weight_tensor(self, features):\r\n    if not self._weight_column_name:\r\n      return None\r\n    else:\r\n      return array_ops.reshape(\r\n          math_ops.to_float(features[self._weight_column_name]), shape=(-1,))\r\n\r\n  @property\r\n  def problem_type(self):\r\n    return self._problem_type\r\n\r\n  def _weighted_loss(self, loss, weight_tensor):\r\n    \"\"\"Returns cumulative weighted loss.\"\"\"\r\n    unweighted_loss = array_ops.reshape(loss, shape=(-1,))\r\n    weighted_loss = math_ops.multiply(unweighted_loss,\r\n                                      array_ops.reshape(\r\n                                          weight_tensor, shape=(-1,)))\r\n    return weighted_loss\r\n\r\n  def training_loss(self, logits, target, features, name=\"training_loss\"):\r\n    \"\"\"Returns training loss tensor for this head.\r\n\r\n    Training loss is different from the loss reported on TensorBoard as we\r\n    should respect the example weights when computing the gradient.\r\n\r\n    L = sum_{i} w_{i} * l_{i} / B\r\n\r\n    where B is the number of examples in the batch, and l_{i} and w_{i} are the\r\n    individual loss and example weight.\r\n\r\n    Args:\r\n      logits: logits, a float tensor.\r\n      target: either a tensor for labels or in multihead case, a dict of string\r\n        to target tensor.\r\n      features: features dict.\r\n      name: Op name.\r\n\r\n    Returns:\r\n      Loss tensor.\r\n    \"\"\"\r\n    target = target[self.name] if isinstance(target, dict) else target\r\n    loss_unweighted = self._loss_fn(logits, target)\r\n\r\n    weight_tensor = self.get_weight_tensor(features)\r\n    if weight_tensor is None:\r\n      return math_ops.reduce_mean(loss_unweighted, name=name)\r\n    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)\r\n    return math_ops.reduce_mean(loss_weighted, name=name)\r\n\r\n  def loss(self, logits, target, features):\r\n    \"\"\"Returns loss tensor for this head.\r\n\r\n    The loss returned is the weighted average.\r\n\r\n    L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}\r\n\r\n    Args:\r\n      logits: logits, a float tensor.\r\n      target: either a tensor for labels or in multihead case, a dict of string\r\n        to target tensor.\r\n      features: features dict.\r\n\r\n    Returns:\r\n      Loss tensor.\r\n    \"\"\"\r\n    target = target[self.name] if isinstance(target, dict) else target\r\n    loss_unweighted = self._loss_fn(logits, target)\r\n\r\n    weight_tensor = self.get_weight_tensor(features)\r\n    if weight_tensor is None:\r\n      return math_ops.reduce_mean(loss_unweighted, name=\"loss\")\r\n    loss_weighted = 
self._weighted_loss(loss_unweighted, weight_tensor)\r\n return math_ops.div(math_ops.reduce_sum(loss_weighted),\r\n math_ops.to_float(math_ops.reduce_sum(weight_tensor)),\r\n name=\"loss\")\r\n\r\n\r\nclass _RegressionTargetColumn(_TargetColumn):\r\n \"\"\"_TargetColumn for regression.\"\"\"\r\n\r\n def __init__(self, loss_fn, label_name, weight_column_name, label_dimension):\r\n super(_RegressionTargetColumn, self).__init__(\r\n loss_fn=loss_fn,\r\n num_label_columns=label_dimension,\r\n label_name=label_name,\r\n weight_column_name=weight_column_name,\r\n problem_type=ProblemType.LINEAR_REGRESSION)\r\n\r\n def logits_to_predictions(self, logits, proba=False):\r\n if self.num_label_columns == 1:\r\n return array_ops.squeeze(logits, squeeze_dims=[1])\r\n return logits\r\n\r\n def get_eval_ops(self, features, logits, labels, metrics=None):\r\n loss = self.loss(logits, labels, features)\r\n result = {\"loss\": metric_ops.streaming_mean(loss)}\r\n if metrics:\r\n predictions = self.logits_to_predictions(logits, proba=False)\r\n result.update(\r\n _run_metrics(predictions, labels, metrics,\r\n self.get_weight_tensor(features)))\r\n return result\r\n\r\n\r\nclass _MultiClassTargetColumn(_TargetColumn):\r\n \"\"\"_TargetColumn for classification.\"\"\"\r\n\r\n # TODO(zakaria): support multilabel.\r\n def __init__(self, loss_fn, n_classes, label_name, weight_column_name):\r\n if n_classes < 2:\r\n raise ValueError(\"n_classes must be >= 2\")\r\n super(_MultiClassTargetColumn, self).__init__(\r\n loss_fn=loss_fn,\r\n num_label_columns=1 if n_classes == 2 else n_classes,\r\n label_name=label_name,\r\n weight_column_name=weight_column_name,\r\n problem_type=ProblemType.CLASSIFICATION)\r\n\r\n def logits_to_predictions(self, logits, proba=False):\r\n if self.num_label_columns == 1:\r\n logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)\r\n\r\n if proba:\r\n return nn.softmax(logits)\r\n else:\r\n return math_ops.argmax(logits, 1)\r\n\r\n def _default_eval_metrics(self):\r\n if self._num_label_columns == 1:\r\n return get_default_binary_metrics_for_eval(thresholds=[.5])\r\n return {}\r\n\r\n def get_eval_ops(self, features, logits, labels, metrics=None):\r\n loss = self.loss(logits, labels, features)\r\n result = {\"loss\": metric_ops.streaming_mean(loss)}\r\n\r\n # Adds default metrics.\r\n if metrics is None:\r\n # TODO(b/29366811): This currently results in both an \"accuracy\" and an\r\n # \"accuracy/threshold_0.500000_mean\" metric for binary classification.\r\n metrics = {(\"accuracy\", \"classes\"): metric_ops.streaming_accuracy}\r\n\r\n predictions = math_ops.sigmoid(logits)\r\n labels_float = math_ops.to_float(labels)\r\n\r\n default_metrics = self._default_eval_metrics()\r\n for metric_name, metric_op in default_metrics.items():\r\n result[metric_name] = metric_op(predictions, labels_float)\r\n\r\n class_metrics = {}\r\n proba_metrics = {}\r\n for name, metric_op in six.iteritems(metrics):\r\n if isinstance(name, tuple):\r\n if len(name) != 2:\r\n raise ValueError(\"Ignoring metric {}. It returned a tuple with \"\r\n \"len {}, expected 2.\".format(name, len(name)))\r\n else:\r\n if name[1] not in [\"classes\", \"probabilities\"]:\r\n raise ValueError(\"Ignoring metric {}. 
The 2nd element of its \"\r\n \"name should be either 'classes' or \"\r\n \"'probabilities'.\".format(name))\r\n elif name[1] == \"classes\":\r\n class_metrics[name[0]] = metric_op\r\n else:\r\n proba_metrics[name[0]] = metric_op\r\n elif isinstance(name, str):\r\n class_metrics[name] = metric_op\r\n else:\r\n raise ValueError(\"Ignoring metric {}. Its name is not in the correct \"\r\n \"form.\".format(name))\r\n if class_metrics:\r\n class_predictions = self.logits_to_predictions(logits, proba=False)\r\n result.update(\r\n _run_metrics(class_predictions, labels, class_metrics,\r\n self.get_weight_tensor(features)))\r\n if proba_metrics:\r\n predictions = self.logits_to_predictions(logits, proba=True)\r\n result.update(\r\n _run_metrics(predictions, labels, proba_metrics,\r\n self.get_weight_tensor(features)))\r\n return result\r\n\r\n\r\nclass _BinarySvmTargetColumn(_MultiClassTargetColumn):\r\n \"\"\"_TargetColumn for binary classification using SVMs.\"\"\"\r\n\r\n def __init__(self, label_name, weight_column_name):\r\n\r\n def loss_fn(logits, target):\r\n check_shape_op = control_flow_ops.Assert(\r\n math_ops.less_equal(array_ops.rank(target), 2),\r\n [\"target's shape should be either [batch_size, 1] or [batch_size]\"])\r\n with ops.control_dependencies([check_shape_op]):\r\n target = array_ops.reshape(\r\n target, shape=[array_ops.shape(target)[0], 1])\r\n return loss_ops.hinge_loss(logits, target)\r\n\r\n super(_BinarySvmTargetColumn, self).__init__(\r\n loss_fn=loss_fn,\r\n n_classes=2,\r\n label_name=label_name,\r\n weight_column_name=weight_column_name)\r\n\r\n def logits_to_predictions(self, logits, proba=False):\r\n if proba:\r\n raise ValueError(\r\n \"logits to probabilities is not supported for _BinarySvmTargetColumn\")\r\n\r\n logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)\r\n return math_ops.argmax(logits, 1)\r\n\r\n\r\n# TODO(zakaria): use contrib losses.\r\ndef _mean_squared_loss(logits, target):\r\n # To prevent broadcasting inside \"-\".\r\n if len(target.get_shape()) == 1:\r\n target = array_ops.expand_dims(target, dim=[1])\r\n\r\n logits.get_shape().assert_is_compatible_with(target.get_shape())\r\n return math_ops.square(logits - math_ops.to_float(target))\r\n\r\n\r\ndef _log_loss_with_two_classes(logits, target):\r\n # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.\r\n if len(target.get_shape()) == 1:\r\n target = array_ops.expand_dims(target, dim=[1])\r\n loss_vec = nn.sigmoid_cross_entropy_with_logits(\r\n labels=math_ops.to_float(target), logits=logits)\r\n return loss_vec\r\n\r\n\r\ndef _softmax_cross_entropy_loss(logits, target):\r\n # Check that we got integer for classification.\r\n if not target.dtype.is_integer:\r\n raise ValueError(\"Target's dtype should be integer \"\r\n \"Instead got %s.\" % target.dtype)\r\n # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.\r\n if len(target.get_shape()) == 2:\r\n target = array_ops.squeeze(target, squeeze_dims=[1])\r\n loss_vec = nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=target, logits=logits)\r\n return loss_vec\r\n\r\n\r\ndef _run_metrics(predictions, labels, metrics, weights):\r\n result = {}\r\n labels = math_ops.cast(labels, predictions.dtype)\r\n for name, metric in six.iteritems(metrics or {}):\r\n if weights is not None:\r\n result[name] = metric(predictions, labels, weights=weights)\r\n else:\r\n result[name] = metric(predictions, labels)\r\n\r\n return result\r\n\r\n\r\n@deprecated(\r\n \"2016-11-12\", \"This file will be removed 
after the deprecation date.\"\r\n \"Please switch to \"\r\n \"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py\")\r\ndef get_default_binary_metrics_for_eval(thresholds):\r\n \"\"\"Returns a dictionary of basic metrics for logistic regression.\r\n\r\n Args:\r\n thresholds: List of floating point thresholds to use for accuracy,\r\n precision, and recall metrics. If None, defaults to [0.5].\r\n\r\n Returns:\r\n Dictionary mapping metrics string names to metrics functions.\r\n \"\"\"\r\n metrics = {}\r\n metrics[_MetricKeys.PREDICTION_MEAN] = _predictions_streaming_mean\r\n metrics[_MetricKeys.TARGET_MEAN] = _labels_streaming_mean\r\n # Also include the streaming mean of the label as an accuracy baseline, as\r\n # a reminder to users.\r\n metrics[_MetricKeys.ACCURACY_BASELINE] = _labels_streaming_mean\r\n\r\n metrics[_MetricKeys.AUC] = _streaming_auc\r\n\r\n for threshold in thresholds:\r\n metrics[_MetricKeys.ACCURACY_MEAN %\r\n threshold] = _accuracy_at_threshold(threshold)\r\n # Precision for positive examples.\r\n metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(\r\n metric_ops.streaming_precision_at_thresholds, threshold)\r\n # Recall for positive examples.\r\n metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(\r\n metric_ops.streaming_recall_at_thresholds, threshold)\r\n\r\n return metrics\r\n\r\n\r\ndef _float_weights_or_none(weights):\r\n if weights is None:\r\n return None\r\n return math_ops.to_float(weights)\r\n\r\n\r\ndef _labels_streaming_mean(unused_predictions, labels, weights=None):\r\n return metric_ops.streaming_mean(labels, weights=weights)\r\n\r\n\r\ndef _predictions_streaming_mean(predictions, unused_labels, weights=None):\r\n return metric_ops.streaming_mean(predictions, weights=weights)\r\n\r\n\r\ndef _streaming_auc(predictions, labels, weights=None):\r\n return metric_ops.streaming_auc(\r\n predictions, labels, weights=_float_weights_or_none(weights))\r\n\r\n\r\ndef _accuracy_at_threshold(threshold):\r\n\r\n def _accuracy_metric(predictions, labels, weights=None):\r\n threshold_predictions = math_ops.to_float(\r\n math_ops.greater_equal(predictions, threshold))\r\n return metric_ops.streaming_accuracy(\r\n predictions=threshold_predictions, labels=labels, weights=weights)\r\n\r\n return _accuracy_metric\r\n\r\n\r\ndef _streaming_at_threshold(streaming_metrics_fn, threshold):\r\n\r\n def _streaming_metrics(predictions, labels, weights=None):\r\n precision_tensor, update_op = streaming_metrics_fn(\r\n predictions,\r\n labels=labels,\r\n thresholds=[threshold],\r\n weights=_float_weights_or_none(weights))\r\n return array_ops.squeeze(precision_tensor), update_op\r\n\r\n return _streaming_metrics\r\n\r\n\r\nclass _MetricKeys(object):\r\n AUC = \"auc\"\r\n PREDICTION_MEAN = \"labels/prediction_mean\"\r\n TARGET_MEAN = \"labels/actual_target_mean\"\r\n ACCURACY_BASELINE = \"accuracy/baseline_target_mean\"\r\n ACCURACY_MEAN = \"accuracy/threshold_%f_mean\"\r\n PRECISION_MEAN = \"precision/positive_threshold_%f_mean\"\r\n RECALL_MEAN = \"recall/positive_threshold_%f_mean\"\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Keras convolution layers and image transformation layers.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.keras._impl.keras import activations\r\nfrom tensorflow.python.keras._impl.keras import backend as K\r\nfrom tensorflow.python.keras._impl.keras import constraints\r\nfrom tensorflow.python.keras._impl.keras import initializers\r\nfrom tensorflow.python.keras._impl.keras import regularizers\r\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\r\nfrom tensorflow.python.keras._impl.keras.engine import Layer\r\n# imports for backwards namespace compatibility\r\n# pylint: disable=unused-import\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling1D\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling2D\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling3D\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling1D\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling2D\r\nfrom tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling3D\r\n# pylint: enable=unused-import\r\nfrom tensorflow.python.keras._impl.keras.utils import conv_utils\r\nfrom tensorflow.python.layers import convolutional as tf_convolutional_layers\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')\r\nclass Conv1D(tf_convolutional_layers.Conv1D, Layer):\r\n \"\"\"1D convolution layer (e.g. temporal convolution).\r\n\r\n This layer creates a convolution kernel that is convolved\r\n with the layer input over a single spatial (or temporal) dimension\r\n to produce a tensor of outputs.\r\n If `use_bias` is True, a bias vector is created and added to the outputs.\r\n Finally, if `activation` is not `None`,\r\n it is applied to the outputs as well.\r\n\r\n When using this layer as the first layer in a model,\r\n provide an `input_shape` argument\r\n (tuple of integers or `None`, e.g.\r\n `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,\r\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. 
the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of a single integer,\r\n specifying the length of the 1D convolution window.\r\n strides: An integer or tuple/list of a single integer,\r\n specifying the stride length of the convolution.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\r\n `\"causal\"` results in causal (dilated) convolutions, e.g. output[t]\r\n does not depend on input[t+1:]. Useful when modeling temporal data\r\n where the model should not violate the temporal order.\r\n See [WaveNet: A Generative Model for Raw Audio, section\r\n 2.1](https://arxiv.org/abs/1609.03499).\r\n dilation_rate: an integer or tuple/list of a single integer, specifying\r\n the dilation rate to use for dilated convolution.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any `strides` value != 1.\r\n activation: Activation function to use.\r\n If you don't specify anything, no activation is applied\r\n (ie. \"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n kernel_initializer: Initializer for the `kernel` weights matrix.\r\n bias_initializer: Initializer for the bias vector.\r\n kernel_regularizer: Regularizer function applied to\r\n the `kernel` weights matrix.\r\n bias_regularizer: Regularizer function applied to the bias vector.\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\")..\r\n kernel_constraint: Constraint function applied to the kernel matrix.\r\n bias_constraint: Constraint function applied to the bias vector.\r\n\r\n Input shape:\r\n 3D tensor with shape: `(batch_size, steps, input_dim)`\r\n\r\n Output shape:\r\n 3D tensor with shape: `(batch_size, new_steps, filters)`\r\n `steps` value might have changed due to padding or strides.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=1,\r\n padding='valid',\r\n dilation_rate=1,\r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n super(Conv1D, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format='channels_last',\r\n dilation_rate=dilation_rate,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n kernel_initializer=initializers.get(kernel_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n kernel_regularizer=regularizers.get(kernel_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n kernel_constraint=constraints.get(kernel_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'dilation_rate': self.dilation_rate,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\r\n 'bias_initializer': initializers.serialize(self.bias_initializer),\r\n 'kernel_regularizer': 
regularizers.serialize(self.kernel_regularizer),\r\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\r\n 'bias_constraint': constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(Conv1D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')\r\nclass Conv2D(tf_convolutional_layers.Conv2D, Layer):\r\n \"\"\"2D convolution layer (e.g. spatial convolution over images).\r\n\r\n This layer creates a convolution kernel that is convolved\r\n with the layer input to produce a tensor of\r\n outputs. If `use_bias` is True,\r\n a bias vector is created and added to the outputs. Finally, if\r\n `activation` is not `None`, it is applied to the outputs as well.\r\n\r\n When using this layer as the first layer in a model,\r\n provide the keyword argument `input_shape`\r\n (tuple of integers, does not include the sample axis),\r\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\r\n in `data_format=\"channels_last\"`.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of 2 integers, specifying the\r\n width and height of the 2D convolution window.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n strides: An integer or tuple/list of 2 integers,\r\n specifying the strides of the convolution along the width and height.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n dilation_rate: an integer or tuple/list of 2 integers, specifying\r\n the dilation rate to use for dilated convolution.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any stride value != 1.\r\n activation: Activation function to use.\r\n If you don't specify anything, no activation is applied\r\n (ie. 
\"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n kernel_initializer: Initializer for the `kernel` weights matrix.\r\n bias_initializer: Initializer for the bias vector.\r\n kernel_regularizer: Regularizer function applied to\r\n the `kernel` weights matrix.\r\n bias_regularizer: Regularizer function applied to the bias vector.\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\")..\r\n kernel_constraint: Constraint function applied to the kernel matrix.\r\n bias_constraint: Constraint function applied to the bias vector.\r\n\r\n Input shape:\r\n 4D tensor with shape:\r\n `(samples, channels, rows, cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(samples, rows, cols, channels)` if data_format='channels_last'.\r\n\r\n Output shape:\r\n 4D tensor with shape:\r\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\r\n `rows` and `cols` values might have changed due to padding.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=(1, 1),\r\n padding='valid',\r\n data_format=None,\r\n dilation_rate=(1, 1),\r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(Conv2D, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n dilation_rate=dilation_rate,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n kernel_initializer=initializers.get(kernel_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n kernel_regularizer=regularizers.get(kernel_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n kernel_constraint=constraints.get(kernel_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'data_format': self.data_format,\r\n 'dilation_rate': self.dilation_rate,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\r\n 'bias_initializer': initializers.serialize(self.bias_initializer),\r\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\r\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\r\n 'bias_constraint': constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(Conv2D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')\r\nclass Conv3D(tf_convolutional_layers.Conv3D, Layer):\r\n \"\"\"3D convolution layer (e.g. 
spatial convolution over volumes).\r\n\r\n This layer creates a convolution kernel that is convolved\r\n with the layer input to produce a tensor of\r\n outputs. If `use_bias` is True,\r\n a bias vector is created and added to the outputs. Finally, if\r\n `activation` is not `None`, it is applied to the outputs as well.\r\n\r\n When using this layer as the first layer in a model,\r\n provide the keyword argument `input_shape`\r\n (tuple of integers, does not include the sample axis),\r\n e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes\r\n with a single channel,\r\n in `data_format=\"channels_last\"`.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of 3 integers, specifying the\r\n depth, height and width of the 3D convolution window.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n strides: An integer or tuple/list of 3 integers,\r\n specifying the strides of the convolution along each spatial\r\n dimension.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\r\n while `channels_first` corresponds to inputs with shape\r\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n dilation_rate: an integer or tuple/list of 3 integers, specifying\r\n the dilation rate to use for dilated convolution.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any stride value != 1.\r\n activation: Activation function to use.\r\n If you don't specify anything, no activation is applied\r\n (ie. 
\"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n kernel_initializer: Initializer for the `kernel` weights matrix.\r\n bias_initializer: Initializer for the bias vector.\r\n kernel_regularizer: Regularizer function applied to\r\n the `kernel` weights matrix.\r\n bias_regularizer: Regularizer function applied to the bias vector.\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\")..\r\n kernel_constraint: Constraint function applied to the kernel matrix.\r\n bias_constraint: Constraint function applied to the bias vector.\r\n\r\n Input shape:\r\n 5D tensor with shape:\r\n `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if\r\n data_format='channels_first'\r\n or 5D tensor with shape:\r\n `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if\r\n data_format='channels_last'.\r\n\r\n Output shape:\r\n 5D tensor with shape:\r\n `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if\r\n data_format='channels_first'\r\n or 5D tensor with shape:\r\n `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if\r\n data_format='channels_last'.\r\n `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have\r\n changed due to padding.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=(1, 1, 1),\r\n padding='valid',\r\n data_format=None,\r\n dilation_rate=(1, 1, 1),\r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(Conv3D, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n dilation_rate=dilation_rate,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n kernel_initializer=initializers.get(kernel_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n kernel_regularizer=regularizers.get(kernel_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n kernel_constraint=constraints.get(kernel_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'data_format': self.data_format,\r\n 'dilation_rate': self.dilation_rate,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\r\n 'bias_initializer': initializers.serialize(self.bias_initializer),\r\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\r\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\r\n 'bias_constraint': constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(Conv3D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Conv2DTranspose',\r\n 'keras.layers.Convolution2DTranspose')\r\nclass 
Conv2DTranspose(tf_convolutional_layers.Conv2DTranspose, Layer):\r\n \"\"\"Transposed convolution layer (sometimes called Deconvolution).\r\n\r\n The need for transposed convolutions generally arises\r\n from the desire to use a transformation going in the opposite direction\r\n of a normal convolution, i.e., from something that has the shape of the\r\n output of some convolution to something that has the shape of its input\r\n while maintaining a connectivity pattern that is compatible with\r\n said convolution.\r\n\r\n When using this layer as the first layer in a model,\r\n provide the keyword argument `input_shape`\r\n (tuple of integers, does not include the sample axis),\r\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\r\n in `data_format=\"channels_last\"`.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of 2 integers, specifying the\r\n width and height of the 2D convolution window.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n strides: An integer or tuple/list of 2 integers,\r\n specifying the strides of the convolution along the width and height.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n dilation_rate: an integer or tuple/list of 2 integers, specifying\r\n the dilation rate to use for dilated convolution.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any stride value != 1.\r\n activation: Activation function to use.\r\n If you don't specify anything, no activation is applied\r\n (ie. 
\"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n kernel_initializer: Initializer for the `kernel` weights matrix.\r\n bias_initializer: Initializer for the bias vector.\r\n kernel_regularizer: Regularizer function applied to\r\n the `kernel` weights matrix.\r\n bias_regularizer: Regularizer function applied to the bias vector.\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\")..\r\n kernel_constraint: Constraint function applied to the kernel matrix.\r\n bias_constraint: Constraint function applied to the bias vector.\r\n\r\n Input shape:\r\n 4D tensor with shape:\r\n `(batch, channels, rows, cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(batch, rows, cols, channels)` if data_format='channels_last'.\r\n\r\n Output shape:\r\n 4D tensor with shape:\r\n `(batch, filters, new_rows, new_cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.\r\n `rows` and `cols` values might have changed due to padding.\r\n\r\n References:\r\n - [A guide to convolution arithmetic for deep\r\n learning](https://arxiv.org/abs/1603.07285v1)\r\n - [Deconvolutional\r\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=(1, 1),\r\n padding='valid',\r\n data_format=None,\r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(Conv2DTranspose, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n kernel_initializer=initializers.get(kernel_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n kernel_regularizer=regularizers.get(kernel_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n kernel_constraint=constraints.get(kernel_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'data_format': self.data_format,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\r\n 'bias_initializer': initializers.serialize(self.bias_initializer),\r\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\r\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\r\n 'bias_constraint': constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(Conv2DTranspose, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Conv3DTranspose',\r\n 'keras.layers.Convolution3DTranspose')\r\nclass 
Conv3DTranspose(tf_convolutional_layers.Conv3DTranspose, Layer):\r\n \"\"\"Transposed convolution layer (sometimes called Deconvolution).\r\n\r\n The need for transposed convolutions generally arises\r\n from the desire to use a transformation going in the opposite direction\r\n of a normal convolution, i.e., from something that has the shape of the\r\n output of some convolution to something that has the shape of its input\r\n while maintaining a connectivity pattern that is compatible with\r\n said convolution.\r\n\r\n When using this layer as the first layer in a model,\r\n provide the keyword argument `input_shape`\r\n (tuple of integers, does not include the sample axis),\r\n e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels\r\n if `data_format=\"channels_last\"`.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of 3 integers, specifying the\r\n depth, height and width of the 3D convolution window.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n strides: An integer or tuple/list of 3 integers,\r\n specifying the strides of the convolution along the depth, height\r\n and width.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, depth, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, depth, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n dilation_rate: an integer or tuple/list of 3 integers, specifying\r\n the dilation rate to use for dilated convolution.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any stride value != 1.\r\n activation: Activation function to use\r\n (see [activations](../activations.md)).\r\n If you don't specify anything, no activation is applied\r\n (ie. 
\"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n kernel_initializer: Initializer for the `kernel` weights matrix\r\n (see [initializers](../initializers.md)).\r\n bias_initializer: Initializer for the bias vector\r\n (see [initializers](../initializers.md)).\r\n kernel_regularizer: Regularizer function applied to\r\n the `kernel` weights matrix\r\n (see [regularizer](../regularizers.md)).\r\n bias_regularizer: Regularizer function applied to the bias vector\r\n (see [regularizer](../regularizers.md)).\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\").\r\n (see [regularizer](../regularizers.md)).\r\n kernel_constraint: Constraint function applied to the kernel matrix\r\n (see [constraints](../constraints.md)).\r\n bias_constraint: Constraint function applied to the bias vector\r\n (see [constraints](../constraints.md)).\r\n\r\n Input shape:\r\n 5D tensor with shape:\r\n `(batch, channels, depth, rows, cols)` if data_format='channels_first'\r\n or 5D tensor with shape:\r\n `(batch, depth, rows, cols, channels)` if data_format='channels_last'.\r\n\r\n Output shape:\r\n 5D tensor with shape:\r\n `(batch, filters, new_depth, new_rows, new_cols)` if\r\n data_format='channels_first'\r\n or 5D tensor with shape:\r\n `(batch, new_depth, new_rows, new_cols, filters)` if\r\n data_format='channels_last'.\r\n `depth` and `rows` and `cols` values might have changed due to padding.\r\n\r\n References:\r\n - [A guide to convolution arithmetic for deep\r\n learning](https://arxiv.org/abs/1603.07285v1)\r\n - [Deconvolutional\r\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=(1, 1, 1),\r\n padding='valid',\r\n data_format=None,\r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(Conv3DTranspose, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n kernel_initializer=initializers.get(kernel_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n kernel_regularizer=regularizers.get(kernel_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n kernel_constraint=constraints.get(kernel_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'data_format': self.data_format,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\r\n 'bias_initializer': initializers.serialize(self.bias_initializer),\r\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\r\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'kernel_constraint': 
constraints.serialize(self.kernel_constraint),\r\n 'bias_constraint': constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(Conv3DTranspose, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.SeparableConv1D',\r\n 'keras.layers.SeparableConvolution1D')\r\nclass SeparableConv1D(tf_convolutional_layers.SeparableConv1D, Layer):\r\n \"\"\"Depthwise separable 1D convolution.\r\n\r\n This layer performs a depthwise convolution that acts separately on\r\n channels, followed by a pointwise convolution that mixes channels.\r\n If `use_bias` is True and a bias initializer is provided,\r\n it adds a bias vector to the output.\r\n It then optionally applies an activation function to produce the final output.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space (i.e. the number\r\n of filters in the convolution).\r\n kernel_size: A single integer specifying the spatial\r\n dimensions of the filters.\r\n strides: A single integer specifying the strides\r\n of the convolution.\r\n Specifying any `stride` value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string, one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, length, channels)` while `channels_first` corresponds to\r\n inputs with shape `(batch, channels, length)`.\r\n dilation_rate: A single integer, specifying\r\n the dilation rate to use for dilated convolution.\r\n Currently, specifying any `dilation_rate` value != 1 is\r\n incompatible with specifying any stride value != 1.\r\n depth_multiplier: The number of depthwise convolution output channels for\r\n each input channel. The total number of depthwise convolution output\r\n channels will be equal to `num_filters_in * depth_multiplier`.\r\n activation: Activation function. Set it to None to maintain a\r\n linear activation.\r\n use_bias: Boolean, whether the layer uses a bias.\r\n depthwise_initializer: An initializer for the depthwise convolution kernel.\r\n pointwise_initializer: An initializer for the pointwise convolution kernel.\r\n bias_initializer: An initializer for the bias vector. If None, the default\r\n initializer will be used.\r\n depthwise_regularizer: Optional regularizer for the depthwise\r\n convolution kernel.\r\n pointwise_regularizer: Optional regularizer for the pointwise\r\n convolution kernel.\r\n bias_regularizer: Optional regularizer for the bias vector.\r\n activity_regularizer: Optional regularizer function for the output.\r\n depthwise_constraint: Optional projection function to be applied to the\r\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\r\n norm constraints or value constraints for layer weights). The function\r\n must take as input the unprojected variable and must return the\r\n projected variable (which must have the same shape). 
Constraints are\r\n not safe to use when doing asynchronous distributed training.\r\n pointwise_constraint: Optional projection function to be applied to the\r\n pointwise kernel after being updated by an `Optimizer`.\r\n bias_constraint: Optional projection function to be applied to the\r\n bias after being updated by an `Optimizer`.\r\n trainable: Boolean, if `True` also add variables to the graph collection\r\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\r\n name: A string, the name of the layer.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=1,\r\n padding='valid',\r\n data_format=None,\r\n dilation_rate=1,\r\n depth_multiplier=1,\r\n activation=None,\r\n use_bias=True,\r\n depthwise_initializer='glorot_uniform',\r\n pointwise_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n depthwise_regularizer=None,\r\n pointwise_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n depthwise_constraint=None,\r\n pointwise_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(SeparableConv1D, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n dilation_rate=dilation_rate,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n depthwise_initializer=initializers.get(depthwise_initializer),\r\n pointwise_initializer=initializers.get(pointwise_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n depthwise_regularizer=regularizers.get(depthwise_regularizer),\r\n pointwise_regularizer=regularizers.get(pointwise_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n depthwise_constraint=constraints.get(depthwise_constraint),\r\n pointwise_constraint=constraints.get(pointwise_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters': self.filters,\r\n 'kernel_size': self.kernel_size,\r\n 'strides': self.strides,\r\n 'padding': self.padding,\r\n 'data_format': self.data_format,\r\n 'dilation_rate': self.dilation_rate,\r\n 'activation': activations.serialize(self.activation),\r\n 'use_bias': self.use_bias,\r\n 'depthwise_initializer':\r\n initializers.serialize(self.depthwise_initializer),\r\n 'pointwise_initializer':\r\n initializers.serialize(self.pointwise_initializer),\r\n 'bias_initializer':\r\n initializers.serialize(self.bias_initializer),\r\n 'depthwise_regularizer':\r\n regularizers.serialize(self.depthwise_regularizer),\r\n 'pointwise_regularizer':\r\n regularizers.serialize(self.pointwise_regularizer),\r\n 'bias_regularizer':\r\n regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'depthwise_constraint':\r\n constraints.serialize(self.depthwise_constraint),\r\n 'pointwise_constraint':\r\n constraints.serialize(self.pointwise_constraint),\r\n 'bias_constraint':\r\n constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(SeparableConv1D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.SeparableConv2D',\r\n 'keras.layers.SeparableConvolution2D')\r\nclass SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer):\r\n \"\"\"Depthwise separable 2D convolution.\r\n\r\n Separable 
convolutions consist of first performing\r\n a depthwise spatial convolution\r\n (which acts on each input channel separately)\r\n followed by a pointwise convolution which mixes together the resulting\r\n output channels. The `depth_multiplier` argument controls how many\r\n output channels are generated per input channel in the depthwise step.\r\n\r\n Intuitively, separable convolutions can be understood as\r\n a way to factorize a convolution kernel into two smaller kernels,\r\n or as an extreme version of an Inception block.\r\n\r\n Arguments:\r\n filters: Integer, the dimensionality of the output space\r\n (i.e. the number of output filters in the convolution).\r\n kernel_size: An integer or tuple/list of 2 integers, specifying the\r\n width and height of the 2D convolution window.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n strides: An integer or tuple/list of 2 integers,\r\n specifying the strides of the convolution along the width and height.\r\n Can be a single integer to specify the same value for\r\n all spatial dimensions.\r\n Specifying any stride value != 1 is incompatible with specifying\r\n any `dilation_rate` value != 1.\r\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n depth_multiplier: The number of depthwise convolution output channels\r\n for each input channel.\r\n The total number of depthwise convolution output\r\n channels will be equal to `filters_in * depth_multiplier`.\r\n activation: Activation function to use.\r\n If you don't specify anything, no activation is applied\r\n (ie. 
\"linear\" activation: `a(x) = x`).\r\n use_bias: Boolean, whether the layer uses a bias vector.\r\n depthwise_initializer: Initializer for the depthwise kernel matrix.\r\n pointwise_initializer: Initializer for the pointwise kernel matrix.\r\n bias_initializer: Initializer for the bias vector.\r\n depthwise_regularizer: Regularizer function applied to\r\n the depthwise kernel matrix.\r\n pointwise_regularizer: Regularizer function applied to\r\n the pointwise kernel matrix.\r\n bias_regularizer: Regularizer function applied to the bias vector.\r\n activity_regularizer: Regularizer function applied to\r\n the output of the layer (its \"activation\")..\r\n depthwise_constraint: Constraint function applied to\r\n the depthwise kernel matrix.\r\n pointwise_constraint: Constraint function applied to\r\n the pointwise kernel matrix.\r\n bias_constraint: Constraint function applied to the bias vector.\r\n\r\n Input shape:\r\n 4D tensor with shape:\r\n `(batch, channels, rows, cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(batch, rows, cols, channels)` if data_format='channels_last'.\r\n\r\n Output shape:\r\n 4D tensor with shape:\r\n `(batch, filters, new_rows, new_cols)` if data_format='channels_first'\r\n or 4D tensor with shape:\r\n `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.\r\n `rows` and `cols` values might have changed due to padding.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n filters,\r\n kernel_size,\r\n strides=(1, 1),\r\n padding='valid',\r\n data_format=None,\r\n dilation_rate=1,\r\n depth_multiplier=1,\r\n activation=None,\r\n use_bias=True,\r\n depthwise_initializer='glorot_uniform',\r\n pointwise_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n depthwise_regularizer=None,\r\n pointwise_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n depthwise_constraint=None,\r\n pointwise_constraint=None,\r\n bias_constraint=None,\r\n **kwargs):\r\n if data_format is None:\r\n data_format = K.image_data_format()\r\n super(SeparableConv2D, self).__init__(\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding=padding,\r\n data_format=data_format,\r\n dilation_rate=dilation_rate,\r\n activation=activations.get(activation),\r\n use_bias=use_bias,\r\n depthwise_initializer=initializers.get(depthwise_initializer),\r\n pointwise_initializer=initializers.get(pointwise_initializer),\r\n bias_initializer=initializers.get(bias_initializer),\r\n depthwise_regularizer=regularizers.get(depthwise_regularizer),\r\n pointwise_regularizer=regularizers.get(pointwise_regularizer),\r\n bias_regularizer=regularizers.get(bias_regularizer),\r\n activity_regularizer=regularizers.get(activity_regularizer),\r\n depthwise_constraint=constraints.get(depthwise_constraint),\r\n pointwise_constraint=constraints.get(pointwise_constraint),\r\n bias_constraint=constraints.get(bias_constraint),\r\n **kwargs)\r\n\r\n def get_config(self):\r\n config = {\r\n 'filters':\r\n self.filters,\r\n 'kernel_size':\r\n self.kernel_size,\r\n 'strides':\r\n self.strides,\r\n 'padding':\r\n self.padding,\r\n 'data_format':\r\n self.data_format,\r\n 'dilation_rate':\r\n self.dilation_rate,\r\n 'activation':\r\n activations.serialize(self.activation),\r\n 'use_bias':\r\n self.use_bias,\r\n 'depthwise_initializer':\r\n initializers.serialize(self.depthwise_initializer),\r\n 'pointwise_initializer':\r\n initializers.serialize(self.pointwise_initializer),\r\n 'bias_initializer':\r\n 
initializers.serialize(self.bias_initializer),\r\n 'depthwise_regularizer':\r\n regularizers.serialize(self.depthwise_regularizer),\r\n 'pointwise_regularizer':\r\n regularizers.serialize(self.pointwise_regularizer),\r\n 'bias_regularizer':\r\n regularizers.serialize(self.bias_regularizer),\r\n 'activity_regularizer':\r\n regularizers.serialize(self.activity_regularizer),\r\n 'depthwise_constraint':\r\n constraints.serialize(self.depthwise_constraint),\r\n 'pointwise_constraint':\r\n constraints.serialize(self.pointwise_constraint),\r\n 'bias_constraint':\r\n constraints.serialize(self.bias_constraint)\r\n }\r\n base_config = super(SeparableConv2D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.UpSampling1D')\r\nclass UpSampling1D(Layer):\r\n \"\"\"Upsampling layer for 1D inputs.\r\n\r\n Repeats each temporal step `size` times along the time axis.\r\n\r\n Arguments:\r\n size: integer. Upsampling factor.\r\n\r\n Input shape:\r\n 3D tensor with shape: `(batch, steps, features)`.\r\n\r\n Output shape:\r\n 3D tensor with shape: `(batch, upsampled_steps, features)`.\r\n \"\"\"\r\n\r\n def __init__(self, size=2, **kwargs):\r\n super(UpSampling1D, self).__init__(**kwargs)\r\n self.size = int(size)\r\n self.input_spec = InputSpec(ndim=3)\r\n\r\n def compute_output_shape(self, input_shape):\r\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n size = self.size * input_shape[1] if input_shape[1] is not None else None\r\n return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])\r\n\r\n def call(self, inputs):\r\n output = K.repeat_elements(inputs, self.size, axis=1)\r\n return output\r\n\r\n def get_config(self):\r\n config = {'size': self.size}\r\n base_config = super(UpSampling1D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.UpSampling2D')\r\nclass UpSampling2D(Layer):\r\n \"\"\"Upsampling layer for 2D inputs.\r\n\r\n Repeats the rows and columns of the data\r\n by size[0] and size[1] respectively.\r\n\r\n Arguments:\r\n size: int, or tuple of 2 integers.\r\n The upsampling factors for rows and columns.\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n\r\n Input shape:\r\n 4D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, rows, cols, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, rows, cols)`\r\n\r\n Output shape:\r\n 4D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, upsampled_rows, upsampled_cols, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, upsampled_rows, upsampled_cols)`\r\n \"\"\"\r\n\r\n def __init__(self, size=(2, 2), data_format=None, **kwargs):\r\n super(UpSampling2D, self).__init__(**kwargs)\r\n self.data_format = conv_utils.normalize_data_format(data_format)\r\n self.size = conv_utils.normalize_tuple(size, 2, 'size')\r\n self.input_spec = InputSpec(ndim=4)\r\n\r\n def compute_output_shape(self, input_shape):\r\n input_shape = 
tensor_shape.TensorShape(input_shape).as_list()\r\n if self.data_format == 'channels_first':\r\n height = self.size[0] * input_shape[\r\n 2] if input_shape[2] is not None else None\r\n width = self.size[1] * input_shape[\r\n 3] if input_shape[3] is not None else None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], input_shape[1], height, width])\r\n else:\r\n height = self.size[0] * input_shape[\r\n 1] if input_shape[1] is not None else None\r\n width = self.size[1] * input_shape[\r\n 2] if input_shape[2] is not None else None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], height, width, input_shape[3]])\r\n\r\n def call(self, inputs):\r\n return K.resize_images(inputs, self.size[0], self.size[1], self.data_format)\r\n\r\n def get_config(self):\r\n config = {'size': self.size, 'data_format': self.data_format}\r\n base_config = super(UpSampling2D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.UpSampling3D')\r\nclass UpSampling3D(Layer):\r\n \"\"\"Upsampling layer for 3D inputs.\r\n\r\n Repeats the 1st, 2nd and 3rd dimensions\r\n of the data by size[0], size[1] and size[2] respectively.\r\n\r\n Arguments:\r\n size: int, or tuple of 3 integers.\r\n The upsampling factors for dim1, dim2 and dim3.\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\r\n while `channels_first` corresponds to inputs with shape\r\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n\r\n Input shape:\r\n 5D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, dim1, dim2, dim3, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, dim1, dim2, dim3)`\r\n\r\n Output shape:\r\n 5D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\r\n \"\"\"\r\n\r\n def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):\r\n self.data_format = conv_utils.normalize_data_format(data_format)\r\n self.size = conv_utils.normalize_tuple(size, 3, 'size')\r\n self.input_spec = InputSpec(ndim=5)\r\n super(UpSampling3D, self).__init__(**kwargs)\r\n\r\n def compute_output_shape(self, input_shape):\r\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n if self.data_format == 'channels_first':\r\n dim1 = self.size[0] * input_shape[\r\n 2] if input_shape[2] is not None else None\r\n dim2 = self.size[1] * input_shape[\r\n 3] if input_shape[3] is not None else None\r\n dim3 = self.size[2] * input_shape[\r\n 4] if input_shape[4] is not None else None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\r\n else:\r\n dim1 = self.size[0] * input_shape[\r\n 1] if input_shape[1] is not None else None\r\n dim2 = self.size[1] * input_shape[\r\n 2] if input_shape[2] is not None else None\r\n dim3 = self.size[2] * input_shape[\r\n 3] if input_shape[3] is not None else None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], dim1, dim2, dim3, 
input_shape[4]])\r\n\r\n def call(self, inputs):\r\n return K.resize_volumes(inputs, self.size[0], self.size[1], self.size[2],\r\n self.data_format)\r\n\r\n def get_config(self):\r\n config = {'size': self.size, 'data_format': self.data_format}\r\n base_config = super(UpSampling3D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.ZeroPadding1D')\r\nclass ZeroPadding1D(Layer):\r\n \"\"\"Zero-padding layer for 1D input (e.g. temporal sequence).\r\n\r\n Arguments:\r\n padding: int, or tuple of int (length 2), or dictionary.\r\n - If int:\r\n How many zeros to add at the beginning and end of\r\n the padding dimension (axis 1).\r\n - If tuple of int (length 2):\r\n How many zeros to add at the beginning and at the end of\r\n the padding dimension (`(left_pad, right_pad)`).\r\n\r\n Input shape:\r\n 3D tensor with shape `(batch, axis_to_pad, features)`\r\n\r\n Output shape:\r\n 3D tensor with shape `(batch, padded_axis, features)`\r\n \"\"\"\r\n\r\n def __init__(self, padding=1, **kwargs):\r\n super(ZeroPadding1D, self).__init__(**kwargs)\r\n self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')\r\n self.input_spec = InputSpec(ndim=3)\r\n\r\n def compute_output_shape(self, input_shape):\r\n if input_shape[1] is not None:\r\n length = input_shape[1] + self.padding[0] + self.padding[1]\r\n else:\r\n length = None\r\n return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\r\n\r\n def call(self, inputs):\r\n return K.temporal_padding(inputs, padding=self.padding)\r\n\r\n def get_config(self):\r\n config = {'padding': self.padding}\r\n base_config = super(ZeroPadding1D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.ZeroPadding2D')\r\nclass ZeroPadding2D(Layer):\r\n \"\"\"Zero-padding layer for 2D input (e.g. 
picture).\r\n\r\n This layer can add rows and columns of zeros\r\n at the top, bottom, left and right side of an image tensor.\r\n\r\n Arguments:\r\n padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\r\n - If int: the same symmetric padding\r\n is applied to width and height.\r\n - If tuple of 2 ints:\r\n interpreted as two different\r\n symmetric padding values for height and width:\r\n `(symmetric_height_pad, symmetric_width_pad)`.\r\n - If tuple of 2 tuples of 2 ints:\r\n interpreted as\r\n `((top_pad, bottom_pad), (left_pad, right_pad))`\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, height, width, channels)` while `channels_first`\r\n corresponds to inputs with shape\r\n `(batch, channels, height, width)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n\r\n Input shape:\r\n 4D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, rows, cols, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, rows, cols)`\r\n\r\n Output shape:\r\n 4D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, padded_rows, padded_cols, channels)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, channels, padded_rows, padded_cols)`\r\n \"\"\"\r\n\r\n def __init__(self, padding=(1, 1), data_format=None, **kwargs):\r\n super(ZeroPadding2D, self).__init__(**kwargs)\r\n self.data_format = conv_utils.normalize_data_format(data_format)\r\n if isinstance(padding, int):\r\n self.padding = ((padding, padding), (padding, padding))\r\n elif hasattr(padding, '__len__'):\r\n if len(padding) != 2:\r\n raise ValueError('`padding` should have two elements. '\r\n 'Found: ' + str(padding))\r\n height_padding = conv_utils.normalize_tuple(padding[0], 2,\r\n '1st entry of padding')\r\n width_padding = conv_utils.normalize_tuple(padding[1], 2,\r\n '2nd entry of padding')\r\n self.padding = (height_padding, width_padding)\r\n else:\r\n raise ValueError('`padding` should be either an int, '\r\n 'a tuple of 2 ints '\r\n '(symmetric_height_pad, symmetric_width_pad), '\r\n 'or a tuple of 2 tuples of 2 ints '\r\n '((top_pad, bottom_pad), (left_pad, right_pad)). 
'\r\n 'Found: ' + str(padding))\r\n self.input_spec = InputSpec(ndim=4)\r\n\r\n def compute_output_shape(self, input_shape):\r\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n if self.data_format == 'channels_first':\r\n if input_shape[2] is not None:\r\n rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]\r\n else:\r\n rows = None\r\n if input_shape[3] is not None:\r\n cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]\r\n else:\r\n cols = None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], input_shape[1], rows, cols])\r\n elif self.data_format == 'channels_last':\r\n if input_shape[1] is not None:\r\n rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]\r\n else:\r\n rows = None\r\n if input_shape[2] is not None:\r\n cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]\r\n else:\r\n cols = None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], rows, cols, input_shape[3]])\r\n\r\n def call(self, inputs):\r\n return K.spatial_2d_padding(\r\n inputs, padding=self.padding, data_format=self.data_format)\r\n\r\n def get_config(self):\r\n config = {'padding': self.padding, 'data_format': self.data_format}\r\n base_config = super(ZeroPadding2D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.ZeroPadding3D')\r\nclass ZeroPadding3D(Layer):\r\n \"\"\"Zero-padding layer for 3D data (spatial or spatio-temporal).\r\n\r\n Arguments:\r\n padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\r\n - If int: the same symmetric padding\r\n is applied to all three spatial dimensions.\r\n - If tuple of 3 ints:\r\n interpreted as three different\r\n symmetric padding values, one per spatial dimension:\r\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\r\n - If tuple of 3 tuples of 2 ints:\r\n interpreted as\r\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\r\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`\r\n data_format: A string,\r\n one of `channels_last` (default) or `channels_first`.\r\n The ordering of the dimensions in the inputs.\r\n `channels_last` corresponds to inputs with shape\r\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\r\n while `channels_first` corresponds to inputs with shape\r\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\r\n It defaults to the `image_data_format` value found in your\r\n Keras config file at `~/.keras/keras.json`.\r\n If you never set it, then it will be \"channels_last\".\r\n\r\n Input shape:\r\n 5D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,\r\n depth)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, depth, first_axis_to_pad, second_axis_to_pad,\r\n third_axis_to_pad)`\r\n\r\n Output shape:\r\n 5D tensor with shape:\r\n - If `data_format` is `\"channels_last\"`:\r\n `(batch, first_padded_axis, second_padded_axis, third_padded_axis,\r\n depth)`\r\n - If `data_format` is `\"channels_first\"`:\r\n `(batch, depth, first_padded_axis, second_padded_axis,\r\n third_padded_axis)`\r\n \"\"\"\r\n\r\n def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):\r\n super(ZeroPadding3D, self).__init__(**kwargs)\r\n self.data_format = conv_utils.normalize_data_format(data_format)\r\n if isinstance(padding, int):\r\n self.padding = ((padding, padding), (padding, padding), (padding,\r\n padding))\r\n elif hasattr(padding, '__len__'):\r\n if len(padding) != 3:\r\n 
raise ValueError('`padding` should have 3 elements. '\r\n                         'Found: ' + str(padding))\r\n      dim1_padding = conv_utils.normalize_tuple(padding[0], 2,\r\n                                                '1st entry of padding')\r\n      dim2_padding = conv_utils.normalize_tuple(padding[1], 2,\r\n                                                '2nd entry of padding')\r\n      dim3_padding = conv_utils.normalize_tuple(padding[2], 2,\r\n                                                '3rd entry of padding')\r\n      self.padding = (dim1_padding, dim2_padding, dim3_padding)\r\n    else:\r\n      raise ValueError(\r\n          '`padding` should be either an int, '\r\n          'a tuple of 3 ints '\r\n          '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '\r\n          'or a tuple of 3 tuples of 2 ints '\r\n          '((left_dim1_pad, right_dim1_pad),'\r\n          ' (left_dim2_pad, right_dim2_pad),'\r\n          ' (left_dim3_pad, right_dim3_pad)). '\r\n          'Found: ' + str(padding))\r\n    self.input_spec = InputSpec(ndim=5)\r\n\r\n  def compute_output_shape(self, input_shape):\r\n    input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n    if self.data_format == 'channels_first':\r\n      if input_shape[2] is not None:\r\n        dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]\r\n      else:\r\n        dim1 = None\r\n      if input_shape[3] is not None:\r\n        dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]\r\n      else:\r\n        dim2 = None\r\n      if input_shape[4] is not None:\r\n        dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]\r\n      else:\r\n        dim3 = None\r\n      return tensor_shape.TensorShape(\r\n          [input_shape[0], input_shape[1], dim1, dim2, dim3])\r\n    elif self.data_format == 'channels_last':\r\n      if input_shape[1] is not None:\r\n        dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]\r\n      else:\r\n        dim1 = None\r\n      if input_shape[2] is not None:\r\n        dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]\r\n      else:\r\n        dim2 = None\r\n      if input_shape[3] is not None:\r\n        dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]\r\n      else:\r\n        dim3 = None\r\n      return tensor_shape.TensorShape(\r\n          [input_shape[0], dim1, dim2, dim3, input_shape[4]])\r\n\r\n  def call(self, inputs):\r\n    return K.spatial_3d_padding(\r\n        inputs, padding=self.padding, data_format=self.data_format)\r\n\r\n  def get_config(self):\r\n    config = {'padding': self.padding, 'data_format': self.data_format}\r\n    base_config = super(ZeroPadding3D, self).get_config()\r\n    return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Cropping1D')\r\nclass Cropping1D(Layer):\r\n  \"\"\"Cropping layer for 1D input (e.g. 
temporal sequence).\r\n\r\n  It crops along the time dimension (axis 1).\r\n\r\n  Arguments:\r\n      cropping: int or tuple of int (length 2)\r\n          How many units should be trimmed off at the beginning and end of\r\n          the cropping dimension (axis 1).\r\n          If a single int is provided,\r\n          the same value will be used for both.\r\n\r\n  Input shape:\r\n      3D tensor with shape `(batch, axis_to_crop, features)`\r\n\r\n  Output shape:\r\n      3D tensor with shape `(batch, cropped_axis, features)`\r\n  \"\"\"\r\n\r\n  def __init__(self, cropping=(1, 1), **kwargs):\r\n    super(Cropping1D, self).__init__(**kwargs)\r\n    self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')\r\n    self.input_spec = InputSpec(ndim=3)\r\n\r\n  def compute_output_shape(self, input_shape):\r\n    input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n    if input_shape[1] is not None:\r\n      length = input_shape[1] - self.cropping[0] - self.cropping[1]\r\n    else:\r\n      length = None\r\n    return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\r\n\r\n  def call(self, inputs):\r\n    if self.cropping[1] == 0:\r\n      return inputs[:, self.cropping[0]:, :]\r\n    else:\r\n      return inputs[:, self.cropping[0]:-self.cropping[1], :]\r\n\r\n  def get_config(self):\r\n    config = {'cropping': self.cropping}\r\n    base_config = super(Cropping1D, self).get_config()\r\n    return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Cropping2D')\r\nclass Cropping2D(Layer):\r\n  \"\"\"Cropping layer for 2D input (e.g. picture).\r\n\r\n  It crops along spatial dimensions, i.e. width and height.\r\n\r\n  Arguments:\r\n      cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\r\n          - If int: the same symmetric cropping\r\n              is applied to width and height.\r\n          - If tuple of 2 ints:\r\n              interpreted as two different\r\n              symmetric cropping values for height and width:\r\n              `(symmetric_height_crop, symmetric_width_crop)`.\r\n          - If tuple of 2 tuples of 2 ints:\r\n              interpreted as\r\n              `((top_crop, bottom_crop), (left_crop, right_crop))`\r\n      data_format: A string,\r\n          one of `channels_last` (default) or `channels_first`.\r\n          The ordering of the dimensions in the inputs.\r\n          `channels_last` corresponds to inputs with shape\r\n          `(batch, height, width, channels)` while `channels_first`\r\n          corresponds to inputs with shape\r\n          `(batch, channels, height, width)`.\r\n          It defaults to the `image_data_format` value found in your\r\n          Keras config file at `~/.keras/keras.json`.\r\n          If you never set it, then it will be \"channels_last\".\r\n\r\n  Input shape:\r\n      4D tensor with shape:\r\n      - If `data_format` is `\"channels_last\"`:\r\n          `(batch, rows, cols, channels)`\r\n      - If `data_format` is `\"channels_first\"`:\r\n          `(batch, channels, rows, cols)`\r\n\r\n  Output shape:\r\n      4D tensor with shape:\r\n      - If `data_format` is `\"channels_last\"`:\r\n          `(batch, cropped_rows, cropped_cols, channels)`\r\n      - If `data_format` is `\"channels_first\"`:\r\n          `(batch, channels, cropped_rows, cropped_cols)`\r\n\r\n  Examples:\r\n\r\n  ```python\r\n  # Crop the input 2D images or feature maps\r\n  model = Sequential()\r\n  model.add(Cropping2D(cropping=((2, 2), (4, 4)),\r\n                       input_shape=(28, 28, 3)))\r\n  # now model.output_shape == (None, 24, 20, 3)\r\n  model.add(Conv2D(64, (3, 3), padding='same'))\r\n  model.add(Cropping2D(cropping=((2, 2), (2, 2))))\r\n  # now model.output_shape == (None, 20, 16, 
64)\r\n  ```\r\n  \"\"\"\r\n\r\n  def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):\r\n    super(Cropping2D, self).__init__(**kwargs)\r\n    self.data_format = conv_utils.normalize_data_format(data_format)\r\n    if isinstance(cropping, int):\r\n      self.cropping = ((cropping, cropping), (cropping, cropping))\r\n    elif hasattr(cropping, '__len__'):\r\n      if len(cropping) != 2:\r\n        raise ValueError('`cropping` should have two elements. '\r\n                         'Found: ' + str(cropping))\r\n      height_cropping = conv_utils.normalize_tuple(cropping[0], 2,\r\n                                                   '1st entry of cropping')\r\n      width_cropping = conv_utils.normalize_tuple(cropping[1], 2,\r\n                                                  '2nd entry of cropping')\r\n      self.cropping = (height_cropping, width_cropping)\r\n    else:\r\n      raise ValueError('`cropping` should be either an int, '\r\n                       'a tuple of 2 ints '\r\n                       '(symmetric_height_crop, symmetric_width_crop), '\r\n                       'or a tuple of 2 tuples of 2 ints '\r\n                       '((top_crop, bottom_crop), (left_crop, right_crop)). '\r\n                       'Found: ' + str(cropping))\r\n    self.input_spec = InputSpec(ndim=4)\r\n\r\n  def compute_output_shape(self, input_shape):\r\n    input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n    # pylint: disable=invalid-unary-operand-type\r\n    if self.data_format == 'channels_first':\r\n      return tensor_shape.TensorShape([\r\n          input_shape[0], input_shape[1],\r\n          input_shape[2] - self.cropping[0][0] - self.cropping[0][1]\r\n          if input_shape[2] else None,\r\n          input_shape[3] - self.cropping[1][0] - self.cropping[1][1]\r\n          if input_shape[3] else None\r\n      ])\r\n    else:\r\n      return tensor_shape.TensorShape([\r\n          input_shape[0],\r\n          input_shape[1] - self.cropping[0][0] - self.cropping[0][1]\r\n          if input_shape[1] else None,\r\n          input_shape[2] - self.cropping[1][0] - self.cropping[1][1]\r\n          if input_shape[2] else None, input_shape[3]\r\n      ])\r\n    # pylint: enable=invalid-unary-operand-type\r\n\r\n  def call(self, inputs):\r\n    # pylint: disable=invalid-unary-operand-type\r\n    if self.data_format == 'channels_first':\r\n      if self.cropping[0][1] == self.cropping[1][1] == 0:\r\n        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]\r\n      elif self.cropping[0][1] == 0:\r\n        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:\r\n                      -self.cropping[1][1]]\r\n      elif self.cropping[1][1] == 0:\r\n        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\r\n                      self.cropping[1][0]:]\r\n      return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\r\n                    self.cropping[1][0]:-self.cropping[1][1]]\r\n    else:\r\n      if self.cropping[0][1] == self.cropping[1][1] == 0:\r\n        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]\r\n      elif self.cropping[0][1] == 0:\r\n        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:\r\n                      -self.cropping[1][1], :]\r\n      elif self.cropping[1][1] == 0:\r\n        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\r\n                      self.cropping[1][0]:, :]\r\n      return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[\r\n          1][0]:-self.cropping[1][1], :]  # pylint: disable=invalid-unary-operand-type\r\n    # pylint: enable=invalid-unary-operand-type\r\n\r\n  def get_config(self):\r\n    config = {'cropping': self.cropping, 'data_format': self.data_format}\r\n    base_config = super(Cropping2D, self).get_config()\r\n    return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n@tf_export('keras.layers.Cropping3D')\r\nclass Cropping3D(Layer):\r\n  \"\"\"Cropping layer for 3D data (e.g. spatial or spatio-temporal).\r\n\r\n  Arguments:\r\n      cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\r\n          - If int: 
the same symmetric cropping\r\n              is applied to depth, height, and width.\r\n          - If tuple of 3 ints:\r\n              interpreted as three different\r\n              symmetric cropping values for depth, height, and width:\r\n              `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\r\n          - If tuple of 3 tuples of 2 ints:\r\n              interpreted as\r\n              `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,\r\n                right_dim2_crop), (left_dim3_crop, right_dim3_crop))`\r\n      data_format: A string,\r\n          one of `channels_last` (default) or `channels_first`.\r\n          The ordering of the dimensions in the inputs.\r\n          `channels_last` corresponds to inputs with shape\r\n          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\r\n          while `channels_first` corresponds to inputs with shape\r\n          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\r\n          It defaults to the `image_data_format` value found in your\r\n          Keras config file at `~/.keras/keras.json`.\r\n          If you never set it, then it will be \"channels_last\".\r\n\r\n  Input shape:\r\n      5D tensor with shape:\r\n      - If `data_format` is `\"channels_last\"`:\r\n          `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,\r\n            depth)`\r\n      - If `data_format` is `\"channels_first\"`:\r\n          `(batch, depth, first_axis_to_crop, second_axis_to_crop,\r\n            third_axis_to_crop)`\r\n\r\n  Output shape:\r\n      5D tensor with shape:\r\n      - If `data_format` is `\"channels_last\"`:\r\n          `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,\r\n            depth)`\r\n      - If `data_format` is `\"channels_first\"`:\r\n          `(batch, depth, first_cropped_axis, second_cropped_axis,\r\n            third_cropped_axis)`\r\n  \"\"\"\r\n\r\n  def __init__(self,\r\n               cropping=((1, 1), (1, 1), (1, 1)),\r\n               data_format=None,\r\n               **kwargs):\r\n    super(Cropping3D, self).__init__(**kwargs)\r\n    self.data_format = conv_utils.normalize_data_format(data_format)\r\n    if isinstance(cropping, int):\r\n      self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,\r\n                                                                    cropping))\r\n    elif hasattr(cropping, '__len__'):\r\n      if len(cropping) != 3:\r\n        raise ValueError('`cropping` should have 3 elements. '\r\n                         'Found: ' + str(cropping))\r\n      dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,\r\n                                                 '1st entry of cropping')\r\n      dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,\r\n                                                 '2nd entry of cropping')\r\n      dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,\r\n                                                 '3rd entry of cropping')\r\n      self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)\r\n    else:\r\n      raise ValueError(\r\n          '`cropping` should be either an int, '\r\n          'a tuple of 3 ints '\r\n          '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '\r\n          'or a tuple of 3 tuples of 2 ints '\r\n          '((left_dim1_crop, right_dim1_crop),'\r\n          ' (left_dim2_crop, right_dim2_crop),'\r\n          ' (left_dim3_crop, right_dim3_crop)). 
'\r\n 'Found: ' + str(cropping))\r\n self.input_spec = InputSpec(ndim=5)\r\n\r\n def compute_output_shape(self, input_shape):\r\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\r\n # pylint: disable=invalid-unary-operand-type\r\n if self.data_format == 'channels_first':\r\n if input_shape[2] is not None:\r\n dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]\r\n else:\r\n dim1 = None\r\n if input_shape[3] is not None:\r\n dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]\r\n else:\r\n dim2 = None\r\n if input_shape[4] is not None:\r\n dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]\r\n else:\r\n dim3 = None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\r\n elif self.data_format == 'channels_last':\r\n if input_shape[1] is not None:\r\n dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]\r\n else:\r\n dim1 = None\r\n if input_shape[2] is not None:\r\n dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]\r\n else:\r\n dim2 = None\r\n if input_shape[3] is not None:\r\n dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]\r\n else:\r\n dim3 = None\r\n return tensor_shape.TensorShape(\r\n [input_shape[0], dim1, dim2, dim3, input_shape[4]])\r\n # pylint: enable=invalid-unary-operand-type\r\n\r\n def call(self, inputs):\r\n # pylint: disable=invalid-unary-operand-type\r\n if self.data_format == 'channels_first':\r\n if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,\r\n self.cropping[2][0]:]\r\n elif self.cropping[0][1] == self.cropping[1][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,\r\n self.cropping[2][0]:-self.cropping[2][1]]\r\n elif self.cropping[1][1] == self.cropping[2][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\r\n self.cropping[1][0]:, self.cropping[2][0]:]\r\n elif self.cropping[0][1] == self.cropping[2][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:\r\n -self.cropping[1][1], self.cropping[2][0]:]\r\n elif self.cropping[0][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][\r\n 0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]\r\n elif self.cropping[1][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.\r\n cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]\r\n elif self.cropping[2][1] == 0:\r\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.\r\n cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]\r\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\r\n self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][\r\n 0]:-self.cropping[2][1]]\r\n else:\r\n if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:\r\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,\r\n self.cropping[2][0]:, :]\r\n elif self.cropping[0][1] == self.cropping[1][1] == 0:\r\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,\r\n self.cropping[2][0]:-self.cropping[2][1], :]\r\n elif self.cropping[1][1] == self.cropping[2][1] == 0:\r\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\r\n self.cropping[1][0]:, self.cropping[2][0]:, :]\r\n elif self.cropping[0][1] == self.cropping[2][1] == 0:\r\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:\r\n -self.cropping[1][1], self.cropping[2][0]:, :]\r\n elif 
self.cropping[0][1] == 0:\r\n return inputs[:, self.cropping[0][0]:, self.cropping[1][\r\n 0]:-self.cropping[1][1], self.cropping[2][0]:\r\n -self.cropping[2][1], :]\r\n elif self.cropping[1][1] == 0:\r\n return inputs[:, self.cropping[0][\r\n 0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:\r\n -self.cropping[2][1], :]\r\n elif self.cropping[2][1] == 0:\r\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\r\n self.cropping[1][0]:-self.cropping[1][1], self.cropping[\r\n 2][0]:, :]\r\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[\r\n 1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type\r\n -self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type\r\n # pylint: enable=invalid-unary-operand-type\r\n\r\n def get_config(self):\r\n config = {'cropping': self.cropping, 'data_format': self.data_format}\r\n base_config = super(Cropping3D, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n\r\n# Aliases\r\n\r\nConvolution1D = Conv1D\r\nConvolution2D = Conv2D\r\nConvolution3D = Conv3D\r\nSeparableConvolution1D = SeparableConv1D\r\nSeparableConvolution2D = SeparableConv2D\r\nConvolution2DTranspose = Conv2DTranspose\r\nConvolution3DTranspose = Conv3DTranspose\r\nDeconvolution2D = Deconv2D = Conv2DTranspose\r\nDeconvolution3D = Deconv3D = Conv3DTranspose\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"AffineLinearOperator bijector.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.distributions.python.ops.shape import _DistributionShape\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.ops import check_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops.distributions import bijector\r\nfrom tensorflow.python.ops.linalg import linear_operator\r\n\r\n\r\n__all__ = [\r\n \"AffineLinearOperator\",\r\n]\r\n\r\n\r\nclass AffineLinearOperator(bijector.Bijector):\r\n \"\"\"Compute `Y = g(X; shift, scale) = scale @ X + shift`.\r\n\r\n `shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.\r\n\r\n If `X` is a scalar then the forward transformation is: `scale * X + shift`\r\n where `*` denotes the scalar product.\r\n\r\n Note: we don't always simply transpose `X` (but write it this way for\r\n brevity). Actually the input `X` undergoes the following transformation\r\n before being premultiplied by `scale`:\r\n\r\n 1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,\r\n `new_sample_shape = [1]`. Otherwise do nothing.\r\n 2. 
The sample shape is flattened to have one dimension, i.e.,\r\n `new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.\r\n 3. The sample dim is cyclically rotated left by 1, i.e.,\r\n `new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the\r\n event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch\r\n dimensions.\r\n\r\n (For more details see `shape.make_batch_of_event_sample_matrices`.)\r\n\r\n The result of the above transformation is that `X` can be regarded as a batch\r\n of matrices where each column is a draw from the distribution. After\r\n premultiplying by `scale`, we take the inverse of this procedure. The input\r\n `Y` also undergoes the same transformation before/after premultiplying by\r\n `inv(scale)`.\r\n\r\n Example Use:\r\n\r\n ```python\r\n linalg = tf.linalg\r\n\r\n x = [1., 2, 3]\r\n\r\n shift = [-1., 0., 1]\r\n diag = [1., 2, 3]\r\n scale = linalg.LinearOperatorDiag(diag)\r\n affine = AffineLinearOperator(shift, scale)\r\n # In this case, `forward` is equivalent to:\r\n # y = scale @ x + shift\r\n y = affine.forward(x) # [0., 4, 10]\r\n\r\n shift = [2., 3, 1]\r\n tril = [[1., 0, 0],\r\n [2, 1, 0],\r\n [3, 2, 1]]\r\n scale = linalg.LinearOperatorLowerTriangular(tril)\r\n affine = AffineLinearOperator(shift, scale)\r\n # In this case, `forward` is equivalent to:\r\n # np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift\r\n y = affine.forward(x) # [3., 7, 11]\r\n ```\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n shift=None,\r\n scale=None,\r\n event_ndims=1,\r\n validate_args=False,\r\n name=\"affine_linear_operator\"):\r\n \"\"\"Instantiates the `AffineLinearOperator` bijector.\r\n\r\n Args:\r\n shift: Floating-point `Tensor`.\r\n scale: Subclass of `LinearOperator`. Represents the (batch) positive\r\n definite matrix `M` in `R^{k x k}`.\r\n event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions\r\n associated with a particular draw from the distribution. 
Must be 0 or 1.\r\n validate_args: Python `bool` indicating whether arguments should be\r\n checked for correctness.\r\n name: Python `str` name given to ops managed by this object.\r\n\r\n Raises:\r\n ValueError: if `event_ndims` is not 0 or 1.\r\n TypeError: if `scale` is not a `LinearOperator`.\r\n TypeError: if `shift.dtype` does not match `scale.dtype`.\r\n ValueError: if not `scale.is_non_singular`.\r\n \"\"\"\r\n self._graph_parents = []\r\n self._name = name\r\n self._validate_args = validate_args\r\n graph_parents = []\r\n with self._name_scope(\"init\", values=[shift]):\r\n event_ndims = ops.convert_to_tensor(event_ndims, name=\"event_ndims\")\r\n if tensor_util.constant_value(event_ndims) is not None:\r\n event_ndims = tensor_util.constant_value(event_ndims)\r\n if event_ndims not in (0, 1):\r\n raise ValueError(\"event_ndims({}) was not 0 or 1\".format(event_ndims))\r\n else:\r\n if validate_args:\r\n # Shape tool will catch if event_ndims is negative.\r\n event_ndims = control_flow_ops.with_dependencies(\r\n [check_ops.assert_less(\r\n event_ndims, 2, message=\"event_ndims must be 0 or 1\")],\r\n event_ndims)\r\n graph_parents += [event_ndims]\r\n\r\n # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.\r\n dtype = dtypes.float32\r\n\r\n if shift is not None:\r\n shift = ops.convert_to_tensor(shift, name=\"shift\")\r\n graph_parents += [shift]\r\n dtype = shift.dtype.base_dtype\r\n self._shift = shift\r\n\r\n if scale is not None:\r\n if (shift is not None and\r\n shift.dtype.base_dtype != scale.dtype.base_dtype):\r\n raise TypeError(\r\n \"shift.dtype({}) is incompatible with scale.dtype({}).\".format(\r\n shift.dtype, scale.dtype))\r\n if not isinstance(scale, linear_operator.LinearOperator):\r\n raise TypeError(\"scale is not an instance of tf.LinearOperator\")\r\n if validate_args and not scale.is_non_singular:\r\n raise ValueError(\"Scale matrix must be non-singular.\")\r\n graph_parents += scale.graph_parents\r\n if scale.tensor_rank is not None:\r\n batch_ndims = scale.tensor_rank - 2\r\n else:\r\n batch_ndims = scale.tensor_rank_tensor() - 2\r\n graph_parents += [batch_ndims]\r\n if scale.dtype is not None:\r\n dtype = scale.dtype.base_dtype\r\n else:\r\n batch_ndims = 0 # We won't need shape inference when scale is None.\r\n self._scale = scale\r\n self._shaper = _DistributionShape(\r\n batch_ndims=batch_ndims,\r\n event_ndims=event_ndims,\r\n validate_args=validate_args)\r\n super(AffineLinearOperator, self).__init__(\r\n event_ndims=event_ndims,\r\n graph_parents=graph_parents,\r\n is_constant_jacobian=True,\r\n dtype=dtype,\r\n validate_args=validate_args,\r\n name=name)\r\n\r\n @property\r\n def shift(self):\r\n \"\"\"The `shift` `Tensor` in `Y = scale @ X + shift`.\"\"\"\r\n return self._shift\r\n\r\n @property\r\n def scale(self):\r\n \"\"\"The `scale` `LinearOperator` in `Y = scale @ X + shift`.\"\"\"\r\n return self._scale\r\n\r\n def _forward(self, x):\r\n y = x\r\n if self.scale is not None:\r\n y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(\r\n y, expand_batch_dim=False)\r\n with ops.control_dependencies(self._maybe_collect_assertions() if\r\n self.validate_args else []):\r\n y = self.scale.matmul(y)\r\n y = self._shaper.undo_make_batch_of_event_sample_matrices(\r\n y, sample_shape, expand_batch_dim=False)\r\n if self.shift is not None:\r\n y += self.shift\r\n return y\r\n\r\n def _inverse(self, y):\r\n x = y\r\n if self.shift is not None:\r\n x -= self.shift\r\n if self.scale is not None:\r\n x, sample_shape = 
self._shaper.make_batch_of_event_sample_matrices(\r\n          x, expand_batch_dim=False)\r\n      # Solve fails if the op is singular so we may safely skip this assertion.\r\n      x = self.scale.solve(x)\r\n      x = self._shaper.undo_make_batch_of_event_sample_matrices(\r\n          x, sample_shape, expand_batch_dim=False)\r\n    return x\r\n\r\n  def _inverse_log_det_jacobian(self, y):\r\n    return -self._forward_log_det_jacobian(y)\r\n\r\n  def _forward_log_det_jacobian(self, x):  # pylint: disable=unused-argument\r\n    if self.scale is None:\r\n      return constant_op.constant(0, dtype=x.dtype.base_dtype)\r\n    with ops.control_dependencies(self._maybe_collect_assertions() if\r\n                                  self.validate_args else []):\r\n      return self.scale.log_abs_determinant()\r\n\r\n  def _maybe_collect_assertions(self):\r\n    try:\r\n      return [self.scale.assert_non_singular()]\r\n    except NotImplementedError:\r\n      pass\r\n    return []\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Factory functions for `Predictor`s.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.predictor import contrib_estimator_predictor\r\nfrom tensorflow.contrib.predictor import core_estimator_predictor\r\nfrom tensorflow.contrib.predictor import saved_model_predictor\r\n\r\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator\r\nfrom tensorflow.python.estimator import estimator as core_estimator\r\n\r\n\r\ndef from_contrib_estimator(estimator,\r\n                           prediction_input_fn,\r\n                           input_alternative_key=None,\r\n                           output_alternative_key=None,\r\n                           graph=None):\r\n  \"\"\"Constructs a `Predictor` from a `tf.contrib.learn.Estimator`.\r\n\r\n  Args:\r\n    estimator: an instance of `tf.contrib.learn.Estimator`.\r\n    prediction_input_fn: a function that takes no arguments and returns an\r\n      instance of `InputFnOps`.\r\n    input_alternative_key: Optional. Specify the input alternative used for\r\n      prediction.\r\n    output_alternative_key: Specify the output alternative used for\r\n      prediction. Not needed for single-headed models but required for\r\n      multi-headed models.\r\n    graph: Optional. The TensorFlow `graph` in which prediction should be\r\n      done.\r\n\r\n  Returns:\r\n    An initialized `Predictor`.\r\n\r\n  Raises:\r\n    TypeError: if `estimator` is a core `Estimator` instead of a contrib\r\n      `Estimator`.\r\n  \"\"\"\r\n  if isinstance(estimator, core_estimator.Estimator):\r\n    raise TypeError('Expected estimator to be of type '\r\n                    'tf.contrib.learn.Estimator, but got type '\r\n                    'tf.python.estimator.Estimator. 
You likely want to call '\r\n                    'from_estimator.')\r\n  return contrib_estimator_predictor.ContribEstimatorPredictor(\r\n      estimator,\r\n      prediction_input_fn,\r\n      input_alternative_key=input_alternative_key,\r\n      output_alternative_key=output_alternative_key,\r\n      graph=graph)\r\n\r\n\r\ndef from_estimator(estimator,\r\n                   serving_input_receiver_fn,\r\n                   output_key=None,\r\n                   graph=None):\r\n  \"\"\"Constructs a `Predictor` from a `tf.python.estimator.Estimator`.\r\n\r\n  Args:\r\n    estimator: an instance of `learn.python.estimator.Estimator`.\r\n    serving_input_receiver_fn: a function that takes no arguments and returns\r\n      an instance of `ServingInputReceiver` compatible with `estimator`.\r\n    output_key: Optional string specifying the export output to use. If\r\n      `None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.\r\n    graph: Optional. The TensorFlow `graph` in which prediction should be\r\n      done.\r\n\r\n  Returns:\r\n    An initialized `Predictor`.\r\n\r\n  Raises:\r\n    TypeError: if `estimator` is a contrib `Estimator` instead of a core\r\n      `Estimator`.\r\n  \"\"\"\r\n  if isinstance(estimator, contrib_estimator.Estimator):\r\n    raise TypeError('Expected estimator to be of type '\r\n                    'tf.python.estimator.Estimator, but got type '\r\n                    'tf.contrib.learn.Estimator. You likely want to call '\r\n                    'from_contrib_estimator.')\r\n  return core_estimator_predictor.CoreEstimatorPredictor(\r\n      estimator, serving_input_receiver_fn, output_key=output_key, graph=graph)\r\n\r\n\r\ndef from_saved_model(export_dir,\r\n                     signature_def_key=None,\r\n                     signature_def=None,\r\n                     tags=None,\r\n                     graph=None):\r\n  \"\"\"Constructs a `Predictor` from a `SavedModel` on disk.\r\n\r\n  Args:\r\n    export_dir: a path to a directory containing a `SavedModel`.\r\n    signature_def_key: Optional string specifying the signature to use. If\r\n      `None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of\r\n      `signature_def_key` and `signature_def` should be specified.\r\n    signature_def: A `SignatureDef` proto specifying the inputs and outputs\r\n      for prediction. Only one of `signature_def_key` and `signature_def`\r\n      should be specified.\r\n    tags: Optional. Tags that will be used to retrieve the correct\r\n      `SignatureDef`. Defaults to `DEFAULT_TAGS`.\r\n    graph: Optional. The TensorFlow `graph` in which prediction should be\r\n      done.\r\n\r\n  Returns:\r\n    An initialized `Predictor`.\r\n\r\n  Raises:\r\n    ValueError: More than one of `signature_def_key` and `signature_def` is\r\n      specified.\r\n  \"\"\"\r\n  return saved_model_predictor.SavedModelPredictor(\r\n      export_dir,\r\n      signature_def_key=signature_def_key,\r\n      signature_def=signature_def,\r\n      tags=tags,\r\n      graph=graph)\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Functions for specifying custom gradients.\r\n\r\n@@custom_gradient\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\n\r\n__all__ = [\r\n \"custom_gradient\",\r\n]\r\n\r\n\r\ndef custom_gradient(fx, gx, x, axis=(), fx_gx_manually_stopped=False,\r\n name=None):\r\n \"\"\"Enables specifying a custom gradient.\r\n\r\n This function works by clever application of `stop_gradient`. I.e., observe\r\n that:\r\n\r\n ```none\r\n h(x) = x * stop_gradient(g(x)) + stop_gradient(f(x) - x * g(x))\r\n ```\r\n\r\n is such that `h(x) = stop_gradient(f(x))` and `grad[h(x), x] =\r\n stop_gradient(g(x)).`\r\n\r\n In addition to scalar-domain/scalar-range functions, this function also\r\n supports tensor-domain/scalar-range functions. However, in the latter case it\r\n is necessary to reduce `x` to a scalar. This can be done by indicating the\r\n `axis` over which `f` operates or by appropriately `reduce_sum`-ing `x`, prior\r\n to calling this function.\r\n\r\n Partial Custom Gradient:\r\n\r\n Suppose `h(x) = htilde(x, y)`. Note that `dh/dx = stop(g(x))` but `dh/dy =\r\n None`. This is because a `Tensor` cannot have only a portion of its gradient\r\n stopped. To circumvent this issue, one must manually `stop_gradient` the\r\n relevant portions of `f`, `g`. For example see the unit-test,\r\n `test_works_correctly_fx_gx_manually_stopped`.\r\n\r\n Args:\r\n fx: `Tensor`. Output of function evaluated at `x`.\r\n gx: `Tensor`. Gradient of function evaluated at `x`.\r\n x: `Tensor`. Point of evaluation for `f, g`.\r\n axis: 1D `int` `Tensor` representing dimensions of `x` which are the domain\r\n of `f`. If `()` (the default), `f` is assumed scalar-domain/scalar-range.\r\n If `None` `f` is assumed to render one scalar given all of `x`. 
Otherwise\r\n `f` is assumed to output one scalar for each of `axis` dimensions of `x`.\r\n fx_gx_manually_stopped: Python `bool` indicating that `fx`, `gx` manually\r\n have `stop_gradient` applied.\r\n name: Python `str` name prefixed to Ops created by this function.\r\n\r\n Returns:\r\n fx: Floating-type `Tensor` equal to `f(x)` but which has gradient\r\n `stop_gradient(g(x))`.\r\n \"\"\"\r\n with ops.name_scope(name, \"custom_gradient\", [fx, gx, x]):\r\n fx = ops.convert_to_tensor(fx, name=\"fx\")\r\n # We don't want to bother eagerly computing `gx` since we may not even need\r\n # it.\r\n with ops.control_dependencies([fx]):\r\n gx = ops.convert_to_tensor(gx, dtype=fx.dtype, name=\"gx\")\r\n gx = array_ops.identity(gx, name=\"gx\")\r\n # Proof of correctness:\r\n #\r\n # f(x) = x * stop[gx] + stop[fx - x * gx]\r\n # = stop[fx]\r\n #\r\n # g(x) = grad[fx]\r\n # = stop[gx] + grad[stop[fx - x * gx]]\r\n # = stop[gx] + 0\r\n #\r\n # Notice that when x is zero it still works:\r\n # grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx]\r\n #\r\n # The proof is similar for the tensor-domain case, except that `x` is\r\n # replaced by `reduce_sum(x)`.\r\n sum_x = math_ops.reduce_sum(x, axis=axis, name=\"sum_x\")\r\n if not fx_gx_manually_stopped:\r\n fx = array_ops.stop_gradient(fx)\r\n gx = array_ops.stop_gradient(gx)\r\n # IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to write\r\n # the code this way, rather than, e.g.,\r\n # `sum_x * stop(gx) + stop(fx - sum_x * gx)`.\r\n # For more discussion regarding the relevant portions of the IEEE754\r\n # standard, see the StackOverflow question,\r\n # \"Is there a floating point value of x, for which x-x == 0 is false?\"\r\n # http://stackoverflow.com/q/2686644\r\n return (sum_x - array_ops.stop_gradient(sum_x)) * gx + fx\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Minimal runtime type checking library.\r\n\r\nThis module should not be considered public API.\r\n\"\"\"\r\n# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport functools\r\nimport re\r\n\r\nfrom tensorflow.python.util import tf_inspect\r\n\r\n# used for register_type_abbreviation and _type_repr below.\r\n_TYPE_ABBREVIATIONS = {}\r\n\r\n\r\nclass Type(object):\r\n \"\"\"Base class for type checker types.\r\n\r\n The custom types defined in this module are based on types in the standard\r\n library's typing module (in Python 3.5):\r\n https://docs.python.org/3/library/typing.html\r\n\r\n The only difference should be that we use actual instances of Type classes to\r\n represent custom types rather than the metaclass magic typing uses to create\r\n new class objects. 
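A small usage sketch (an added example, not part of the original\r\n  docstring):\r\n\r\n  ```python\r\n  # Type instances implement __instancecheck__, so isinstance() accepts\r\n  # an instance such as List(int) in place of a class:\r\n  assert isinstance([1, 2, 3], List(int))\r\n  assert not isinstance([1, 'two'], List(int))\r\n  ```\r\n\r\n  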
In practice, all this should mean is that we use\r\n `List(int)` rather than `List[int]`.\r\n\r\n Custom types should implement __instancecheck__ and inherit from Type. Every\r\n argument in the constructor must be a type or Type instance, and these\r\n arguments must be stored as a tuple on the `_types` attribute.\r\n \"\"\"\r\n\r\n def __init__(self, *types):\r\n self._types = types\r\n\r\n def __repr__(self):\r\n args_repr = \", \".join(repr(t) for t in self._types)\r\n return \"typecheck.%s(%s)\" % (type(self).__name__, args_repr)\r\n\r\n\r\nclass _SingleArgumentType(Type):\r\n \"\"\"Use this subclass for parametric types that accept only one argument.\"\"\"\r\n\r\n def __init__(self, tpe):\r\n super(_SingleArgumentType, self).__init__(tpe)\r\n\r\n @property\r\n def _type(self):\r\n tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking\r\n return tpe\r\n\r\n\r\nclass _TwoArgumentType(Type):\r\n \"\"\"Use this subclass for parametric types that accept two arguments.\"\"\"\r\n\r\n def __init__(self, first_type, second_type):\r\n super(_TwoArgumentType, self).__init__(first_type, second_type)\r\n\r\n\r\nclass Union(Type):\r\n \"\"\"A sum type.\r\n\r\n A correct type is any of the types provided.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return isinstance(instance, self._types)\r\n\r\n\r\nclass Optional(_SingleArgumentType):\r\n \"\"\"An optional type.\r\n\r\n A correct type is either the provided type or NoneType.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n # types.NoneType does not exist in Python 3\r\n return isinstance(instance, (self._type, type(None)))\r\n\r\n\r\nclass List(_SingleArgumentType):\r\n \"\"\"A typed list.\r\n\r\n A correct type is a list where each element has the single provided type.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return (isinstance(instance, list)\r\n and all(isinstance(x, self._type) for x in instance))\r\n\r\n\r\nclass Sequence(_SingleArgumentType):\r\n \"\"\"A typed sequence.\r\n\r\n A correct type is a sequence where each element has the single provided type.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return (isinstance(instance, collections.Sequence)\r\n and all(isinstance(x, self._type) for x in instance))\r\n\r\n\r\nclass Collection(_SingleArgumentType):\r\n \"\"\"A sized, iterable container.\r\n\r\n A correct type is an iterable and container with known size where each element\r\n has the single provided type.\r\n\r\n We use this in preference to Iterable because we check each instance of the\r\n iterable at runtime, and hence need to avoid iterables that could be\r\n exhausted.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return (isinstance(instance, collections.Iterable)\r\n and isinstance(instance, collections.Sized)\r\n and isinstance(instance, collections.Container)\r\n and all(isinstance(x, self._type) for x in instance))\r\n\r\n\r\nclass Tuple(Type):\r\n \"\"\"A typed tuple.\r\n\r\n A correct type is a tuple with the correct length where each element has\r\n the correct type.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return (isinstance(instance, tuple)\r\n and len(instance) == len(self._types)\r\n and all(isinstance(x, t) for x, t in zip(instance, self._types)))\r\n\r\n\r\nclass Mapping(_TwoArgumentType):\r\n \"\"\"A typed mapping.\r\n\r\n A correct type has the correct parametric types for keys and values.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n key_type, value_type = self._types # pylint: 
disable=unbalanced-tuple-unpacking\r\n return (isinstance(instance, collections.Mapping)\r\n and all(isinstance(k, key_type) for k in instance.keys())\r\n and all(isinstance(k, value_type) for k in instance.values()))\r\n\r\n\r\nclass Dict(Mapping):\r\n \"\"\"A typed dict.\r\n\r\n A correct type has the correct parametric types for keys and values.\r\n \"\"\"\r\n\r\n def __instancecheck__(self, instance):\r\n return (isinstance(instance, dict)\r\n and super(Dict, self).__instancecheck__(instance))\r\n\r\n\r\ndef _replace_forward_references(t, context):\r\n \"\"\"Replace forward references in the given type.\"\"\"\r\n if isinstance(t, str):\r\n return context[t]\r\n elif isinstance(t, Type):\r\n return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access\r\n else:\r\n return t\r\n\r\n\r\ndef register_type_abbreviation(name, alias):\r\n \"\"\"Register an abbreviation for a type in typecheck tracebacks.\r\n\r\n This makes otherwise very long typecheck errors much more readable.\r\n\r\n Example:\r\n typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')\r\n\r\n Args:\r\n name: type or class to abbreviate.\r\n alias: string alias to substitute.\r\n \"\"\"\r\n _TYPE_ABBREVIATIONS[name] = alias\r\n\r\n\r\ndef _type_repr(t):\r\n \"\"\"A more succinct repr for typecheck tracebacks.\"\"\"\r\n string = repr(t)\r\n for type_, alias in _TYPE_ABBREVIATIONS.items():\r\n string = string.replace(repr(type_), alias)\r\n string = re.sub(r\"<(class|type) '([\\w.]+)'>\", r\"\\2\", string)\r\n string = re.sub(r\"typecheck\\.(\\w+)\", r\"\\1\", string)\r\n return string\r\n\r\n\r\nclass Error(TypeError):\r\n \"\"\"Exception for typecheck failures.\"\"\"\r\n\r\n\r\ndef accepts(*types):\r\n \"\"\"A decorator which checks the input types of a function.\r\n\r\n Based on:\r\n http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments\r\n The above draws from:\r\n https://www.python.org/dev/peps/pep-0318/\r\n\r\n Args:\r\n *types: A list of Python types.\r\n\r\n Returns:\r\n A function to use as a decorator.\r\n \"\"\"\r\n\r\n def check_accepts(f):\r\n \"\"\"Check the types.\"\"\"\r\n spec = tf_inspect.getargspec(f)\r\n\r\n num_function_arguments = len(spec.args)\r\n if len(types) != num_function_arguments:\r\n raise Error(\r\n \"Function %r has %d arguments but only %d types were provided in the \"\r\n \"annotation.\" % (f, num_function_arguments, len(types)))\r\n\r\n if spec.defaults:\r\n num_defaults = len(spec.defaults)\r\n for (name, a, t) in zip(spec.args[-num_defaults:],\r\n spec.defaults,\r\n types[-num_defaults:]):\r\n allowed_type = _replace_forward_references(t, f.__globals__)\r\n if not isinstance(a, allowed_type):\r\n raise Error(\"default argument value %r of type %r is not an instance \"\r\n \"of the allowed type %s for the %s argument to %r\"\r\n % (a, type(a), _type_repr(allowed_type), name, f))\r\n\r\n @functools.wraps(f)\r\n def new_f(*args, **kwds):\r\n \"\"\"A helper function.\"\"\"\r\n for (a, t) in zip(args, types):\r\n allowed_type = _replace_forward_references(t, f.__globals__)\r\n if not isinstance(a, allowed_type):\r\n raise Error(\"%r of type %r is not an instance of the allowed type %s \"\r\n \"for %r\" % (a, type(a), _type_repr(allowed_type), f))\r\n return f(*args, **kwds)\r\n\r\n return new_f\r\n\r\n return check_accepts\r\n\r\n\r\ndef returns(*types):\r\n \"\"\"A decorator which checks the return types of a function.\r\n\r\n Based on:\r\n 
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments\r\n The above draws from:\r\n https://www.python.org/dev/peps/pep-0318/\r\n\r\n Args:\r\n *types: A list of Python types.\r\n A list of one element corresponds to a single return value.\r\n A list of several elements corresponds to several return values.\r\n Note that a function with no explicit return value has an implicit\r\n NoneType return and should be annotated correspondingly.\r\n\r\n Returns:\r\n A function to use as a decorator.\r\n \"\"\"\r\n\r\n def check_returns(f):\r\n \"\"\"Check the types.\"\"\"\r\n if not types:\r\n raise TypeError(\"A return type annotation must contain at least one type\")\r\n\r\n @functools.wraps(f)\r\n def new_f(*args, **kwds):\r\n \"\"\"A helper function.\"\"\"\r\n return_value = f(*args, **kwds)\r\n\r\n if len(types) == 1:\r\n # The function has a single return value.\r\n allowed_type = _replace_forward_references(types[0], f.__globals__)\r\n if not isinstance(return_value, allowed_type):\r\n raise Error(\"%r of type %r is not an instance of the allowed type %s \"\r\n \"for %r\"\r\n % (return_value, type(return_value),\r\n _type_repr(allowed_type), f))\r\n\r\n else:\r\n if len(return_value) != len(types):\r\n raise Error(\r\n \"Function %r has %d return values but only %d types were \"\r\n \"provided in the annotation.\" %\r\n (f, len(return_value), len(types)))\r\n\r\n for (r, t) in zip(return_value, types):\r\n allowed_type = _replace_forward_references(t, f.__globals__)\r\n if not isinstance(r, allowed_type):\r\n raise Error(\"%r of type %r is not an instance of allowed type %s \"\r\n \"for %r\" % (r, type(r), _type_repr(allowed_type), f))\r\n\r\n return return_value\r\n\r\n return new_f\r\n\r\n return check_returns\r\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A library of helpers for use with SamplingDecoders.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\n\r\nimport six\r\n\r\nfrom tensorflow.contrib.seq2seq.python.ops import decoder\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import embedding_ops\r\nfrom tensorflow.python.ops import gen_array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import tensor_array_ops\r\nfrom tensorflow.python.ops.distributions import bernoulli\r\nfrom tensorflow.python.ops.distributions import categorical\r\nfrom tensorflow.python.util import nest\r\n\r\n__all__ = [\r\n \"Helper\",\r\n \"TrainingHelper\",\r\n \"GreedyEmbeddingHelper\",\r\n \"SampleEmbeddingHelper\",\r\n \"CustomHelper\",\r\n \"ScheduledEmbeddingTrainingHelper\",\r\n \"ScheduledOutputTrainingHelper\",\r\n \"InferenceHelper\",\r\n]\r\n\r\n_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access\r\n\r\n\r\ndef _unstack_ta(inp):\r\n return tensor_array_ops.TensorArray(\r\n dtype=inp.dtype, size=array_ops.shape(inp)[0],\r\n element_shape=inp.get_shape()[1:]).unstack(inp)\r\n\r\n\r\[email protected]_metaclass(abc.ABCMeta)\r\nclass Helper(object):\r\n \"\"\"Interface for implementing sampling in seq2seq decoders.\r\n\r\n Helper instances are used by `BasicDecoder`.\r\n \"\"\"\r\n\r\n @abc.abstractproperty\r\n def batch_size(self):\r\n \"\"\"Batch size of tensor returned by `sample`.\r\n\r\n Returns a scalar int32 tensor.\r\n \"\"\"\r\n raise NotImplementedError(\"batch_size has not been implemented\")\r\n\r\n @abc.abstractproperty\r\n def sample_ids_shape(self):\r\n \"\"\"Shape of tensor returned by `sample`, excluding the batch dimension.\r\n\r\n Returns a `TensorShape`.\r\n \"\"\"\r\n raise NotImplementedError(\"sample_ids_shape has not been implemented\")\r\n\r\n @abc.abstractproperty\r\n def sample_ids_dtype(self):\r\n \"\"\"DType of tensor returned by `sample`.\r\n\r\n Returns a DType.\r\n \"\"\"\r\n raise NotImplementedError(\"sample_ids_dtype has not been implemented\")\r\n\r\n @abc.abstractmethod\r\n def initialize(self, name=None):\r\n \"\"\"Returns `(initial_finished, initial_inputs)`.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def sample(self, time, outputs, state, name=None):\r\n \"\"\"Returns `sample_ids`.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n \"\"\"Returns `(finished, next_inputs, next_state)`.\"\"\"\r\n pass\r\n\r\n\r\nclass CustomHelper(Helper):\r\n \"\"\"Base abstract 
class that allows the user to customize sampling.\"\"\"\r\n\r\n def __init__(self, initialize_fn, sample_fn, next_inputs_fn,\r\n sample_ids_shape=None, sample_ids_dtype=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n initialize_fn: callable that returns `(finished, next_inputs)`\r\n for the first iteration.\r\n sample_fn: callable that takes `(time, outputs, state)`\r\n and emits tensor `sample_ids`.\r\n next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`\r\n and emits `(finished, next_inputs, next_state)`.\r\n sample_ids_shape: Either a list of integers, or a 1-D Tensor of type\r\n `int32`, the shape of each value in the `sample_ids` batch. Defaults to\r\n a scalar.\r\n sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.\r\n \"\"\"\r\n self._initialize_fn = initialize_fn\r\n self._sample_fn = sample_fn\r\n self._next_inputs_fn = next_inputs_fn\r\n self._batch_size = None\r\n self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])\r\n self._sample_ids_dtype = sample_ids_dtype or dtypes.int32\r\n\r\n @property\r\n def batch_size(self):\r\n if self._batch_size is None:\r\n raise ValueError(\"batch_size accessed before initialize was called\")\r\n return self._batch_size\r\n\r\n @property\r\n def sample_ids_shape(self):\r\n return self._sample_ids_shape\r\n\r\n @property\r\n def sample_ids_dtype(self):\r\n return self._sample_ids_dtype\r\n\r\n def initialize(self, name=None):\r\n with ops.name_scope(name, \"%sInitialize\" % type(self).__name__):\r\n (finished, next_inputs) = self._initialize_fn()\r\n if self._batch_size is None:\r\n self._batch_size = array_ops.size(finished)\r\n return (finished, next_inputs)\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n with ops.name_scope(\r\n name, \"%sSample\" % type(self).__name__, (time, outputs, state)):\r\n return self._sample_fn(time=time, outputs=outputs, state=state)\r\n\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n with ops.name_scope(\r\n name, \"%sNextInputs\" % type(self).__name__, (time, outputs, state)):\r\n return self._next_inputs_fn(\r\n time=time, outputs=outputs, state=state, sample_ids=sample_ids)\r\n\r\n\r\nclass TrainingHelper(Helper):\r\n \"\"\"A helper for use during training. Only reads inputs.\r\n\r\n Returned sample_ids are the argmax of the RNN output logits.\r\n \"\"\"\r\n\r\n def __init__(self, inputs, sequence_length, time_major=False, name=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n inputs: A (structure of) input tensors.\r\n sequence_length: An int32 vector tensor.\r\n time_major: Python bool. 
Whether the tensors in `inputs` are time major.\r\n If `False` (default), they are assumed to be batch major.\r\n name: Name scope for any created operations.\r\n\r\n Raises:\r\n ValueError: if `sequence_length` is not a 1D tensor.\r\n \"\"\"\r\n with ops.name_scope(name, \"TrainingHelper\", [inputs, sequence_length]):\r\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\r\n self._inputs = inputs\r\n if not time_major:\r\n inputs = nest.map_structure(_transpose_batch_time, inputs)\r\n\r\n self._input_tas = nest.map_structure(_unstack_ta, inputs)\r\n self._sequence_length = ops.convert_to_tensor(\r\n sequence_length, name=\"sequence_length\")\r\n if self._sequence_length.get_shape().ndims != 1:\r\n raise ValueError(\r\n \"Expected sequence_length to be a vector, but received shape: %s\" %\r\n self._sequence_length.get_shape())\r\n\r\n self._zero_inputs = nest.map_structure(\r\n lambda inp: array_ops.zeros_like(inp[0, :]), inputs)\r\n\r\n self._batch_size = array_ops.size(sequence_length)\r\n\r\n @property\r\n def inputs(self):\r\n return self._inputs\r\n\r\n @property\r\n def sequence_length(self):\r\n return self._sequence_length\r\n\r\n @property\r\n def batch_size(self):\r\n return self._batch_size\r\n\r\n @property\r\n def sample_ids_shape(self):\r\n return tensor_shape.TensorShape([])\r\n\r\n @property\r\n def sample_ids_dtype(self):\r\n return dtypes.int32\r\n\r\n def initialize(self, name=None):\r\n with ops.name_scope(name, \"TrainingHelperInitialize\"):\r\n finished = math_ops.equal(0, self._sequence_length)\r\n all_finished = math_ops.reduce_all(finished)\r\n next_inputs = control_flow_ops.cond(\r\n all_finished, lambda: self._zero_inputs,\r\n lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))\r\n return (finished, next_inputs)\r\n\r\n def sample(self, time, outputs, name=None, **unused_kwargs):\r\n with ops.name_scope(name, \"TrainingHelperSample\", [time, outputs]):\r\n sample_ids = math_ops.cast(\r\n math_ops.argmax(outputs, axis=-1), dtypes.int32)\r\n return sample_ids\r\n\r\n def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):\r\n \"\"\"next_inputs_fn for TrainingHelper.\"\"\"\r\n with ops.name_scope(name, \"TrainingHelperNextInputs\",\r\n [time, outputs, state]):\r\n next_time = time + 1\r\n finished = (next_time >= self._sequence_length)\r\n all_finished = math_ops.reduce_all(finished)\r\n def read_from_ta(inp):\r\n return inp.read(next_time)\r\n next_inputs = control_flow_ops.cond(\r\n all_finished, lambda: self._zero_inputs,\r\n lambda: nest.map_structure(read_from_ta, self._input_tas))\r\n return (finished, next_inputs, state)\r\n\r\n\r\nclass ScheduledEmbeddingTrainingHelper(TrainingHelper):\r\n \"\"\"A training helper that adds scheduled sampling.\r\n\r\n Returns -1s for sample_ids where no sampling took place; valid sample id\r\n values elsewhere.\r\n \"\"\"\r\n\r\n def __init__(self, inputs, sequence_length, embedding, sampling_probability,\r\n time_major=False, seed=None, scheduling_seed=None, name=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n inputs: A (structure of) input tensors.\r\n sequence_length: An int32 vector tensor.\r\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\r\n or the `params` argument for `embedding_lookup`.\r\n sampling_probability: A 0D `float32` tensor: the probability of sampling\r\n categorically from the output ids instead of reading directly from the\r\n inputs.\r\n time_major: Python bool. 
Whether the tensors in `inputs` are time major.\r\n If `False` (default), they are assumed to be batch major.\r\n seed: The sampling seed.\r\n scheduling_seed: The schedule decision rule sampling seed.\r\n name: Name scope for any created operations.\r\n\r\n Raises:\r\n ValueError: if `sampling_probability` is not a scalar or vector.\r\n \"\"\"\r\n with ops.name_scope(name, \"ScheduledEmbeddingSamplingWrapper\",\r\n [embedding, sampling_probability]):\r\n if callable(embedding):\r\n self._embedding_fn = embedding\r\n else:\r\n self._embedding_fn = (\r\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\r\n self._sampling_probability = ops.convert_to_tensor(\r\n sampling_probability, name=\"sampling_probability\")\r\n if self._sampling_probability.get_shape().ndims not in (0, 1):\r\n raise ValueError(\r\n \"sampling_probability must be either a scalar or a vector. \"\r\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\r\n self._seed = seed\r\n self._scheduling_seed = scheduling_seed\r\n super(ScheduledEmbeddingTrainingHelper, self).__init__(\r\n inputs=inputs,\r\n sequence_length=sequence_length,\r\n time_major=time_major,\r\n name=name)\r\n\r\n def initialize(self, name=None):\r\n return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\r\n [time, outputs, state]):\r\n # Return -1s where we did not sample, and sample_ids elsewhere\r\n select_sampler = bernoulli.Bernoulli(\r\n probs=self._sampling_probability, dtype=dtypes.bool)\r\n select_sample = select_sampler.sample(\r\n sample_shape=self.batch_size, seed=self._scheduling_seed)\r\n sample_id_sampler = categorical.Categorical(logits=outputs)\r\n return array_ops.where(\r\n select_sample,\r\n sample_id_sampler.sample(seed=self._seed),\r\n gen_array_ops.fill([self.batch_size], -1))\r\n\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperNextInputs\",\r\n [time, outputs, state, sample_ids]):\r\n (finished, base_next_inputs, state) = (\r\n super(ScheduledEmbeddingTrainingHelper, self).next_inputs(\r\n time=time,\r\n outputs=outputs,\r\n state=state,\r\n sample_ids=sample_ids,\r\n name=name))\r\n\r\n def maybe_sample():\r\n \"\"\"Perform scheduled sampling.\"\"\"\r\n where_sampling = math_ops.cast(\r\n array_ops.where(sample_ids > -1), dtypes.int32)\r\n where_not_sampling = math_ops.cast(\r\n array_ops.where(sample_ids <= -1), dtypes.int32)\r\n sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)\r\n inputs_not_sampling = array_ops.gather_nd(\r\n base_next_inputs, where_not_sampling)\r\n sampled_next_inputs = self._embedding_fn(sample_ids_sampling)\r\n base_shape = array_ops.shape(base_next_inputs)\r\n return (array_ops.scatter_nd(indices=where_sampling,\r\n updates=sampled_next_inputs,\r\n shape=base_shape)\r\n + array_ops.scatter_nd(indices=where_not_sampling,\r\n updates=inputs_not_sampling,\r\n shape=base_shape))\r\n\r\n all_finished = math_ops.reduce_all(finished)\r\n next_inputs = control_flow_ops.cond(\r\n all_finished, lambda: base_next_inputs, maybe_sample)\r\n return (finished, next_inputs, state)\r\n\r\n\r\nclass ScheduledOutputTrainingHelper(TrainingHelper):\r\n \"\"\"A training helper that adds scheduled sampling directly to outputs.\r\n\r\n Returns False for sample_ids where no sampling took place; True elsewhere.\r\n \"\"\"\r\n\r\n def __init__(self, 
inputs, sequence_length, sampling_probability,\r\n time_major=False, seed=None, next_inputs_fn=None,\r\n auxiliary_inputs=None, name=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n inputs: A (structure) of input tensors.\r\n sequence_length: An int32 vector tensor.\r\n sampling_probability: A 0D `float32` tensor: the probability of sampling\r\n from the outputs instead of reading directly from the inputs.\r\n time_major: Python bool. Whether the tensors in `inputs` are time major.\r\n If `False` (default), they are assumed to be batch major.\r\n seed: The sampling seed.\r\n next_inputs_fn: (Optional) callable to apply to the RNN outputs to create\r\n the next input when sampling. If `None` (default), the RNN outputs will\r\n be used as the next inputs.\r\n auxiliary_inputs: An optional (structure of) auxiliary input tensors with\r\n a shape that matches `inputs` in all but (potentially) the final\r\n dimension. These tensors will be concatenated to the sampled output or\r\n the `inputs` when not sampling for use as the next input.\r\n name: Name scope for any created operations.\r\n\r\n Raises:\r\n ValueError: if `sampling_probability` is not a scalar or vector.\r\n \"\"\"\r\n with ops.name_scope(name, \"ScheduledOutputTrainingHelper\",\r\n [inputs, auxiliary_inputs, sampling_probability]):\r\n self._sampling_probability = ops.convert_to_tensor(\r\n sampling_probability, name=\"sampling_probability\")\r\n if self._sampling_probability.get_shape().ndims not in (0, 1):\r\n raise ValueError(\r\n \"sampling_probability must be either a scalar or a vector. \"\r\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\r\n\r\n if auxiliary_inputs is None:\r\n maybe_concatenated_inputs = inputs\r\n else:\r\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\r\n auxiliary_inputs = ops.convert_to_tensor(\r\n auxiliary_inputs, name=\"auxiliary_inputs\")\r\n maybe_concatenated_inputs = nest.map_structure(\r\n lambda x, y: array_ops.concat((x, y), -1),\r\n inputs, auxiliary_inputs)\r\n if not time_major:\r\n auxiliary_inputs = nest.map_structure(\r\n _transpose_batch_time, auxiliary_inputs)\r\n\r\n self._auxiliary_input_tas = (\r\n nest.map_structure(_unstack_ta, auxiliary_inputs)\r\n if auxiliary_inputs is not None else None)\r\n\r\n self._seed = seed\r\n\r\n self._next_inputs_fn = next_inputs_fn\r\n\r\n super(ScheduledOutputTrainingHelper, self).__init__(\r\n inputs=maybe_concatenated_inputs,\r\n sequence_length=sequence_length,\r\n time_major=time_major,\r\n name=name)\r\n\r\n def initialize(self, name=None):\r\n return super(ScheduledOutputTrainingHelper, self).initialize(name=name)\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n with ops.name_scope(name, \"ScheduledOutputTrainingHelperSample\",\r\n [time, outputs, state]):\r\n sampler = bernoulli.Bernoulli(probs=self._sampling_probability)\r\n return sampler.sample(sample_shape=self.batch_size, seed=self._seed)\r\n\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n with ops.name_scope(name, \"ScheduledOutputTrainingHelperNextInputs\",\r\n [time, outputs, state, sample_ids]):\r\n (finished, base_next_inputs, state) = (\r\n super(ScheduledOutputTrainingHelper, self).next_inputs(\r\n time=time,\r\n outputs=outputs,\r\n state=state,\r\n sample_ids=sample_ids,\r\n name=name))\r\n sample_ids = math_ops.cast(sample_ids, dtypes.bool)\r\n\r\n def maybe_sample():\r\n \"\"\"Perform scheduled sampling.\"\"\"\r\n\r\n def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):\r\n \"\"\"Concatenate 
outputs with auxiliary inputs, if they exist.\"\"\"\r\n if self._auxiliary_input_tas is None:\r\n return outputs_\r\n\r\n next_time = time + 1\r\n auxiliary_inputs = nest.map_structure(\r\n lambda ta: ta.read(next_time), self._auxiliary_input_tas)\r\n if indices is not None:\r\n auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)\r\n return nest.map_structure(\r\n lambda x, y: array_ops.concat((x, y), -1),\r\n outputs_, auxiliary_inputs)\r\n\r\n if self._next_inputs_fn is None:\r\n return array_ops.where(\r\n sample_ids, maybe_concatenate_auxiliary_inputs(outputs),\r\n base_next_inputs)\r\n\r\n where_sampling = math_ops.cast(\r\n array_ops.where(sample_ids), dtypes.int32)\r\n where_not_sampling = math_ops.cast(\r\n array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)\r\n outputs_sampling = array_ops.gather_nd(outputs, where_sampling)\r\n inputs_not_sampling = array_ops.gather_nd(base_next_inputs,\r\n where_not_sampling)\r\n sampled_next_inputs = maybe_concatenate_auxiliary_inputs(\r\n self._next_inputs_fn(outputs_sampling), where_sampling)\r\n\r\n base_shape = array_ops.shape(base_next_inputs)\r\n return (array_ops.scatter_nd(indices=where_sampling,\r\n updates=sampled_next_inputs,\r\n shape=base_shape)\r\n + array_ops.scatter_nd(indices=where_not_sampling,\r\n updates=inputs_not_sampling,\r\n shape=base_shape))\r\n\r\n all_finished = math_ops.reduce_all(finished)\r\n no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))\r\n next_inputs = control_flow_ops.cond(\r\n math_ops.logical_or(all_finished, no_samples),\r\n lambda: base_next_inputs, maybe_sample)\r\n return (finished, next_inputs, state)\r\n\r\n\r\nclass GreedyEmbeddingHelper(Helper):\r\n \"\"\"A helper for use during inference.\r\n\r\n Uses the argmax of the output (treated as logits) and passes the\r\n result through an embedding layer to get the next input.\r\n \"\"\"\r\n\r\n def __init__(self, embedding, start_tokens, end_token):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\r\n or the `params` argument for `embedding_lookup`. 
The returned tensor\r\n will be passed to the decoder input.\r\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\r\n end_token: `int32` scalar, the token that marks end of decoding.\r\n\r\n Raises:\r\n ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a\r\n scalar.\r\n \"\"\"\r\n if callable(embedding):\r\n self._embedding_fn = embedding\r\n else:\r\n self._embedding_fn = (\r\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\r\n\r\n self._start_tokens = ops.convert_to_tensor(\r\n start_tokens, dtype=dtypes.int32, name=\"start_tokens\")\r\n self._end_token = ops.convert_to_tensor(\r\n end_token, dtype=dtypes.int32, name=\"end_token\")\r\n if self._start_tokens.get_shape().ndims != 1:\r\n raise ValueError(\"start_tokens must be a vector\")\r\n self._batch_size = array_ops.size(start_tokens)\r\n if self._end_token.get_shape().ndims != 0:\r\n raise ValueError(\"end_token must be a scalar\")\r\n self._start_inputs = self._embedding_fn(self._start_tokens)\r\n\r\n @property\r\n def batch_size(self):\r\n return self._batch_size\r\n\r\n @property\r\n def sample_ids_shape(self):\r\n return tensor_shape.TensorShape([])\r\n\r\n @property\r\n def sample_ids_dtype(self):\r\n return dtypes.int32\r\n\r\n def initialize(self, name=None):\r\n finished = array_ops.tile([False], [self._batch_size])\r\n return (finished, self._start_inputs)\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n \"\"\"sample for GreedyEmbeddingHelper.\"\"\"\r\n del time, state # unused by sample_fn\r\n # Outputs are logits, use argmax to get the most probable id\r\n if not isinstance(outputs, ops.Tensor):\r\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\r\n type(outputs))\r\n sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)\r\n return sample_ids\r\n\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n \"\"\"next_inputs_fn for GreedyEmbeddingHelper.\"\"\"\r\n del time, outputs # unused by next_inputs_fn\r\n finished = math_ops.equal(sample_ids, self._end_token)\r\n all_finished = math_ops.reduce_all(finished)\r\n next_inputs = control_flow_ops.cond(\r\n all_finished,\r\n # If we're finished, the next_inputs value doesn't matter\r\n lambda: self._start_inputs,\r\n lambda: self._embedding_fn(sample_ids))\r\n return (finished, next_inputs, state)\r\n\r\n\r\nclass SampleEmbeddingHelper(GreedyEmbeddingHelper):\r\n \"\"\"A helper for use during inference.\r\n\r\n Uses sampling (from a distribution) instead of argmax and passes the\r\n result through an embedding layer to get the next input.\r\n \"\"\"\r\n\r\n def __init__(self, embedding, start_tokens, end_token,\r\n softmax_temperature=None, seed=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\r\n or the `params` argument for `embedding_lookup`. The returned tensor\r\n will be passed to the decoder input.\r\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\r\n end_token: `int32` scalar, the token that marks end of decoding.\r\n softmax_temperature: (Optional) `float32` scalar, value to divide the\r\n logits by before computing the softmax. Larger values (above 1.0) result\r\n in more random samples, while smaller values push the sampling\r\n distribution towards the argmax. 
Must be strictly greater than 0.\r\n Defaults to 1.0.\r\n seed: (Optional) The sampling seed.\r\n\r\n Raises:\r\n ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a\r\n scalar.\r\n \"\"\"\r\n super(SampleEmbeddingHelper, self).__init__(\r\n embedding, start_tokens, end_token)\r\n self._softmax_temperature = softmax_temperature\r\n self._seed = seed\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n \"\"\"sample for SampleEmbeddingHelper.\"\"\"\r\n del time, state # unused by sample_fn\r\n # Outputs are logits, we sample instead of argmax (greedy).\r\n if not isinstance(outputs, ops.Tensor):\r\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\r\n type(outputs))\r\n if self._softmax_temperature is None:\r\n logits = outputs\r\n else:\r\n logits = outputs / self._softmax_temperature\r\n\r\n sample_id_sampler = categorical.Categorical(logits=logits)\r\n sample_ids = sample_id_sampler.sample(seed=self._seed)\r\n\r\n return sample_ids\r\n\r\n\r\nclass InferenceHelper(Helper):\r\n \"\"\"A helper to use during inference with a custom sampling function.\"\"\"\r\n\r\n def __init__(self, sample_fn, sample_shape, sample_dtype,\r\n start_inputs, end_fn, next_inputs_fn=None):\r\n \"\"\"Initializer.\r\n\r\n Args:\r\n sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.\r\n sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,\r\n the shape of the each sample in the batch returned by `sample_fn`.\r\n sample_dtype: the dtype of the sample returned by `sample_fn`.\r\n start_inputs: The initial batch of inputs.\r\n end_fn: A callable that takes `sample_ids` and emits a `bool` vector\r\n shaped `[batch_size]` indicating whether each sample is an end token.\r\n next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns\r\n the next batch of inputs. If not provided, `sample_ids` is used as the\r\n next batch of inputs.\r\n \"\"\"\r\n self._sample_fn = sample_fn\r\n self._end_fn = end_fn\r\n self._sample_shape = tensor_shape.TensorShape(sample_shape)\r\n self._sample_dtype = sample_dtype\r\n self._next_inputs_fn = next_inputs_fn\r\n self._batch_size = array_ops.shape(start_inputs)[0]\r\n self._start_inputs = ops.convert_to_tensor(\r\n start_inputs, name=\"start_inputs\")\r\n\r\n @property\r\n def batch_size(self):\r\n return self._batch_size\r\n\r\n @property\r\n def sample_ids_shape(self):\r\n return self._sample_shape\r\n\r\n @property\r\n def sample_ids_dtype(self):\r\n return self._sample_dtype\r\n\r\n def initialize(self, name=None):\r\n finished = array_ops.tile([False], [self._batch_size])\r\n return (finished, self._start_inputs)\r\n\r\n def sample(self, time, outputs, state, name=None):\r\n del time, state # unused by sample\r\n return self._sample_fn(outputs)\r\n\r\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\r\n del time, outputs # unused by next_inputs\r\n if self._next_inputs_fn is None:\r\n next_inputs = sample_ids\r\n else:\r\n next_inputs = self._next_inputs_fn(sample_ids)\r\n finished = self._end_fn(sample_ids)\r\n return (finished, next_inputs, state)\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Configurations for TensorFlow Debugger (TFDBG) command-line interfaces.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport json\r\nimport os\r\n\r\nfrom tensorflow.python.debug.cli import debugger_cli_common\r\nfrom tensorflow.python.platform import gfile\r\n\r\nRL = debugger_cli_common.RichLine\r\n\r\n\r\nclass CLIConfig(object):\r\n  \"\"\"Client-facing configurations for TFDBG command-line interfaces.\"\"\"\r\n\r\n  _CONFIG_FILE_NAME = \".tfdbg_config\"\r\n\r\n  _DEFAULT_CONFIG = [\r\n      (\"graph_recursion_depth\", 20),\r\n      (\"mouse_mode\", True),\r\n  ]\r\n\r\n  def __init__(self, config_file_path=None):\r\n    self._config_file_path = (config_file_path or\r\n                              self._default_config_file_path())\r\n    self._config = collections.OrderedDict(self._DEFAULT_CONFIG)\r\n    if gfile.Exists(self._config_file_path):\r\n      config = self._load_from_file()\r\n      for key, value in config.items():\r\n        self._config[key] = value\r\n    self._save_to_file()\r\n\r\n    self._set_callbacks = dict()\r\n\r\n  def get(self, property_name):\r\n    if property_name not in self._config:\r\n      raise KeyError(\"%s is not a valid property name.\" % property_name)\r\n    return self._config[property_name]\r\n\r\n  def set(self, property_name, property_val):\r\n    \"\"\"Set the value of a property.\r\n\r\n    Supports limited property value types: `bool`, `int` and `str`.\r\n\r\n    Args:\r\n      property_name: Name of the property.\r\n      property_val: Value of the property. 
If the property has `bool` type and\r\n this argument has `str` type, the `str` value will be parsed as a `bool`\r\n\r\n Raises:\r\n ValueError: if a `str` property_value fails to be parsed as a `bool`.\r\n KeyError: if `property_name` is an invalid property name.\r\n \"\"\"\r\n if property_name not in self._config:\r\n raise KeyError(\"%s is not a valid property name.\" % property_name)\r\n\r\n orig_val = self._config[property_name]\r\n if isinstance(orig_val, bool):\r\n if isinstance(property_val, str):\r\n if property_val.lower() in (\"1\", \"true\", \"t\", \"yes\", \"y\", \"on\"):\r\n property_val = True\r\n elif property_val.lower() in (\"0\", \"false\", \"f\", \"no\", \"n\", \"off\"):\r\n property_val = False\r\n else:\r\n raise ValueError(\r\n \"Invalid string value for bool type: %s\" % property_val)\r\n else:\r\n property_val = bool(property_val)\r\n elif isinstance(orig_val, int):\r\n property_val = int(property_val)\r\n elif isinstance(orig_val, str):\r\n property_val = str(property_val)\r\n else:\r\n raise TypeError(\"Unsupported property type: %s\" % type(orig_val))\r\n self._config[property_name] = property_val\r\n self._save_to_file()\r\n\r\n # Invoke set-callback.\r\n if property_name in self._set_callbacks:\r\n self._set_callbacks[property_name](self._config)\r\n\r\n def set_callback(self, property_name, callback):\r\n \"\"\"Set a set-callback for given property.\r\n\r\n Args:\r\n property_name: Name of the property.\r\n callback: The callback as a `callable` of signature:\r\n def cbk(config):\r\n where config is the config after it is set to the new value.\r\n The callback is invoked each time the set() method is called with the\r\n matching property_name.\r\n\r\n Raises:\r\n KeyError: If property_name does not exist.\r\n TypeError: If `callback` is not callable.\r\n \"\"\"\r\n if property_name not in self._config:\r\n raise KeyError(\"%s is not a valid property name.\" % property_name)\r\n if not callable(callback):\r\n raise TypeError(\"The callback object provided is not callable.\")\r\n self._set_callbacks[property_name] = callback\r\n\r\n def _default_config_file_path(self):\r\n return os.path.join(os.path.expanduser(\"~\"), self._CONFIG_FILE_NAME)\r\n\r\n def _save_to_file(self):\r\n try:\r\n with gfile.Open(self._config_file_path, \"w\") as config_file:\r\n json.dump(self._config, config_file)\r\n except IOError:\r\n pass\r\n\r\n def summarize(self, highlight=None):\r\n \"\"\"Get a text summary of the config.\r\n\r\n Args:\r\n highlight: A property name to highlight in the output.\r\n\r\n Returns:\r\n A `RichTextLines` output.\r\n \"\"\"\r\n lines = [RL(\"Command-line configuration:\", \"bold\"), RL(\"\")]\r\n for name, val in self._config.items():\r\n highlight_attr = \"bold\" if name == highlight else None\r\n line = RL(\" \")\r\n line += RL(name, [\"underline\", highlight_attr])\r\n line += RL(\": \")\r\n line += RL(str(val), font_attr=highlight_attr)\r\n lines.append(line)\r\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\r\n\r\n def _load_from_file(self):\r\n try:\r\n with gfile.Open(self._config_file_path, \"r\") as config_file:\r\n config_dict = json.load(config_file)\r\n config = collections.OrderedDict()\r\n for key in sorted(config_dict.keys()):\r\n config[key] = config_dict[key]\r\n return config\r\n except (IOError, ValueError):\r\n # The reading of the config file may fail due to IO issues or file\r\n # corruption. We do not want tfdbg to error out just because of that.\r\n return dict()\r\n" ]
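
The decoder helper classes in the first file of this entry are designed to be wired into tf.contrib.seq2seq decoding. A minimal construction sketch, assuming TensorFlow 1.x with tf.contrib available; the tensor shapes below are made up purely for illustration:

import tensorflow as tf

batch_size, max_time, dim = 4, 7, 32
decoder_inputs = tf.zeros([batch_size, max_time, dim])  # stand-in ground-truth inputs
decoder_lengths = tf.fill([batch_size], max_time)       # int32 [batch_size] lengths

cell = tf.nn.rnn_cell.GRUCell(num_units=dim)
# TrainingHelper feeds the ground-truth inputs step by step (teacher forcing).
helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs, decoder_lengths)
decoder = tf.contrib.seq2seq.BasicDecoder(
    cell, helper, initial_state=cell.zero_state(batch_size, tf.float32))
outputs, final_state, final_lengths = tf.contrib.seq2seq.dynamic_decode(decoder)

Swapping the helper for GreedyEmbeddingHelper (or SampleEmbeddingHelper) gives inference-time decoding with the same decoder object.
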
[ [ "tensorflow.python.keras._impl.keras.backend.elu", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.backend.abs", "tensorflow.python.keras._impl.keras.activations.softmax", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.keras._impl.keras.backend.floatx", "tensorflow.python.keras._impl.keras.backend.greater", "tensorflow.python.keras._impl.keras.backend.relu", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras._impl.keras.backend.cast_to_floatx", "tensorflow.python.keras._impl.keras.backend.pattern_broadcast", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.keras._impl.keras.backend.backend", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.constraints.get" ], [ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras._impl.keras.preprocessing.sequence._remove_long_seq", "numpy.random.seed", "tensorflow.python.keras._impl.keras.utils.data_utils.get_file", "numpy.random.shuffle", "numpy.concatenate", "tensorflow.python.util.tf_export.tf_export", "numpy.load", "numpy.array" ], [ "tensorflow.python.framework.ops.name_scope", "tensorflow.contrib.distributions.python.ops.distribution_util.make_diag_scale" ], [ "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.eager.execute.make_bool", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.eager.execute.make_float", "tensorflow.python.eager.context.context", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.eager.execute.execute" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.nn.softplus", "tensorflow.python.util.tf_export.tf_export", "numpy.finfo", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.convert_to_tensor" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.split", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.array_ops.stack" ], [ "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "tensorflow.python.util.all_util.remove_undocumented", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.rsqrt", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.ops.array_ops.pad", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.array_ops.stack" ], [ "tensorflow.contrib.slim.learning.train" ], [ 
"tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.distributions.special_math.ndtri", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.check_ops.assert_same_float_dtype", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.nn.softplus", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.distributions.kullback_leibler.RegisterKL", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.array_ops.shape", "numpy.sqrt", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.nn.relu", "tensorflow.python.ops.distributions.special_math.erfinv", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.eager.execute.convert_to_mixed_eager_tensors", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.eager.execute.make_type", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.eager.context.context", "tensorflow.python.framework.op_def_registry.register_op_list" ], [ "tensorflow.contrib.cloud.python.ops.gen_bigquery_reader_ops.big_query_reader", "tensorflow.python.framework.ops.NotDifferentiable", "tensorflow.contrib.cloud.python.ops.gen_bigquery_reader_ops.generate_big_query_reader_partitions" ], [ "tensorflow.python.estimator.util.fn_args", "tensorflow.python.training.saver.Saver", "tensorflow.python.layers.base._get_default_graph_uid_map", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.variable_scope._get_default_variable_store", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.training.checkpoint_utils.list_variables", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.training.checkpoint_utils.load_checkpoint" ], [ "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.linalg_ops.eye", "tensorflow.contrib.timeseries.python.timeseries.state_space_models.state_space_model.StateSpaceModelConfiguration", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.pad", "tensorflow.contrib.timeseries.python.timeseries.math_utils.variable_covariance_matrix" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.data.util.nest.map_structure_up_to", 
"tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "numpy.iinfo", "tensorflow.python.framework.sparse_tensor.SparseTensor.from_value", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.gen_dataset_ops.sparse_tensor_slice_dataset", "tensorflow.python.framework.function.Defun", "tensorflow.python.data.util.sparse.deserialize_sparse_tensors", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.data.util.sparse.get_classes", "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.data.ops.iterator_ops.Iterator", "tensorflow.python.data.util.nest.assert_same_structure", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.data.util.sparse.serialize_sparse_tensors", "tensorflow.python.ops.script_ops.py_func", "tensorflow.python.data.util.random_seed.get_seed", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.data.util.nest.flatten_up_to", "tensorflow.python.data.util.sparse.any_sparse", "tensorflow.python.data.util.nest.map_structure", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.util.sparse.as_dense_types", "tensorflow.python.data.util.nest.flatten", "numpy.array", "tensorflow.python.framework.sparse_tensor.is_sparse", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.framework.tensor_shape.vector", "tensorflow.python.ops.gen_io_ops.matching_files", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.script_ops.FuncRegistry._convert", "tensorflow.python.data.util.sparse.as_dense_shapes", "tensorflow.python.data.util.sparse.serialize_many_sparse_tensors", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.layers.core.dense", "tensorflow.python.ops.template.make_template", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.split", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.platform.tf_logging.info", "tensorflow.core.protobuf.config_pb2.GPUOptions", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.util.deprecation.deprecated" ], [ "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.nn.softmax", "tensorflow.python.ops.array_ops.shape", "tensorflow.contrib.metrics.python.ops.metric_ops.streaming_mean", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.math_ops.to_float", "tensorflow.contrib.losses.python.losses.loss_ops.hinge_loss", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.contrib.metrics.python.ops.metric_ops.streaming_accuracy", "tensorflow.contrib.framework.deprecated", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.array_ops.expand_dims", 
"tensorflow.python.ops.math_ops.reduce_sum" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.keras._impl.keras.backend.resize_volumes", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.keras._impl.keras.activations.get", "tensorflow.python.keras._impl.keras.utils.conv_utils.normalize_data_format", "tensorflow.python.keras._impl.keras.backend.image_data_format", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.keras._impl.keras.backend.temporal_padding", "tensorflow.python.keras._impl.keras.backend.resize_images", "tensorflow.python.keras._impl.keras.backend.spatial_2d_padding", "tensorflow.python.keras._impl.keras.engine.InputSpec", "tensorflow.python.keras._impl.keras.backend.repeat_elements", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras._impl.keras.backend.spatial_3d_padding", "tensorflow.python.keras._impl.keras.utils.conv_utils.normalize_tuple", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.activations.serialize", "tensorflow.python.keras._impl.keras.constraints.get" ], [ "tensorflow.python.ops.check_ops.assert_less", "tensorflow.contrib.distributions.python.ops.shape._DistributionShape", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.predictor.core_estimator_predictor.CoreEstimatorPredictor", "tensorflow.contrib.predictor.contrib_estimator_predictor.ContribEstimatorPredictor", "tensorflow.contrib.predictor.saved_model_predictor.SavedModelPredictor" ], [ "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.array_ops.stop_gradient" ], [ "tensorflow.python.util.tf_inspect.getargspec" ], [ "tensorflow.python.ops.gen_array_ops.fill", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.gather_nd", "tensorflow.python.ops.math_ops.reduce_any", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.math_ops.logical_not", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.distributions.bernoulli.Bernoulli", "tensorflow.python.ops.math_ops.logical_or", "tensorflow.python.ops.embedding_ops.embedding_lookup", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.distributions.categorical.Categorical", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.array_ops.scatter_nd", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.framework.ops.name_scope" ], [ "tensorflow.python.platform.gfile.Exists", "tensorflow.python.platform.gfile.Open", "tensorflow.python.debug.cli.debugger_cli_common.rich_text_lines_from_rich_line_list" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "2.2", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", 
"2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "1.4", "2.6", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "1.2", "2.10" ] } ]
WangY0906/mmdetection-for-study
[ "c89703006a2a5250f4d1c71e0aad958d72526885" ]
[ "mmdet/models/detectors/cascade_rcnn.py" ]
[ "from __future__ import division\n\nimport torch\nimport torch.nn as nn\n\nfrom .base import BaseDetector\nfrom .test_mixins import RPNTestMixin\nfrom .. import builder\nfrom ..registry import DETECTORS\nfrom mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,\n merge_aug_masks)\n\n\[email protected]_module\nclass CascadeRCNN(BaseDetector, RPNTestMixin):\n\n def __init__(self,\n num_stages,\n backbone,\n neck=None,\n shared_head=None,\n rpn_head=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n super(CascadeRCNN, self).__init__()\n\n self.num_stages = num_stages\n self.backbone = builder.build_backbone(backbone)\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if rpn_head is not None:\n self.rpn_head = builder.build_head(rpn_head)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if bbox_head is not None:\n self.bbox_roi_extractor = nn.ModuleList()\n self.bbox_head = nn.ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(\n builder.build_roi_extractor(roi_extractor))\n self.bbox_head.append(builder.build_head(head))\n\n if mask_head is not None:\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(builder.build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = nn.ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n builder.build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n def init_weights(self, pretrained=None):\n super(CascadeRCNN, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n for i in range(self.num_stages):\n if self.with_bbox:\n self.bbox_roi_extractor[i].init_weights()\n self.bbox_head[i].init_weights()\n if self.with_mask:\n if not self.share_roi_extractor:\n self.mask_roi_extractor[i].init_weights()\n self.mask_head[i].init_weights()\n\n def extract_feat(self, img):\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_train(self,\n img,\n img_meta,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n 
proposals=None):\n x = self.extract_feat(img)\n\n losses = dict()\n\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,\n self.train_cfg.rpn)\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_meta, proposal_cfg)\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg.rcnn[i]\n lw = self.train_cfg.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(rcnn_train_cfg.assigner)\n bbox_sampler = build_sampler(\n rcnn_train_cfg.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_roi_extractor = self.bbox_roi_extractor[i]\n bbox_head = self.bbox_head[i]\n\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg)\n loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)\n for name, value in loss_bbox.items():\n losses['s{}.{}'.format(i, name)] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n mask_roi_extractor = self.mask_roi_extractor[i]\n pos_rois = bbox2roi(\n [res.pos_bboxes for res in sampling_results])\n mask_feats = mask_roi_extractor(\n x[:mask_roi_extractor.num_inputs], pos_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n # reuse positive bbox feats\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_feats[pos_inds]\n mask_head = self.mask_head[i]\n mask_pred = mask_head(mask_feats)\n mask_targets = mask_head.get_target(sampling_results, gt_masks,\n rcnn_train_cfg)\n pos_labels = torch.cat(\n [res.pos_gt_labels for res in sampling_results])\n loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)\n for name, value in loss_mask.items():\n losses['s{}.{}'.format(i, name)] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n roi_labels = bbox_targets[0] # bbox_targets is a tuple\n with torch.no_grad():\n proposal_list = bbox_head.refine_bboxes(\n rois, roi_labels, bbox_pred, pos_is_gts, img_meta)\n\n return losses\n\n def simple_test(self, img, img_meta, proposals=None, 
rescale=False):\n        x = self.extract_feat(img)\n        proposal_list = self.simple_test_rpn(\n            x, img_meta, self.test_cfg.rpn) if proposals is None else proposals\n\n        img_shape = img_meta[0]['img_shape']\n        ori_shape = img_meta[0]['ori_shape']\n        scale_factor = img_meta[0]['scale_factor']\n\n        # \"ms\" in variable names means multi-stage\n        ms_bbox_result = {}\n        ms_segm_result = {}\n        ms_scores = []\n        rcnn_test_cfg = self.test_cfg.rcnn\n\n        rois = bbox2roi(proposal_list)\n        for i in range(self.num_stages):\n            bbox_roi_extractor = self.bbox_roi_extractor[i]\n            bbox_head = self.bbox_head[i]\n\n            bbox_feats = bbox_roi_extractor(\n                x[:len(bbox_roi_extractor.featmap_strides)], rois)\n            if self.with_shared_head:\n                bbox_feats = self.shared_head(bbox_feats)\n\n            cls_score, bbox_pred = bbox_head(bbox_feats)\n            ms_scores.append(cls_score)\n\n            if self.test_cfg.keep_all_stages:\n                det_bboxes, det_labels = bbox_head.get_det_bboxes(\n                    rois,\n                    cls_score,\n                    bbox_pred,\n                    img_shape,\n                    scale_factor,\n                    rescale=rescale,\n                    cfg=rcnn_test_cfg)\n                bbox_result = bbox2result(det_bboxes, det_labels,\n                                          bbox_head.num_classes)\n                ms_bbox_result['stage{}'.format(i)] = bbox_result\n\n                if self.with_mask:\n                    mask_roi_extractor = self.mask_roi_extractor[i]\n                    mask_head = self.mask_head[i]\n                    if det_bboxes.shape[0] == 0:\n                        mask_classes = mask_head.num_classes - 1\n                        segm_result = [[] for _ in range(mask_classes)]\n                    else:\n                        _bboxes = (\n                            det_bboxes[:, :4] *\n                            scale_factor if rescale else det_bboxes)\n                        mask_rois = bbox2roi([_bboxes])\n                        mask_feats = mask_roi_extractor(\n                            x[:len(mask_roi_extractor.featmap_strides)],\n                            mask_rois)\n                        if self.with_shared_head:\n                            mask_feats = self.shared_head(mask_feats)\n                        mask_pred = mask_head(mask_feats)\n                        segm_result = mask_head.get_seg_masks(\n                            mask_pred, _bboxes, det_labels, rcnn_test_cfg,\n                            ori_shape, scale_factor, rescale)\n                    ms_segm_result['stage{}'.format(i)] = segm_result\n\n            if i < self.num_stages - 1:\n                bbox_label = cls_score.argmax(dim=1)\n                rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,\n                                                  img_meta[0])\n\n        cls_score = sum(ms_scores) / self.num_stages\n        det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(\n            rois,\n            cls_score,\n            bbox_pred,\n            img_shape,\n            scale_factor,\n            rescale=rescale,\n            cfg=rcnn_test_cfg)\n        bbox_result = bbox2result(det_bboxes, det_labels,\n                                  self.bbox_head[-1].num_classes)\n        ms_bbox_result['ensemble'] = bbox_result\n\n        if self.with_mask:\n            if det_bboxes.shape[0] == 0:\n                mask_classes = self.mask_head[-1].num_classes - 1\n                segm_result = [[] for _ in range(mask_classes)]\n            else:\n                _bboxes = (\n                    det_bboxes[:, :4] *\n                    scale_factor if rescale else det_bboxes)\n                mask_rois = bbox2roi([_bboxes])\n                aug_masks = []\n                for i in range(self.num_stages):\n                    mask_roi_extractor = self.mask_roi_extractor[i]\n                    mask_feats = mask_roi_extractor(\n                        x[:len(mask_roi_extractor.featmap_strides)], mask_rois)\n                    if self.with_shared_head:\n                        mask_feats = self.shared_head(mask_feats)\n                    mask_pred = self.mask_head[i](mask_feats)\n                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())\n                merged_masks = merge_aug_masks(aug_masks,\n                                               [img_meta] * self.num_stages,\n                                               self.test_cfg.rcnn)\n                segm_result = self.mask_head[-1].get_seg_masks(\n                    merged_masks, _bboxes, det_labels, rcnn_test_cfg,\n                    ori_shape, scale_factor, rescale)\n            ms_segm_result['ensemble'] = segm_result\n\n        if not self.test_cfg.keep_all_stages:\n            if self.with_mask:\n                results = (ms_bbox_result['ensemble'],\n                           ms_segm_result['ensemble'])\n            else:\n                results = ms_bbox_result['ensemble']\n        else:\n            if self.with_mask:\n                results = {\n                    stage: (ms_bbox_result[stage], 
ms_segm_result[stage])\n for stage in ms_bbox_result\n }\n else:\n results = ms_bbox_result\n\n return results\n\n def aug_test(self, img, img_meta, proposals=None, rescale=False):\n raise NotImplementedError\n\n def show_result(self, data, result, img_norm_cfg, **kwargs):\n if self.with_mask:\n ms_bbox_result, ms_segm_result = result\n if isinstance(ms_bbox_result, dict):\n result = (ms_bbox_result['ensemble'],\n ms_segm_result['ensemble'])\n else:\n if isinstance(result, dict):\n result = result['ensemble']\n super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,\n **kwargs)\n" ]
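
Two details of the CascadeRCNN code above are easy to miss: during training each stage's losses are scaled by train_cfg.stage_loss_weights, and at test time simple_test averages the per-stage classification scores before the final decode. A standalone PyTorch sketch of both, with made-up tensor sizes and weights:

import torch

num_stages, num_rois, num_classes = 3, 5, 81
stage_loss_weights = [1.0, 0.5, 0.25]  # illustrative weights only
stage_losses = [torch.rand(1) for _ in range(num_stages)]
total_loss = sum(w * l for w, l in zip(stage_loss_weights, stage_losses))

ms_scores = [torch.rand(num_rois, num_classes) for _ in range(num_stages)]
cls_score = sum(ms_scores) / num_stages  # same averaging as in simple_test()
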
[ [ "torch.ones", "torch.zeros", "torch.cat", "torch.nn.ModuleList", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KevinKronk/multiclass-classification
[ "8a938e5dd3418caad24118f75fa11f2aab856b2f" ]
[ "multiclass_classification/cost.py" ]
[ "import numpy as np\nfrom scipy.special import expit\n\n\ndef log_cost(theta, x, y_i, hyper_p):\n \"\"\"\n Logistic regression cost function with regularization.\n\n Parameters\n ----------\n theta : array_like\n Shape (n+1,). Parameter values for function.\n\n x : array_like\n Shape (m, n+1). Features in model.\n\n y_i : array_like\n Shape (m,). Labels for in current class i (1) or not (0).\n\n hyper_p : float\n Value of the hyperparameter for regularization.\n\n Returns\n -------\n cost : float\n Value of cost function at given parameters.\n \"\"\"\n\n size = y_i.size\n\n h = expit(x @ theta.T)\n\n first = -y_i * np.log(h)\n second = -(1 - y_i) * np.log(1 - h)\n reg = (hyper_p / (2 * size)) * np.sum(np.power(theta, 2))\n\n cost = (np.sum(first + second) / size) + reg\n return cost\n" ]
[ [ "numpy.log", "numpy.sum", "scipy.special.expit", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Swapnil8991/RLonASG
[ "d040fb5ac4431198b92544958d70924d5bec92ff" ]
[ "GameEnvs/ConnectFour/generateData.py" ]
[ "import numpy as np\n\nfrom C4Board import C4Board\nfrom random import seed, choice\nfrom os import urandom\nfrom time import time\nfrom itertools import cycle\nfrom sys import argv\n\n\ndef getTrainingData(noOfGames, dataGenFlag, inpTrainFile, outTrainFile):\n\t\n\tturnFlag = 0\n\tgameEndState = 0\n\t\n\ttempOutTrainList = [] # stores expected input position\n\tboardStates = [] # stores the board state at input\n\n\tinpTrainList = []\n\toutTrainList = [] \n\n\tfor i in range(noOfGames):\n\t\tboardStates.append(b.board.tolist())\n\t\t# print(\"\\n First boardState: \\n\", boardStates)\n\t\temptyPositions = list(range(0, 7))\n\n\t\twhile b.count < 42:\n\t\t\tif b.count > 7:\n\t\t\t\tstatus, wSindex = b.checkWin()\n\t\t\t\tif status == 0:\n\t\t\t\t\tprint(f\"Game Draw! {b.getwStateSum()}\\n\")\n\t\t\t\t\tbreak\n\t\t\t\telif status == 1 and dataGenFlag == 1 and turnFlag == 1:\n\t\t\t\t\tprint(f\"Player X Wins! (wState[{wSindex}]: {b.wState[wSindex]}) {b.getwStateSum()}\\n\")\n\t\t\t\t\tfor i in range(len(tempOutTrainList)):\n\t\t\t\t\t\tif i % 2 == 0:\n\t\t\t\t\t\t\toutTrainList.append(tempOutTrainList[i])\n\t\t\t\t\t\n\t\t\t\t\t\t\tinpTrainList.append(boardStates[i])\n\t\t\t\t\tbreak\n\t\t\t\telif status == 2:\n\t\t\t\t\tprint(f\"Player O Wins! (wState[{wSindex}]: {b.wState[wSindex]}) {b.getwStateSum()}\\n\")\n\t\t\t\t\tbreak\n\n\t\t\tcPChar = next(playerCharToggler)\n\t\t\tcPNum = next(playerNumToggler)\n\t\t\tif cPChar == 'X' and turnFlag == 0:\n\t\t\t\tturnFlag = 1\n\t\t\telif cPChar == 'Y' and turnFlag == 0:\n\t\t\t\tturnFlag = 2\n\t\t\tposition = choice(emptyPositions)\n\t\t\t# print(f\"\\nPlayer {cPChar}: {position}\")\n\t\t\t# b.makeMove(cPNum, position)\n\t\t\t# print(f\"\\nPlayer {cPChar}: \", end='', flush=True)\n\t\t\tb.makeMove(cPNum, position )\n\t\t\tprint(f\"\\nPlayer {cPChar}: \", end='', flush=True)\n\t\t\tboardStates.append(b.board.tolist())\n\t\t\t# print(\"\\nboardStates: \\n\", boardStates)\n\n\t\t\tzeroList = [0, 0, 0, 0, 0, 0, 0]\n\t\t\tzeroList[position] = 1\n\t\t\ttempOutTrainList.append(zeroList)\n\t\t\t# print(\"\\nExpected output by NN: \\n\", tempOutTrainList)\n\t\t\tb.printBoard()\n\t\tb.resetBoard()\n\t\t\n\tprint(\"\\n\\n inpTrainList: \\n\", len(inpTrainList))\n\tprint(\"\\n\\n outTrainList: \\n\", len(outTrainList))\n\n\txOutArray = np.array(outTrainList)\n\txInpArray = np.array(inpTrainList)\n\tnp.savetxt('__data__/' + outTrainFile, xOutArray, fmt='%d')\n\tnp.savetxt('__data__/' + inpTrainFile, xInpArray, fmt='%d')\n\n\t\nif __name__ == '__main__':\n\n\tif len(argv) != 5:\n\t\tprint(\"Provide no. of games, dataGenFlag (1|2), inpTrainFile, outTrainFile\")\n\telse:\n\t\tstartTime = time()\n\n\t\tb = C4Board()\n\t\tplayerCharToggler = cycle(['X', 'O']) # D-Char\n\t\tplayerNumToggler = cycle([3, -2]) # D-Val\n\t\tseed(urandom(128))\n\n\t\tgetTrainingData(int(argv[1]), int(argv[2]), argv[3], argv[4])\n\n\t\tprint(f\"Time taken: {time() - startTime}s\\n\")" ]
[ [ "numpy.savetxt", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alcunha/iwildcam2021ufam
[ "243c3e4b91d5756d1e7fcdf8ae75344a373d3b84", "243c3e4b91d5756d1e7fcdf8ae75344a373d3b84" ]
[ "classification/eval_main.py", "classification/multi_stage_train.py" ]
[ "# Copyright 2021 Fagner Cunha\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Tool to evaluate classifiers.\n\nSet the environment variable PYTHONHASHSEED to a reproducible value\nbefore you start the python process to ensure that the model trains\nor infers with reproducibility\n\"\"\"\nimport json\nimport os\nimport random\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\n\nfrom iwildcamlib import CategoryMap\nimport bags\nimport dataloader\nimport geoprior\nimport model_builder\n\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_name', default='efficientnet-b0',\n help=('Model name of the archtecture'))\n\nflags.DEFINE_integer(\n 'input_size', default=224,\n help=('Input size of the model'))\n\nflags.DEFINE_bool(\n 'use_bags', default=False,\n help=('Use Balanced Group Softmax to train model'))\n\nflags.DEFINE_integer(\n 'empty_class_id', default=0,\n help=('Empty class id for balanced group softmax'))\n\nflags.DEFINE_bool(\n 'use_full_image', default=False,\n help=('Ignore bounding boxes and use full image'))\n\nflags.DEFINE_integer(\n 'batch_size', default=32,\n help=('Batch size used during training.'))\n\nflags.DEFINE_string(\n 'ckpt_dir', default=None,\n help=('Location of the model checkpoint files'))\n\nflags.DEFINE_string(\n 'annotations_json', default=None,\n help=('Path to json file containing the training annotations json for'\n ' the iWildCam2021 competition'))\n\nflags.DEFINE_string(\n 'train_dataset_split', default=None,\n help=('Path to json file containing the train/validation split based on'\n ' locations.'))\n\nflags.DEFINE_string(\n 'test_info_json', default=None,\n help=('Path to json file containing the test information json for'\n ' the iWildCam2021 competition'))\n\nflags.DEFINE_string(\n 'dataset_dir', default=None,\n help=('Path to directory containing test images.'))\n\nflags.DEFINE_string(\n 'megadetector_results_json', default=None,\n help=('Path to json file containing megadetector results.'))\n\nflags.DEFINE_integer(\n 'log_frequence', default=500,\n help=('Log prediction every n steps'))\n\nflags.DEFINE_string(\n 'geo_prior_ckpt_dir', default=None,\n help=('Location of the checkpoint files for the geo prior model'))\n\nflags.DEFINE_integer(\n 'geo_prior_input_size', default=6,\n help=('Input size for the geo prior model'))\n\nflags.DEFINE_bool(\n 'use_bn_geo_prior', default=False,\n help=('Include Batch Normalization to the geo prior model'))\n\nflags.DEFINE_integer(\n 'embed_dim', default=256,\n help=('Embedding dimension for geo prior model'))\n\nif 'random_seed' not in list(FLAGS):\n flags.DEFINE_integer(\n 'random_seed', default=42,\n help=('Random seed for reproductible 
experiments'))\n\nflags.mark_flag_as_required('ckpt_dir')\nflags.mark_flag_as_required('annotations_json')\nflags.mark_flag_as_required('test_info_json')\nflags.mark_flag_as_required('dataset_dir')\nflags.mark_flag_as_required('megadetector_results_json')\n\ndef load_train_validation_split():\n if FLAGS.train_dataset_split is None:\n return None, None\n\n with tf.io.gfile.GFile(FLAGS.train_dataset_split, 'r') as json_file:\n json_data = json.load(json_file)\n\n return json_data['train'], json_data['validation']\n\ndef _load_model(num_classes, bal_group_softmax=None):\n model = model_builder.create(model_name=FLAGS.model_name,\n num_classes=num_classes,\n input_size=FLAGS.input_size,\n unfreeze_layers=0,\n bags=bal_group_softmax)\n checkpoint_path = os.path.join(FLAGS.ckpt_dir, \"ckp\")\n model.load_weights(checkpoint_path)\n\n if bal_group_softmax is not None:\n model = bal_group_softmax.create_prediction_model(model)\n\n return model\n\ndef _load_geo_prior_model(num_classes):\n if FLAGS.geo_prior_ckpt_dir is not None:\n rand_sample_generator = dataloader.RandSpatioTemporalGenerator()\n\n geo_prior_model = geoprior.FCNet(\n num_inputs=FLAGS.geo_prior_input_size,\n embed_dim=FLAGS.embed_dim,\n num_classes=num_classes,\n use_bn=FLAGS.use_bn_geo_prior,\n rand_sample_generator=rand_sample_generator)\n\n checkpoint_path = os.path.join(FLAGS.geo_prior_ckpt_dir, \"ckp\")\n geo_prior_model.load_weights(checkpoint_path)\n\n return geo_prior_model\n else:\n return None\n\ndef _build_input_data(category_map):\n include_geo_data = FLAGS.geo_prior_ckpt_dir is not None\n\n input_data = dataloader.JsonWBBoxInputProcessor(\n dataset_json=FLAGS.test_info_json,\n dataset_dir=FLAGS.dataset_dir,\n megadetector_results_json=FLAGS.megadetector_results_json,\n batch_size=FLAGS.batch_size,\n batch_drop_remainder=False,\n category_map=category_map,\n is_training=False,\n output_size=FLAGS.input_size,\n crop_mode='full' if FLAGS.use_full_image else 'bbox',\n provide_validity_info_output=include_geo_data,\n provide_coord_date_encoded_input=include_geo_data,\n provide_instance_id=True,\n seed=FLAGS.random_seed)\n\n return input_data.make_source_dataset()\n\ndef mix_predictions(cnn_preds, prior_preds, valid):\n valid = tf.expand_dims(valid, axis=-1)\n return cnn_preds*prior_preds*valid + (1 - valid)*cnn_preds\n\ndef predict_w_geo_prior(batch, metadata, model, geo_prior_model):\n cnn_input = batch[:-1]\n prior_input = batch[-1]\n label, valid, _ = metadata\n\n cnn_preds = model(cnn_input, training=False)\n prior_preds = geo_prior_model(prior_input, training=False)\n preds = mix_predictions(cnn_preds, prior_preds, valid)\n\n return label, preds\n\ndef _decode_one_hot(one_hot_tensor):\n return tf.argmax(one_hot_tensor, axis=1).numpy()\n\ndef predict_classifier(model, geo_prior_model, dataset):\n labels = []\n predictions = []\n count = 0\n\n for batch, metadata in dataset:\n if geo_prior_model is not None:\n label, preds = predict_w_geo_prior(batch,\n metadata,\n model,\n geo_prior_model)\n else:\n preds = model(batch, training=False)\n label, _ = metadata\n\n labels += list(_decode_one_hot(label))\n predictions += list(_decode_one_hot(preds))\n\n if count % FLAGS.log_frequence == 0:\n tf.compat.v1.logging.info('Finished eval step %d' % count)\n count += 1\n\n return labels, predictions\n\ndef set_random_seeds():\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n tf.random.set_seed(FLAGS.random_seed)\n\ndef main(_):\n set_random_seeds()\n\n category_map = CategoryMap(FLAGS.annotations_json)\n 
train_loc, _ = load_train_validation_split()\n bal_group_softmax = bags.BalancedGroupSoftmax(\n FLAGS.annotations_json,\n category_map,\n FLAGS.empty_class_id,\n selected_locations=train_loc) if FLAGS.use_bags else None\n dataset, _ = _build_input_data(category_map)\n num_classes = category_map.get_num_classes()\n model = _load_model(num_classes, bal_group_softmax)\n geo_prior_model = _load_geo_prior_model(num_classes)\n\n labels, predictions = predict_classifier(model, geo_prior_model, dataset)\n\n accuracy = accuracy_score(labels, predictions)\n\n print(\"Accuracy: %s\" % accuracy)\n\nif __name__ == '__main__':\n app.run(main)\n", "# Copyright 2021 Fagner Cunha\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Tool to train classifiers.\n\nSet the environment variable PYTHONHASHSEED to a reproducible value\nbefore you start the python process to ensure that the model trains\nor infers with reproducibility\n\"\"\"\nimport json\nimport os\nimport random\n\nfrom absl import app\nfrom absl import flags\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom iwildcamlib import CategoryMap\nimport bags\nimport dataloader\nimport model_builder\nimport train_image_classifier\nimport utils\n\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'annotations_json', default=None,\n help=('Path to json file containing the training annotations json for'\n ' the iWildCam2021 competition'))\n\nflags.DEFINE_string(\n 'dataset_dir', default=None,\n help=('Path to directory containing training images.'))\n\nflags.DEFINE_string(\n 'megadetector_results_json', default=None,\n help=('Path to json file containing megadetector results.'))\n\nflags.DEFINE_string(\n 'train_dataset_split', default=None,\n help=('Path to json file containing the train/validation split based on'\n ' locations.'))\n\nflags.DEFINE_integer(\n 'input_size', default=224,\n help=('Input size of the model'))\n\nflags.DEFINE_integer(\n 'input_size_stage3', default=260,\n help=('Input size of the model in stage 3 (fix train/test resolution)'))\n\nflags.DEFINE_string(\n 'base_model_weights', default='imagenet',\n help=('Path to h5 weights file to be loaded into the base model during'\n ' the model build procedure.'))\n\nflags.DEFINE_bool(\n 'use_bags', default=False,\n help=('Use Balanced Group Softmax to train model'))\n\nflags.DEFINE_integer(\n 'empty_class_id', default=0,\n help=('Empty class id for balanced group softmax'))\n\nflags.DEFINE_float(\n 'label_smoothing', default=0.1,\n help=('When 0, no smoothing occurs. 
When > 0, we apply Label Smoothing to'\n ' the labels during training using this value for parameter e.'))\n\nflags.DEFINE_integer(\n 'batch_size', default=32,\n help=('Batch size used during training.'))\n\nflags.DEFINE_integer(\n 'randaug_num_layers', default=None,\n help=('Number of operations to be applied by Randaugment'))\n\nflags.DEFINE_integer(\n 'randaug_magnitude', default=None,\n help=('Magnitude for Randaugment operations.'))\n\nflags.DEFINE_bool(\n 'use_full_image', default=False,\n help=('Ignore bounding boxes and use full image'))\n\nflags.DEFINE_string(\n 'model_name', default='efficientnet-b0',\n help=('Model name of the architecture'))\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help=('Location of the model checkpoint files'))\n\nflags.DEFINE_string(\n 'load_checkpoint', default=None,\n help=('Path to weights checkpoint to be loaded into the model'))\n\nflags.DEFINE_float(\n 'lr_stage1', default=0.1,\n help=('Initial learning rate for stage 1'))\n\nflags.DEFINE_float(\n 'lr_stage2', default=0.1,\n help=('Initial learning rate for stage 2'))\n\nflags.DEFINE_float(\n 'lr_stage3', default=0.008,\n help=('Initial learning rate for stage 3'))\n\nflags.DEFINE_float(\n 'momentum', default=0,\n help=('Momentum for SGD optimizer'))\n\nflags.DEFINE_bool(\n 'use_scaled_lr', default=True,\n help=('Scale the initial learning rate by batch size'))\n\nflags.DEFINE_bool(\n 'use_cosine_decay', default=True,\n help=('Apply cosine decay during training'))\n\nflags.DEFINE_float(\n 'warmup_epochs', default=0.3,\n help=('Duration of warmup of learning rate in epochs. It can be a'\n ' fractional value, as it will be converted to steps.'))\n\nflags.DEFINE_integer(\n 'epochs_stage1', default=4,\n help=('Number of epochs to train during stage 1. Set to 0 to skip this'\n ' stage.'))\n\nflags.DEFINE_integer(\n 'epochs_stage2', default=10,\n help=('Number of epochs to train during stage 2. Set to 0 to skip this'\n ' stage.'))\n\nflags.DEFINE_integer(\n 'epochs_stage3', default=2,\n help=('Number of epochs to train during stage 3. 
Set to 0 to skip this'\n ' stage.'))\n\nflags.DEFINE_integer(\n 'unfreeze_layers', default=0,\n help=('Number of layers to unfreeze at the end of the image base model '\n ' during stage 3.'))\n\nflags.DEFINE_bool(\n 'use_coordinates_inputs', default=False,\n help=('Use coordinates as an additional input to the model'))\n\nif 'random_seed' not in list(FLAGS):\n flags.DEFINE_integer(\n 'random_seed', default=42,\n help=('Random seed for reproducible experiments'))\n\nflags.mark_flag_as_required('annotations_json')\nflags.mark_flag_as_required('dataset_dir')\nflags.mark_flag_as_required('megadetector_results_json')\nflags.mark_flag_as_required('model_dir')\n\ndef load_train_validation_split():\n with tf.io.gfile.GFile(FLAGS.train_dataset_split, 'r') as json_file:\n json_data = json.load(json_file)\n\n return json_data['train'], json_data['validation']\n\ndef build_input_data(category_map,\n input_size,\n locations=None,\n is_training=True,\n use_eval_preprocess=False,\n bal_group_softmax=None):\n\n input_data = dataloader.JsonWBBoxInputProcessor(\n dataset_json=FLAGS.annotations_json,\n dataset_dir=FLAGS.dataset_dir,\n megadetector_results_json=FLAGS.megadetector_results_json,\n batch_size=FLAGS.batch_size,\n category_map=category_map,\n bal_group_softmax=bal_group_softmax,\n selected_locations=locations,\n is_training=is_training,\n use_eval_preprocess=use_eval_preprocess,\n output_size=input_size,\n crop_mode='full' if FLAGS.use_full_image else 'bbox',\n randaug_num_layers=FLAGS.randaug_num_layers,\n randaug_magnitude=FLAGS.randaug_magnitude,\n seed=FLAGS.random_seed)\n\n return input_data.make_source_dataset()\n\ndef get_model(num_classes, input_size, unfreeze_layers, bal_group_softmax=None):\n model = model_builder.create(\n model_name=FLAGS.model_name,\n num_classes=num_classes,\n input_size=input_size,\n unfreeze_layers=unfreeze_layers,\n bags=bal_group_softmax,\n base_model_weights=FLAGS.base_model_weights,\n seed=FLAGS.random_seed)\n\n return model\n\ndef train_model(model,\n lr,\n epochs,\n model_dir,\n train_data_and_size,\n val_data_and_size,\n strategy):\n\n if FLAGS.use_scaled_lr:\n lr = lr * FLAGS.batch_size / 256\n\n _, train_size = train_data_and_size\n warmup_steps = int(FLAGS.warmup_epochs * (train_size // FLAGS.batch_size))\n\n hparams = train_image_classifier.get_default_hparams()\n hparams = hparams._replace(\n lr=lr,\n momentum=FLAGS.momentum,\n epochs=epochs,\n warmup_steps=warmup_steps,\n use_cosine_decay=FLAGS.use_cosine_decay,\n batch_size=FLAGS.batch_size,\n model_dir=model_dir,\n label_smoothing=FLAGS.label_smoothing\n )\n\n history = train_image_classifier.train_model(\n model,\n hparams,\n train_data_and_size,\n val_data_and_size,\n strategy\n )\n\n return history\n\ndef set_random_seeds():\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n tf.random.set_seed(FLAGS.random_seed)\n\ndef main(_):\n if utils.xor(FLAGS.randaug_num_layers is None,\n FLAGS.randaug_magnitude is None):\n raise RuntimeError('To apply Randaugment during training you must specify'\n ' both --randaug_num_layers and --randaug_magnitude')\n\n set_random_seeds()\n\n category_map = CategoryMap(FLAGS.annotations_json)\n if FLAGS.train_dataset_split is not None:\n train_loc, val_loc = load_train_validation_split()\n bal_group_softmax = bags.BalancedGroupSoftmax(\n FLAGS.annotations_json,\n category_map,\n FLAGS.empty_class_id,\n selected_locations=train_loc) if FLAGS.use_bags else None\n\n dataset, num_instances = build_input_data(category_map,\n FLAGS.input_size,\n 
locations=train_loc,\n is_training=True,\n bal_group_softmax=bal_group_softmax)\n val_dataset, val_num_instances = build_input_data(category_map,\n FLAGS.input_size,\n locations=val_loc,\n is_training=False,\n bal_group_softmax=bal_group_softmax)\n else:\n bal_group_softmax = bags.BalancedGroupSoftmax(\n FLAGS.annotations_json,\n category_map,\n FLAGS.empty_class_id) if FLAGS.use_bags else None\n dataset, num_instances = build_input_data(category_map,\n FLAGS.input_size,\n is_training=True,\n bal_group_softmax=bal_group_softmax)\n val_dataset = None\n val_num_instances = 0\n\n strategy = tf.distribute.MirroredStrategy()\n print('Number of devices: {}'.format(strategy.num_replicas_in_sync))\n\n prev_checkpoint = FLAGS.load_checkpoint\n\n # Stage 1 - we train only the classifier layer\n with strategy.scope():\n model = get_model(category_map.get_num_classes(),\n FLAGS.input_size,\n unfreeze_layers=0,\n bal_group_softmax=bal_group_softmax)\n model.summary()\n if prev_checkpoint is not None:\n checkpoint_path = os.path.join(prev_checkpoint, \"ckp\")\n model.load_weights(checkpoint_path)\n train_model(model,\n lr=FLAGS.lr_stage1,\n epochs=FLAGS.epochs_stage1,\n model_dir=os.path.join(FLAGS.model_dir, 'stage1'),\n train_data_and_size=(dataset, num_instances),\n val_data_and_size=(val_dataset, val_num_instances),\n strategy=strategy)\n if FLAGS.epochs_stage1 > 0:\n prev_checkpoint = os.path.join(FLAGS.model_dir, 'stage1')\n\n # Stage 2 - we fine tune all layers\n with strategy.scope():\n model = get_model(category_map.get_num_classes(),\n FLAGS.input_size,\n unfreeze_layers=-1,\n bal_group_softmax=bal_group_softmax)\n model.summary()\n if prev_checkpoint is not None:\n checkpoint_path = os.path.join(prev_checkpoint, \"ckp\")\n model.load_weights(checkpoint_path)\n train_model(model,\n lr=FLAGS.lr_stage2,\n epochs=FLAGS.epochs_stage2,\n model_dir=os.path.join(FLAGS.model_dir, 'stage2'),\n train_data_and_size=(dataset, num_instances),\n val_data_and_size=(val_dataset, val_num_instances),\n strategy=strategy)\n if FLAGS.epochs_stage2 > 0:\n prev_checkpoint = os.path.join(FLAGS.model_dir, 'stage2')\n\n # Stage 3 - we fine tune the last N layers and use higher input size; we use\n # the evaluation preprocessing of images during training\n if FLAGS.train_dataset_split is not None:\n dataset, _ = build_input_data(category_map,\n FLAGS.input_size_stage3,\n locations=train_loc,\n is_training=True,\n use_eval_preprocess=True,\n bal_group_softmax=bal_group_softmax)\n val_dataset, _ = build_input_data(category_map,\n FLAGS.input_size_stage3,\n locations=val_loc,\n is_training=False,\n use_eval_preprocess=True,\n bal_group_softmax=bal_group_softmax)\n else:\n dataset, _ = build_input_data(category_map,\n FLAGS.input_size_stage3,\n is_training=True,\n use_eval_preprocess=True,\n bal_group_softmax=bal_group_softmax)\n val_dataset = None\n with strategy.scope():\n model = get_model(category_map.get_num_classes(),\n FLAGS.input_size_stage3,\n unfreeze_layers=FLAGS.unfreeze_layers,\n bal_group_softmax=bal_group_softmax)\n model.summary()\n if prev_checkpoint is not None:\n checkpoint_path = os.path.join(prev_checkpoint, \"ckp\")\n model.load_weights(checkpoint_path)\n train_model(model,\n lr=FLAGS.lr_stage3,\n epochs=FLAGS.epochs_stage3,\n model_dir=FLAGS.model_dir,\n train_data_and_size=(dataset, num_instances),\n val_data_and_size=(val_dataset, val_num_instances),\n strategy=strategy)\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.random.seed", "tensorflow.io.gfile.GFile", "tensorflow.expand_dims", "tensorflow.compat.v1.logging.info", "tensorflow.argmax", "tensorflow.random.set_seed", "sklearn.metrics.accuracy_score" ], [ "tensorflow.io.gfile.GFile", "tensorflow.distribute.MirroredStrategy", "numpy.random.seed", "tensorflow.random.set_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wsuchy/estimator
[ "b22a912de2693322622d6f50e3a19e98fecac441", "b22a912de2693322622d6f50e3a19e98fecac441", "b22a912de2693322622d6f50e3a19e98fecac441" ]
[ "tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks_test.py", "tensorflow_estimator/python/estimator/canned/boosted_trees.py", "tensorflow_estimator/python/estimator/canned/dnn_linear_combined_test.py" ]
[ "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for basic_session_run_hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport shutil\nimport tempfile\nimport time\n\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.summary import summary as summary_lib\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\nfrom tensorflow_estimator.python.estimator.hooks import basic_session_run_hooks\nfrom tensorflow_estimator.python.estimator.hooks import fake_summary_writer\n\n# Provide a realistic start time for unit tests where we need to mock out\n# calls to time.time().\nMOCK_START_TIME = 1484695987.209386\n\n\nclass MockCheckpointSaverListener(\n basic_session_run_hooks.CheckpointSaverListener):\n\n def __init__(self):\n self.begin_count = 0\n self.before_save_count = 0\n self.after_save_count = 0\n self.end_count = 0\n self.ask_for_stop = False\n\n def begin(self):\n self.begin_count += 1\n\n def before_save(self, session, global_step):\n self.before_save_count += 1\n\n def after_save(self, session, global_step):\n self.after_save_count += 1\n if self.ask_for_stop:\n return True\n\n def end(self, session, global_step):\n self.end_count += 1\n\n def get_counts(self):\n return {\n 'begin': self.begin_count,\n 'before_save': self.before_save_count,\n 'after_save': self.after_save_count,\n 'end': self.end_count\n }\n\n\n@test_util.deprecated_graph_mode_only\nclass SecondOrStepTimerTest(test.TestCase):\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SecondOrStepTimer()\n\n 
@test.mock.patch.object(time, 'time')\n def test_every_secs(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n\n mock_time.return_value += 1.0\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertTrue(timer.should_trigger_for_step(2))\n\n def test_every_steps(self):\n timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)\n self.assertTrue(timer.should_trigger_for_step(1))\n\n timer.update_last_triggered_step(1)\n self.assertFalse(timer.should_trigger_for_step(1))\n self.assertFalse(timer.should_trigger_for_step(2))\n self.assertFalse(timer.should_trigger_for_step(3))\n self.assertTrue(timer.should_trigger_for_step(4))\n\n def test_update_last_triggered_step(self):\n timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)\n self.assertEqual(None, elapsed_secs)\n self.assertEqual(None, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(4, elapsed_steps)\n\n elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)\n self.assertLess(0, elapsed_secs)\n self.assertEqual(2, elapsed_steps)\n\n\n@test_util.deprecated_graph_mode_only\nclass StopAtStepTest(test.TestCase):\n\n def test_raise_in_both_last_step_and_num_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)\n\n def test_stop_based_on_last_step(self):\n h = basic_session_run_hooks.StopAtStepHook(last_step=10)\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 9))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 10))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 11))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n def test_stop_based_on_num_step(self):\n h = basic_session_run_hooks.StopAtStepHook(num_steps=10)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 13))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 14))\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 15))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 16))\n mon_sess._should_stop = False\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n def 
test_stop_based_with_multiple_steps(self):\n h = basic_session_run_hooks.StopAtStepHook(num_steps=10)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n no_op = control_flow_ops.no_op()\n h.begin()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [h])\n sess.run(state_ops.assign(global_step, 5))\n h.after_create_session(sess, None)\n mon_sess.run(no_op)\n self.assertFalse(mon_sess.should_stop())\n sess.run(state_ops.assign(global_step, 15))\n mon_sess.run(no_op)\n self.assertTrue(mon_sess.should_stop())\n\n\n@test_util.deprecated_graph_mode_only\nclass LoggingTensorHookTest(test.TestCase):\n\n def setUp(self):\n # Mock out logging calls so we can verify whether correct tensors are being\n # monitored.\n self._actual_log = tf_logging.info\n self.logged_message = None\n\n def mock_log(*args, **kwargs):\n self.logged_message = args\n self._actual_log(*args, **kwargs)\n\n tf_logging.info = mock_log\n\n def tearDown(self):\n tf_logging.info = self._actual_log\n\n def test_illegal_args(self):\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)\n with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n basic_session_run_hooks.LoggingTensorHook(\n tensors=['t'], every_n_iter=5, every_n_secs=5)\n with self.assertRaisesRegexp(ValueError, 'xactly one of'):\n basic_session_run_hooks.LoggingTensorHook(tensors=['t'])\n\n def test_print_at_end_only(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], at_end=True)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n self.logged_message = ''\n for _ in range(3):\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n hook.end(sess)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n def _validate_print_every_n_steps(self, sess, at_end):\n t = constant_op.constant(42.0, name='foo')\n\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_iter=10, at_end=at_end)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n for _ in range(3):\n self.logged_message = ''\n for _ in range(9):\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n # Add additional run to verify proper reset when called multiple times.\n self.logged_message = ''\n mon_sess.run(train_op)\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n self.logged_message = ''\n hook.end(sess)\n if at_end:\n self.assertRegexpMatches(str(self.logged_message), t.name)\n else:\n # assertNotRegexpMatches is not supported by 
python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n def test_print_every_n_steps(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n self._validate_print_every_n_steps(sess, at_end=False)\n # Verify proper reset.\n self._validate_print_every_n_steps(sess, at_end=False)\n\n def test_print_every_n_steps_and_end(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n self._validate_print_every_n_steps(sess, at_end=True)\n # Verify proper reset.\n self._validate_print_every_n_steps(sess, at_end=True)\n\n def test_print_first_step(self):\n # if it runs every iteration, first iteration has None duration.\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors={'foo': t}, every_n_iter=1)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), 'foo')\n # in first run, elapsed time is None.\n self.assertEqual(str(self.logged_message).find('sec'), -1)\n\n def _validate_print_every_n_secs(self, sess, at_end, mock_time):\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_secs=1.0, at_end=at_end)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n mock_time.return_value += 1.0\n\n self.logged_message = ''\n mon_sess.run(train_op)\n self.assertRegexpMatches(str(self.logged_message), t.name)\n\n self.logged_message = ''\n hook.end(sess)\n if at_end:\n self.assertRegexpMatches(str(self.logged_message), t.name)\n else:\n # assertNotRegexpMatches is not supported by python 3.1 and later\n self.assertEqual(str(self.logged_message).find(t.name), -1)\n\n @test.mock.patch.object(time, 'time')\n def test_print_every_n_secs(self, mock_time):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_time.return_value = MOCK_START_TIME\n self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)\n # Verify proper reset.\n self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)\n\n @test.mock.patch.object(time, 'time')\n def test_print_every_n_secs_and_end(self, mock_time):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_time.return_value = MOCK_START_TIME\n self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)\n # Verify proper reset.\n self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)\n\n def test_print_formatter(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n t = constant_op.constant(42.0, name='foo')\n train_op = constant_op.constant(3)\n hook = basic_session_run_hooks.LoggingTensorHook(\n tensors=[t.name], every_n_iter=10,\n formatter=lambda items: 'qqq=%s' % items[t.name])\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess.run(train_op)\n 
self.assertEqual(self.logged_message[0], 'qqq=42.0')\n\n\n@test_util.deprecated_graph_mode_only\nclass CheckpointSaverHookTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def tearDown(self):\n shutil.rmtree(self.model_dir, ignore_errors=True)\n\n def test_saves_when_saver_and_scaffold_both_missing(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=1)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_raise_when_saver_and_scaffold_both_present(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=10, save_steps=20)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.CheckpointSaverHook(self.model_dir)\n\n def test_save_secs_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_secs_calls_listeners_at_begin_and_end(self):\n with self.graph.as_default():\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_secs=2,\n scaffold=self.scaffold,\n listeners=[listener])\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op) # hook runs here\n mon_sess.run(self.train_op) # hook won't run here, so it does at end\n hook.end(sess) # hook runs here\n self.assertEqual({\n 'begin': 1,\n 'before_save': 2,\n 'after_save': 2,\n 'end': 1\n }, listener.get_counts())\n\n def test_listener_with_monitored_session(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n scaffold=scaffold,\n listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n scaffold=scaffold,\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener_counts = listener.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, 
listener_counts)\n\n def test_listener_stops_training_in_after_save(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook], scaffold=scaffold,\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n self.assertFalse(sess.should_stop())\n sess.run(train_op)\n self.assertFalse(sess.should_stop())\n listener.ask_for_stop = True\n sess.run(train_op)\n self.assertTrue(sess.should_stop())\n\n def test_listener_with_default_saver(self):\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n listeners=[listener])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener_counts = listener.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, listener_counts)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n with monitored_session.SingularMonitoredSession(\n checkpoint_dir=self.model_dir) as sess2:\n global_step_saved_val = sess2.run(global_step)\n self.assertEqual(2, global_step_saved_val)\n\n def test_two_listeners_with_default_saver(self):\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n listener1 = MockCheckpointSaverListener()\n listener2 = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=1,\n listeners=[listener1, listener2])\n with monitored_session.SingularMonitoredSession(\n hooks=[hook],\n checkpoint_dir=self.model_dir) as sess:\n sess.run(train_op)\n sess.run(train_op)\n global_step_val = sess.raw_session().run(global_step)\n listener1_counts = listener1.get_counts()\n listener2_counts = listener2.get_counts()\n self.assertEqual(2, global_step_val)\n self.assertEqual({\n 'begin': 1,\n 'before_save': 3,\n 'after_save': 3,\n 'end': 1\n }, listener1_counts)\n self.assertEqual(listener1_counts, listener2_counts)\n\n with ops.Graph().as_default():\n global_step = training_util.get_or_create_global_step()\n with monitored_session.SingularMonitoredSession(\n checkpoint_dir=self.model_dir) as sess2:\n global_step_saved_val = sess2.run(global_step)\n self.assertEqual(2, global_step_saved_val)\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saves_periodically(self, mock_time):\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n\n mock_time.return_value = MOCK_START_TIME\n mon_sess.run(self.train_op) # Saved.\n\n mock_time.return_value = MOCK_START_TIME 
+ 0.5\n mon_sess.run(self.train_op) # Not saved.\n\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n # Simulate 2.5 seconds of sleep.\n mock_time.return_value = MOCK_START_TIME + 2.5\n mon_sess.run(self.train_op) # Saved.\n\n mock_time.return_value = MOCK_START_TIME + 2.6\n mon_sess.run(self.train_op) # Not saved.\n\n mock_time.return_value = MOCK_START_TIME + 2.7\n mon_sess.run(self.train_op) # Not saved.\n\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n # Simulate 7.5 more seconds of sleep (10 seconds from start).\n mock_time.return_value = MOCK_START_TIME + 10\n mon_sess.run(self.train_op) # Saved.\n self.assertEqual(6,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_calls_listeners_periodically(self, mock_time):\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n listener = MockCheckpointSaverListener()\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_secs=2,\n scaffold=self.scaffold,\n listeners=[listener])\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n\n mock_time.return_value = MOCK_START_TIME + 0.5\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 0.5\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 3.0\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 3.5\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 4.0\n mon_sess.run(self.train_op)\n\n mock_time.return_value = MOCK_START_TIME + 6.5\n mon_sess.run(self.train_op) # hook runs here\n\n mock_time.return_value = MOCK_START_TIME + 7.0\n mon_sess.run(self.train_op) # hook won't run here, so it does at end\n\n mock_time.return_value = MOCK_START_TIME + 7.5\n hook.end(sess) # hook runs here\n self.assertEqual({\n 'begin': 1,\n 'before_save': 4,\n 'after_save': 4,\n 'end': 1\n }, listener.get_counts())\n\n def test_save_steps_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # 
saved\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_saves_at_end(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_secs=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n hook.end(sess)\n self.assertEqual(2,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_summary_writer_defs(self):\n fake_summary_writer.FakeSummaryWriter.install()\n writer_cache.FileWriterCache.clear()\n summary_writer = writer_cache.FileWriterCache.get(self.model_dir)\n\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n hook.after_create_session(sess, None)\n mon_sess.run(self.train_op)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.model_dir,\n expected_added_meta_graphs=[\n meta_graph.create_meta_graph_def(\n graph_def=self.graph.as_graph_def(add_shapes=True),\n saver_def=self.scaffold.saver.saver_def)\n ])\n\n fake_summary_writer.FakeSummaryWriter.uninstall()\n\n def test_save_checkpoint_before_first_train_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n mon_sess = monitored_session._HookedSession(sess, [hook])\n sess.run(self.scaffold.init_op)\n hook.after_create_session(sess, None)\n # Verifies that checkpoint is saved at step 0.\n self.assertEqual(0,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n # Verifies that no checkpoint is saved after one training step.\n mon_sess.run(self.train_op)\n self.assertEqual(0,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n # Verifies that checkpoint is saved after save_steps.\n mon_sess.run(self.train_op)\n self.assertEqual(2,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass CheckpointSaverHookMultiStepTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n self.steps_per_run = 5\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(self.steps_per_run)\n\n def tearDown(self):\n shutil.rmtree(self.model_dir, ignore_errors=True)\n\n def test_save_steps_saves_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def 
test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n # Saved (step=5)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Not saved (step=10)\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Saved (step=15)\n self.assertEqual(15,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Not saved (step=20)\n self.assertEqual(15,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n mon_sess.run(self.train_op)\n # Saved (step=25)\n self.assertEqual(25,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n def test_save_steps_saves_at_end(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir,\n save_steps=2*self.steps_per_run,\n scaffold=self.scaffold)\n hook._set_steps_per_run(self.steps_per_run)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n hook.end(sess)\n self.assertEqual(10,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass ResourceCheckpointSaverHookTest(test.TestCase):\n\n def setUp(self):\n self.model_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n with self.graph.as_default():\n self.scaffold = monitored_session.Scaffold()\n with variable_scope.variable_scope('foo', use_resource=True):\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.CheckpointSaverHook(\n self.model_dir, save_steps=2, scaffold=self.scaffold)\n hook.begin()\n self.scaffold.finalize()\n with session_lib.Session() as sess:\n sess.run(self.scaffold.init_op)\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(self.train_op)\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(1,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # Not saved\n self.assertEqual(3,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n mon_sess.run(self.train_op)\n # saved\n self.assertEqual(5,\n checkpoint_utils.load_variable(self.model_dir,\n self.global_step.name))\n\n\n@test_util.deprecated_graph_mode_only\nclass StepCounterHookTest(test.TestCase):\n\n def setUp(self):\n self.log_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.log_dir, ignore_errors=True)\n\n @test.mock.patch.object(time, 'time')\n def test_step_counter_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with 
ops.Graph().as_default() as g, session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=10)\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n with test.mock.patch.object(tf_logging, 'warning') as mock_log:\n for _ in range(30):\n mock_time.return_value += 0.01\n mon_sess.run(train_op)\n # logging.warning should not be called.\n self.assertIsNone(mock_log.call_args)\n hook.end(sess)\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([11, 21], summary_writer.summaries.keys())\n for step in [11, 21]:\n summary_value = summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_step_counter_every_n_secs(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)\n\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op)\n mock_time.return_value += 0.2\n mon_sess.run(train_op)\n mock_time.return_value += 0.2\n mon_sess.run(train_op)\n hook.end(sess)\n\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertTrue(summary_writer.summaries, 'No summaries were created.')\n self.assertItemsEqual([2, 3], summary_writer.summaries.keys())\n for summary in summary_writer.summaries.values():\n summary_value = summary[0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n def test_global_step_name(self):\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n with variable_scope.variable_scope('bar'):\n variable_scope.get_variable(\n 'foo',\n initializer=0,\n trainable=False,\n collections=[\n ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES\n ])\n train_op = training_util._increment_global_step(1)\n summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)\n hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)\n\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op)\n mon_sess.run(train_op)\n hook.end(sess)\n\n summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertTrue(summary_writer.summaries, 'No summaries were created.')\n self.assertItemsEqual([2], summary_writer.summaries.keys())\n summary_value = summary_writer.summaries[2][0].value[0]\n self.assertEqual('bar/foo/sec', summary_value.tag)\n\n def 
test_log_warning_if_global_step_not_increased(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n training_util.get_or_create_global_step()\n train_op = training_util._increment_global_step(0) # keep same.\n self.evaluate(variables_lib.global_variables_initializer())\n hook = basic_session_run_hooks.StepCounterHook(\n every_n_steps=1, every_n_secs=None)\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n mon_sess.run(train_op) # Run one step to record global step.\n with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:\n for _ in range(30):\n mon_sess.run(train_op)\n self.assertRegexpMatches(\n str(mock_log.call_args),\n 'global step.*has not been increased')\n hook.end(sess)\n\n def _setup_steps_per_run_test(self,\n every_n_steps,\n steps_per_run,\n graph,\n sess):\n training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(steps_per_run)\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(\n self.log_dir, graph)\n self.hook = basic_session_run_hooks.StepCounterHook(\n summary_writer=self.summary_writer, every_n_steps=every_n_steps)\n self.hook._set_steps_per_run(steps_per_run)\n self.hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n self.mon_sess = monitored_session._HookedSession(sess, [self.hook])\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_less_than_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(10, 5, g, sess)\n\n # Logs at 15, 25\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys())\n for step in [15, 25]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_equal_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(5, 5, g, sess)\n\n # Logs at 10, 15, 20, 25\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n self.assertItemsEqual([10, 15, 20, 25],\n self.summary_writer.summaries.keys())\n for step in [10, 15, 20, 25]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n @test.mock.patch.object(time, 'time')\n def test_steps_per_run_greater_than_every_n_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n with ops.Graph().as_default() as g, session_lib.Session() as sess:\n self._setup_steps_per_run_test(5, 10, g, sess)\n\n # Logs at 20, 30, 40, 50\n for _ in range(5):\n mock_time.return_value += 0.01\n self.mon_sess.run(self.train_op)\n\n self.hook.end(sess)\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_graph=g,\n expected_summaries={})\n 
self.assertItemsEqual([20, 30, 40, 50],\n self.summary_writer.summaries.keys())\n for step in [20, 30, 40, 50]:\n summary_value = self.summary_writer.summaries[step][0].value[0]\n self.assertEqual('global_step/sec', summary_value.tag)\n self.assertGreater(summary_value.simple_value, 0)\n\n\n@test_util.deprecated_graph_mode_only\nclass SummarySaverHookTest(test.TestCase):\n\n def setUp(self):\n test.TestCase.setUp(self)\n\n self.log_dir = 'log/dir'\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)\n\n var = variables_lib.Variable(0.0)\n tensor = state_ops.assign_add(var, 1.0)\n tensor2 = tensor * 2\n self.summary_op = summary_lib.scalar('my_summary', tensor)\n self.summary_op2 = summary_lib.scalar('my_summary2', tensor2)\n\n training_util.get_or_create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_raise_when_scaffold_and_summary_op_both_missing(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook()\n\n def test_raise_when_scaffold_and_summary_op_both_present(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n scaffold=monitored_session.Scaffold(), summary_op=self.summary_op)\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n save_secs=10, save_steps=20, summary_writer=self.summary_writer)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.SummarySaverHook(\n save_secs=None, save_steps=None, summary_writer=self.summary_writer)\n\n def test_save_steps(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 9: {\n 'my_summary': 2.0\n },\n 17: {\n 'my_summary': 3.0\n },\n 25: {\n 'my_summary': 4.0\n },\n })\n\n def test_multiple_summaries(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=[self.summary_op, self.summary_op2])\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(10):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0,\n 'my_summary2': 2.0\n },\n 9: {\n 'my_summary': 2.0,\n 'my_summary2': 4.0\n },\n })\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saving_once_every_step(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.SummarySaverHook(\n save_secs=0.5,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(4):\n mon_sess.run(self.train_op)\n mock_time.return_value += 0.5\n hook.end(sess)\n\n 
self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 2: {\n 'my_summary': 2.0\n },\n 3: {\n 'my_summary': 3.0\n },\n 4: {\n 'my_summary': 4.0\n },\n })\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saving_once_every_three_steps(self, mock_time):\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.SummarySaverHook(\n save_secs=9.,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(8):\n mon_sess.run(self.train_op)\n mock_time.return_value += 3.1\n hook.end(sess)\n\n # 24.8 seconds passed (3.1*8); it saves every 9 seconds, starting from the first step:\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 4: {\n 'my_summary': 2.0\n },\n 7: {\n 'my_summary': 3.0\n },\n })\n\n\n@test_util.deprecated_graph_mode_only\nclass GlobalStepWaiterHookTest(test.TestCase):\n\n def test_not_wait_for_step_zero(self):\n with ops.Graph().as_default():\n training_util.get_or_create_global_step()\n hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)\n hook.begin()\n with session_lib.Session() as sess:\n # before_run should return without waiting for a gstep increment.\n hook.before_run(\n session_run_hook.SessionRunContext(\n original_args=None, session=sess))\n\n @test.mock.patch.object(time, 'sleep')\n def test_wait_for_step(self, mock_sleep):\n with ops.Graph().as_default():\n gstep = training_util.get_or_create_global_step()\n hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)\n hook.begin()\n\n with session_lib.Session() as sess:\n # Mock out calls to time.sleep() to update the global step.\n\n class Context(object):\n counter = 0\n\n def mock_sleep_side_effect(seconds):\n del seconds # argument is ignored\n Context.counter += 1\n if Context.counter == 1:\n # The first time sleep() is called, we update the global_step from\n # 0 to 500.\n sess.run(state_ops.assign(gstep, 500))\n elif Context.counter == 2:\n # The second time sleep() is called, we update the global_step from\n # 500 to 1100.\n sess.run(state_ops.assign(gstep, 1100))\n else:\n raise AssertionError(\n 'Expected before_run() to terminate after the second call to '\n 'time.sleep()')\n\n mock_sleep.side_effect = mock_sleep_side_effect\n\n # Run the mocked-out interaction with the hook.\n self.evaluate(variables_lib.global_variables_initializer())\n run_context = session_run_hook.SessionRunContext(\n original_args=None, session=sess)\n hook.before_run(run_context)\n self.assertEqual(Context.counter, 2)\n\n\n@test_util.deprecated_graph_mode_only\nclass FinalOpsHookTest(test.TestCase):\n\n def test_final_ops_is_scalar_tensor(self):\n with ops.Graph().as_default():\n expected_value = 4\n final_ops = constant_op.constant(expected_value)\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() as session:\n hook.end(session)\n self.assertEqual(expected_value,\n hook.final_ops_values)\n\n def test_final_ops_is_tensor(self):\n with ops.Graph().as_default():\n expected_values = [1, 6, 3, 5, 2, 4]\n final_ops = constant_op.constant(expected_values)\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() 
as session:\n hook.end(session)\n self.assertListEqual(expected_values,\n hook.final_ops_values.tolist())\n\n def test_final_ops_triggers_out_of_range_error(self):\n with ops.Graph().as_default():\n dataset = dataset_ops.Dataset.range(1)\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n read_ops = iterator.get_next()\n final_ops = read_ops\n\n hook = basic_session_run_hooks.FinalOpsHook(final_ops)\n hook.begin()\n\n with session_lib.Session() as session:\n session.run(read_ops)\n with test.mock.patch.object(tf_logging, 'warning') as mock_log:\n with self.assertRaisesRegexp(errors.OutOfRangeError,\n 'End of sequence'):\n hook.end(session)\n self.assertRegexpMatches(\n str(mock_log.call_args),\n 'dependency back to some input source')\n\n def test_final_ops_with_dictionary(self):\n with ops.Graph().as_default():\n expected_values = [4, -3]\n final_ops = array_ops.placeholder(dtype=dtypes.float32)\n final_ops_feed_dict = {final_ops: expected_values}\n\n hook = basic_session_run_hooks.FinalOpsHook(\n final_ops, final_ops_feed_dict)\n hook.begin()\n\n with session_lib.Session() as session:\n hook.end(session)\n self.assertListEqual(expected_values,\n hook.final_ops_values.tolist())\n\n\n@test_util.deprecated_graph_mode_only\nclass ResourceSummarySaverHookTest(test.TestCase):\n\n def setUp(self):\n test.TestCase.setUp(self)\n\n self.log_dir = 'log/dir'\n self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)\n\n var = variable_scope.get_variable('var', initializer=0.0, use_resource=True)\n tensor = state_ops.assign_add(var, 1.0)\n self.summary_op = summary_lib.scalar('my_summary', tensor)\n\n with variable_scope.variable_scope('foo', use_resource=True):\n training_util.create_global_step()\n self.train_op = training_util._increment_global_step(1)\n\n def test_save_steps(self):\n hook = basic_session_run_hooks.SummarySaverHook(\n save_steps=8,\n summary_writer=self.summary_writer,\n summary_op=self.summary_op)\n\n with self.cached_session() as sess:\n hook.begin()\n self.evaluate(variables_lib.global_variables_initializer())\n mon_sess = monitored_session._HookedSession(sess, [hook])\n for _ in range(30):\n mon_sess.run(self.train_op)\n hook.end(sess)\n\n self.summary_writer.assert_summaries(\n test_case=self,\n expected_logdir=self.log_dir,\n expected_summaries={\n 1: {\n 'my_summary': 1.0\n },\n 9: {\n 'my_summary': 2.0\n },\n 17: {\n 'my_summary': 3.0\n },\n 25: {\n 'my_summary': 4.0\n },\n })\n\n\n@test_util.deprecated_graph_mode_only\nclass FeedFnHookTest(test.TestCase):\n\n def test_feeding_placeholder(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n x = array_ops.placeholder(dtype=dtypes.float32)\n y = x + 1\n hook = basic_session_run_hooks.FeedFnHook(\n feed_fn=lambda: {x: 1.0})\n hook.begin()\n mon_sess = monitored_session._HookedSession(sess, [hook])\n self.assertEqual(mon_sess.run(y), 2)\n\n\n@test_util.deprecated_graph_mode_only\nclass ProfilerHookTest(test.TestCase):\n\n def setUp(self):\n super(ProfilerHookTest, self).setUp()\n self.output_dir = tempfile.mkdtemp()\n self.graph = ops.Graph()\n self.filepattern = os.path.join(self.output_dir, 'timeline-*.json')\n with self.graph.as_default():\n self.global_step = training_util.get_or_create_global_step()\n self.train_op = state_ops.assign_add(self.global_step, 1)\n\n def tearDown(self):\n super(ProfilerHookTest, self).tearDown()\n shutil.rmtree(self.output_dir, ignore_errors=True)\n\n def _count_timeline_files(self):\n return len(gfile.Glob(self.filepattern))\n\n def 
test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None)\n\n def test_save_secs_does_not_save_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_secs=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op)\n self.assertEqual(0, self._count_timeline_files())\n\n @test.mock.patch.object(time, 'time')\n def test_save_secs_saves_periodically(self, mock_time):\n # Pick a fixed start time.\n with self.graph.as_default():\n mock_time.return_value = MOCK_START_TIME\n hook = basic_session_run_hooks.ProfilerHook(\n save_secs=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n # Simulate 2.5 seconds of sleep.\n mock_time.return_value = MOCK_START_TIME + 2.5\n sess.run(self.train_op) # Saved.\n self.assertEqual(1, self._count_timeline_files())\n\n # Pretend some small amount of time has passed.\n mock_time.return_value = MOCK_START_TIME + 2.6\n sess.run(self.train_op) # Not saved.\n # Edge test just before we should save the timeline.\n mock_time.return_value = MOCK_START_TIME + 4.4\n sess.run(self.train_op) # Not saved.\n self.assertEqual(1, self._count_timeline_files())\n\n mock_time.return_value = MOCK_START_TIME + 4.5\n sess.run(self.train_op) # Saved.\n self.assertEqual(2, self._count_timeline_files())\n\n def test_save_steps_does_not_save_in_first_step(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=1, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n\n def test_save_steps_saves_periodically(self):\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=2, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n self.assertEqual(0, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(0, self._count_timeline_files())\n sess.run(self.train_op) # Saved.\n self.assertEqual(1, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(1, self._count_timeline_files())\n sess.run(self.train_op) # Saved.\n self.assertEqual(2, self._count_timeline_files())\n sess.run(self.train_op) # Not saved.\n self.assertEqual(2, self._count_timeline_files())\n\n def test_run_metadata_saves(self):\n writer_cache.FileWriterCache.clear()\n fake_summary_writer.FakeSummaryWriter.install()\n fake_writer = writer_cache.FileWriterCache.get(self.output_dir)\n with self.graph.as_default():\n hook = basic_session_run_hooks.ProfilerHook(\n save_steps=1, output_dir=self.output_dir)\n with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:\n sess.run(self.train_op) # Not saved.\n sess.run(self.train_op) # Saved.\n self.assertEqual(\n list(fake_writer._added_run_metadata.keys()), ['step_2'])\n fake_summary_writer.FakeSummaryWriter.uninstall()\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
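# A small pure-Python model (an illustrative assumption, not the real\n# ProfilerHook logic) of the step cadence asserted above: the very first run\n# never writes a timeline, and afterwards a timeline is written whenever at\n# least save_steps steps have passed since the last one.\ndef profiled_steps(n_steps, save_steps):\n saved, last = [], None\n for step in range(1, n_steps + 1):\n if step == 1:\n continue # the hook never profiles the very first run\n if last is None or step - last >= save_steps:\n saved.append(step)\n last = step\n return saved\n\nassert profiled_steps(5, save_steps=2) == [2, 4]\nassert profiled_steps(1, save_steps=1) == []\n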
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Estimator classes for BoostedTrees.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport contextlib\nimport functools\n\nimport numpy as np\nimport six\n\nfrom tensorflow.core.kernels.boosted_trees import boosted_trees_pb2\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.feature_column import feature_column as fc_old\nfrom tensorflow.python.feature_column import feature_column_lib\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import boosted_trees_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_v2_toggles\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.array_ops import identity as tf_identity\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util.tf_export import estimator_export\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator.canned import boosted_trees_utils\nfrom tensorflow_estimator.python.estimator.canned import head as head_lib\nfrom tensorflow_estimator.python.estimator.mode_keys import ModeKeys\nfrom tensorflow.python.ops import cond_v2\n\n# TODO(nponomareva): Reveal pruning params here.\n_TreeHParams = collections.namedtuple('TreeHParams', [\n 'n_trees', 'max_depth', 'learning_rate', 'l1', 'l2', 'tree_complexity',\n 'min_node_weight', 'center_bias', 'pruning_mode', 'quantile_sketch_epsilon'\n])\n\n_HOLD_FOR_MULTI_CLASS_SUPPORT = object()\n_HOLD_FOR_MULTI_DIM_SUPPORT = object()\n_DUMMY_NUM_BUCKETS = -1\n_DUMMY_NODE_ID = -1\n_QUANTILE_ACCUMULATOR_RESOURCE_NAME = 'QuantileAccumulator'\n\n\ndef _is_numeric_column(feature_column):\n \"\"\"Returns True if column is a continuous numeric that should be bucketized.\"\"\"\n # These columns always produce categorical integers and do not require\n # additional bucketization.\n if isinstance(\n feature_column,\n (\n feature_column_lib.CategoricalColumn,\n fc_old._CategoricalColumn, # pylint:disable=protected-access\n feature_column_lib.BucketizedColumn,\n fc_old._BucketizedColumn, # pylint:disable=protected-access\n feature_column_lib.IndicatorColumn,\n fc_old._IndicatorColumn)): # 
pylint:disable=protected-access\n return False\n # NumericColumns are always interpreted as continuous numerics.\n if isinstance(feature_column,\n (feature_column_lib.NumericColumn, fc_old._NumericColumn)):\n return True\n # For other dense columns, the dtype is used.\n if isinstance(feature_column,\n (feature_column_lib.DenseColumn, fc_old._DenseColumn)):\n # NOTE: GBDT requires that all DenseColumns expose a dtype attribute\n return feature_column.dtype.is_floating\n else:\n raise ValueError('Encountered unexpected column {}'.format(feature_column))\n\n\ndef _get_float_feature_columns(sorted_feature_columns):\n \"\"\"Get float feature columns.\n\n Args:\n sorted_feature_columns: a list of feature columns sorted by name.\n\n Returns:\n float_columns: a list of float feature columns sorted by name.\n \"\"\"\n float_columns = []\n for feature_column in sorted_feature_columns:\n if _is_numeric_column(feature_column):\n float_columns.append(feature_column)\n return float_columns\n\n\ndef _apply_feature_transformations(features, feature_columns):\n \"\"\"Applies feature column transformations to the provided features.\n\n Supports V1 and V2 FeatureColumns.\n\n Args:\n features: a dictionary of feature name to Tensor.\n feature_columns: an iterable of tf.feature_columns.\n\n Returns:\n A dict from feature_column to transformed feature tensor.\n \"\"\"\n v2_columns, v1_columns = [], []\n for fc in feature_columns:\n if feature_column_lib.is_feature_column_v2([fc]):\n v2_columns.append(fc)\n else:\n v1_columns.append(fc)\n\n if v2_columns:\n state_manager = feature_column_v2._StateManagerImpl(\n layer=None, trainable=False)\n\n transformed_columns = feature_column_v2._transform_features_v2(\n features, v2_columns, state_manager)\n else:\n transformed_columns = {}\n if v1_columns:\n transformed_columns.update(fc_old._transform_features(features, v1_columns))\n return transformed_columns\n\n\ndef _get_transformed_features(\n features,\n sorted_feature_columns,\n bucket_boundaries_dict=None,\n):\n \"\"\"Gets the transformed features from features/feature_columns pair.\n\n Args:\n features: a dictionary of name to Tensor.\n sorted_feature_columns: a list/set of tf.feature_column, sorted by name.\n bucket_boundaries_dict: a dict of name to list of Tensors.\n\n Returns:\n result_features: a list of the transformed features, sorted by the name.\n\n Raises:\n ValueError: when unsupported features/columns are tried.\n \"\"\"\n return _get_transformed_features_and_merge_with_previously_transformed(\n features, sorted_feature_columns, sorted_feature_columns,\n bucket_boundaries_dict)\n\n\ndef _get_transformed_features_and_merge_with_previously_transformed(\n features,\n sorted_feature_columns,\n all_sorted_columns,\n bucket_boundaries_dict=None,\n already_transformed_features={},\n):\n \"\"\"Gets the transformed features from features/feature_columns pair.\n\n This signature allows passing in previously transformed features.\n\n Args:\n features: a dictionary of name to Tensor.\n sorted_feature_columns: a list/set of tf.feature_column, sorted by name, to\n be used for transforming features.\n all_sorted_columns: a total list of feature columns, including those that\n were already used for transformation.\n bucket_boundaries_dict: a dict of name to list of Tensors.\n already_transformed_features: features that were already transformed (for\n columns all_sorted_columns-sorted_feature_columns)\n\n Returns:\n result_features: a list of the transformed features, sorted by the name.\n\n Raises:\n ValueError: when 
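# Hedged illustration (using the public tf.feature_column API rather than the\n# internal modules imported above) of how _is_numeric_column routes columns:\n# only continuous numerics are sent through quantile bucketization.\nimport tensorflow as tf # assumes TF 1.14+ / 2.x with tf.feature_column\n\nprice = tf.feature_column.numeric_column('price') # numeric -> True\nprice_bucketized = tf.feature_column.bucketized_column(price, [0., 10.]) # -> False\ncolor = tf.feature_column.indicator_column(\n tf.feature_column.categorical_column_with_vocabulary_list(\n 'color', ['r', 'g', 'b'])) # categorical -> False\n# Only 'price' would be handed to the QuantileAccumulator for boundary\n# estimation; the other two already produce categorical integers.\n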
unsupported features/columns are tried.\n \"\"\"\n # pylint:disable=protected-access\n transformed_features = _apply_feature_transformations(features,\n sorted_feature_columns)\n result_features = []\n\n if sorted_feature_columns != all_sorted_columns:\n # Add previously transformed features.\n transformed_features.update(already_transformed_features)\n\n for column in all_sorted_columns:\n if isinstance(\n column,\n (feature_column_lib.BucketizedColumn, fc_old._BucketizedColumn)):\n source_name = column.source_column.name\n squeezed_tensor = array_ops.squeeze(transformed_features[column], axis=1)\n if len(squeezed_tensor.shape) > 1:\n raise ValueError('For now, only supports features equivalent to rank 1 '\n 'but column `{}` got: {}'.format(\n source_name, features[source_name].shape))\n result_features.append(squeezed_tensor)\n elif isinstance(\n column, (feature_column_lib.IndicatorColumn, fc_old._IndicatorColumn)):\n source_name = column.categorical_column.name\n tensor = math_ops.cast(transformed_features[column], dtype=dtypes.int32)\n if len(tensor.shape) > 2:\n raise ValueError('Rank of indicator column must be no more than 2, '\n 'but column `{}` got: {}'.format(\n source_name, features[source_name].shape))\n unstacked = array_ops.unstack(tensor, axis=1)\n result_features.extend(unstacked)\n elif isinstance(column,\n (feature_column_lib.DenseColumn, fc_old._DenseColumn)):\n source_name = column.name\n tensor = transformed_features[column]\n # TODO(tanzheny): Add support for multi dim with rank > 1\n if _get_variable_shape(column).rank > 1:\n raise ValueError('For now, we only support Dense column with rank of '\n '1, but column `{}` got: {}'.format(\n source_name, column.variable_shape))\n unstacked = array_ops.unstack(tensor, axis=1)\n if not bucket_boundaries_dict:\n result_features.extend(unstacked)\n else:\n assert source_name in bucket_boundaries_dict\n num_float_features = (\n _get_variable_shape(column)[0]\n if _get_variable_shape(column).as_list() else 1)\n assert num_float_features == len(bucket_boundaries_dict[source_name])\n bucketized = boosted_trees_ops.boosted_trees_bucketize(\n unstacked, bucket_boundaries_dict[source_name])\n result_features.extend(bucketized)\n elif isinstance(\n column,\n (feature_column_lib.CategoricalColumn, fc_old._CategoricalColumn)):\n raise ValueError(\n 'CategoricalColumn must be wrapped by IndicatorColumn, got: {}'\n .format(column))\n else:\n raise ValueError('Got unexpected feature column type {}'.format(column))\n # pylint:enable=protected-access\n\n return result_features\n\n\ndef _variable(initial_value, trainable=False, name=None):\n \"\"\"Stores a tensor as a local Variable for faster read.\"\"\"\n if compat.forward_compatible(2019, 8, 8):\n return variable_scope.variable(\n initial_value=initial_value,\n trainable=trainable,\n validate_shape=False,\n name=name,\n use_resource=True)\n return variable_scope.variable(\n initial_value=initial_value,\n trainable=trainable,\n validate_shape=False,\n name=name,\n use_resource=False)\n\n\ndef _group_features_by_num_buckets(sorted_feature_columns, num_quantiles):\n \"\"\"Groups feature ids by the number of buckets.\n\n Derives the feature ids based on iterating through ordered feature columns\n and groups them by the number of buckets each feature requires. 
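# A small numpy sketch (an illustrative assumption mirroring the unstack\n# above) of how one indicator column of depth 3 is flattened into three\n# separate int32 features, one per vocabulary entry:\nimport numpy as np\n\none_hot = np.array([[1, 0, 0],\n [0, 0, 1]], dtype=np.int32) # [batch=2, depth=3]\nper_bucket = [one_hot[:, i] for i in range(one_hot.shape[1])] # like unstack(axis=1)\nassert [f.tolist() for f in per_bucket] == [[1, 0], [0, 0], [0, 1]]\n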
Returns a\n sorted list of buckets and a list of lists of feature ids for each of those\n buckets.\n\n Args:\n sorted_feature_columns: a list/set of tf.feature_column sorted by name.\n num_quantiles: int representing the number of quantile buckets for all\n numeric columns.\n\n Returns:\n bucket_size_list: a list of required bucket sizes.\n feature_ids_list: a list of lists of feature ids for each bucket size.\n\n Raises:\n ValueError: when unsupported feature columns are provided.\n \"\"\"\n bucket_size_to_feature_ids_dict = collections.OrderedDict()\n\n # TODO(nponomareva) for now we preserve the previous functionality and bucket\n # all numeric into the same num of buckets. Can be easily changed to using\n # each numeric's real buckets num, but we need to test that it does not cause\n # a performance hit.\n\n # We will replace this dummy key with the real max after we calculate it.\n bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS] = []\n\n max_buckets_for_bucketized = 2\n max_buckets_for_indicator = 2\n\n feature_idx = 0\n # pylint:disable=protected-access\n\n for column in sorted_feature_columns:\n if isinstance(\n column, (feature_column_lib.IndicatorColumn, fc_old._IndicatorColumn)):\n num_categorical_features = column.categorical_column._num_buckets\n if max_buckets_for_indicator not in bucket_size_to_feature_ids_dict:\n bucket_size_to_feature_ids_dict[max_buckets_for_indicator] = []\n\n for _ in range(num_categorical_features):\n # We use bucket size of 2 for categorical.\n bucket_size_to_feature_ids_dict[max_buckets_for_indicator].append(\n feature_idx)\n feature_idx += 1\n elif isinstance(\n column,\n (feature_column_lib.BucketizedColumn, fc_old._BucketizedColumn)):\n max_buckets_for_bucketized = max(max_buckets_for_bucketized,\n len(column.boundaries) + 1)\n bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS].append(feature_idx)\n feature_idx += 1\n elif isinstance(column,\n (feature_column_lib.DenseColumn, fc_old._DenseColumn)):\n if num_quantiles not in bucket_size_to_feature_ids_dict:\n bucket_size_to_feature_ids_dict[num_quantiles] = []\n num_float_features = _get_variable_shape(\n column)[0] if _get_variable_shape(column).as_list() else 1\n for _ in range(num_float_features):\n bucket_size_to_feature_ids_dict[num_quantiles].append(feature_idx)\n feature_idx += 1\n elif isinstance(\n column,\n (feature_column_lib.CategoricalColumn, fc_old._CategoricalColumn)):\n raise ValueError(\n 'CategoricalColumn must be wrapped by IndicatorColumn, got: {}'\n .format(column))\n else:\n raise ValueError('Got unexpected feature column type {}'.format(column))\n\n # Replace the dummy key with the real max num of buckets for all bucketized\n # columns.\n bucketized_feature_ids = bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS]\n if max_buckets_for_bucketized in bucket_size_to_feature_ids_dict:\n bucket_size_to_feature_ids_dict[max_buckets_for_bucketized].extend(\n bucketized_feature_ids)\n elif bucketized_feature_ids:\n bucket_size_to_feature_ids_dict[\n max_buckets_for_bucketized] = bucketized_feature_ids\n del bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS]\n\n # pylint:enable=protected-access\n feature_ids_list = list(bucket_size_to_feature_ids_dict.values())\n bucket_size_list = list(bucket_size_to_feature_ids_dict.keys())\n return bucket_size_list, feature_ids_list\n\n\ndef _calculate_num_features(sorted_feature_columns):\n \"\"\"Calculate the total number of features.\"\"\"\n num_features = 0\n # pylint:disable=protected-access\n for column in sorted_feature_columns:\n if 
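# Toy pure-Python recreation (simplified, an assumption for illustration) of\n# the grouping above: each flattened indicator feature uses bucket size 2, all\n# bucketized features share max(len(boundaries) + 1), and all numeric features\n# share num_quantiles.\ndef toy_grouping(columns, num_quantiles):\n groups, max_bucketized = {}, 2\n for idx, (kind, n_boundaries) in enumerate(columns):\n if kind == 'indicator':\n groups.setdefault(2, []).append(idx)\n elif kind == 'bucketized':\n max_bucketized = max(max_bucketized, n_boundaries + 1)\n groups.setdefault('bucketized', []).append(idx)\n else: # numeric\n groups.setdefault(num_quantiles, []).append(idx)\n if 'bucketized' in groups:\n groups[max_bucketized] = groups.pop('bucketized')\n return list(groups.keys()), list(groups.values())\n\nsizes, ids = toy_grouping(\n [('indicator', None), ('bucketized', 3), ('bucketized', 9), ('numeric', None)],\n num_quantiles=100)\nassert sizes == [2, 100, 10] and ids == [[0], [3], [1, 2]]\n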
isinstance(\n column, (fc_old._IndicatorColumn, feature_column_lib.IndicatorColumn)):\n num_features += column.categorical_column._num_buckets\n elif isinstance(\n column,\n (fc_old._BucketizedColumn, feature_column_lib.BucketizedColumn)):\n num_features += 1\n elif isinstance(column,\n (feature_column_lib.DenseColumn, fc_old._DenseColumn)):\n num_features += _get_variable_shape(column)[0] if _get_variable_shape(\n column).as_list() else 1\n elif isinstance(\n column,\n (feature_column_lib.CategoricalColumn, fc_old._CategoricalColumn)):\n raise ValueError(\n 'CategoricalColumn must be wrapped by IndicatorColumn, got: {}'\n .format(column))\n else:\n raise ValueError('Got unexpected feature column type {}'.format(column))\n # pylint:enable=protected-access\n return num_features\n\n\ndef _generate_feature_col_name_mapping(sorted_feature_columns):\n \"\"\"Return a list of feature column names for feature ids.\n\n Example:\n\n ```\n gender_col = indicator_column(\n categorical_column_with_vocabulary_list(\n 'gender', ['male', 'female', 'n/a']))\n # Results in 3 binary features for which we store the mapping to the\n # original feature column.\n _generate_feature_col_name_mapping([gender_col])\n ['gender', 'gender', 'gender']\n ```\n\n Args:\n sorted_feature_columns: a list/set of tf.feature_column sorted by name.\n\n Returns:\n feature_col_name_mapping: a list of feature column names indexed by the\n feature ids.\n\n Raises:\n ValueError: when unsupported features/columns are tried.\n \"\"\"\n # pylint:disable=protected-access\n names = []\n for column in sorted_feature_columns:\n if isinstance(\n column, (feature_column_lib.IndicatorColumn, fc_old._IndicatorColumn)):\n categorical_column = column.categorical_column\n if hasattr(categorical_column, 'num_buckets'):\n one_hot_depth = categorical_column.num_buckets\n else:\n assert hasattr(categorical_column, '_num_buckets')\n one_hot_depth = categorical_column._num_buckets\n for _ in range(one_hot_depth):\n names.append(categorical_column.name)\n elif isinstance(\n column,\n (feature_column_lib.BucketizedColumn, fc_old._BucketizedColumn)):\n names.append(column.name)\n elif isinstance(column,\n (fc_old._DenseColumn, feature_column_lib.DenseColumn)):\n num_float_features = _get_variable_shape(\n column)[0] if _get_variable_shape(column).as_list() else 1\n for _ in range(num_float_features):\n names.append(column.name)\n elif isinstance(\n column,\n (feature_column_lib.CategoricalColumn, fc_old._CategoricalColumn)):\n raise ValueError(\n 'CategoricalColumn must be wrapped by IndicatorColumn, got: {}'\n .format(column))\n else:\n raise ValueError('Got unexpected feature column type {}'.format(column))\n return names\n # pylint:enable=protected-access\n\n\ndef _cond(var, true_branch, false_branch, name=None):\n if compat.forward_compatible(2019, 8, 8):\n # Always force to use cond v2 (even in v1 setting).\n return cond_v2.cond_v2(var, true_branch, false_branch, name=name)\n\n @contextlib.contextmanager\n def disable_control_flow_v2():\n control_flow_v2_enabled = control_flow_v2_toggles.control_flow_v2_enabled()\n control_flow_v2_toggles.disable_control_flow_v2()\n yield\n if control_flow_v2_enabled:\n control_flow_v2_toggles.enable_control_flow_v2()\n\n with disable_control_flow_v2():\n return control_flow_ops.cond(\n math_ops.logical_and(var, array_ops.constant(True)),\n true_branch,\n false_branch,\n name=name)\n\n\ndef _accumulator(dtype, shape, shared_name):\n return data_flow_ops.ConditionalAccumulator(\n dtype=dtype, shape=shape, 
shared_name=shared_name)\n\n\ndef _cache_transformed_features(features, sorted_feature_columns, cat_columns,\n other_columns, batch_size,\n bucket_boundaries_dict, are_boundaries_ready):\n \"\"\"Transforms features and caches them, then returns (cached_features, cache_op).\"\"\"\n num_features = _calculate_num_features(sorted_feature_columns)\n cached_features = [\n _variable(\n array_ops.zeros([batch_size], dtype=dtypes.int32),\n name='cached_feature_{}'.format(i)) for i in range(num_features)\n ]\n are_features_cached = _variable(False, name='are_features_cached')\n\n # An ugly hack - for categorical features, in order to have lookup tables\n # initialized, transform should happen outside of cond. So we always transform\n # cat columns separately (it is not as expensive as bucketizing) and then\n # merge these processed features with other columns in cond branches.\n cat_transformed = []\n if len(cat_columns) > 0:\n cat_transformed = _apply_feature_transformations(features, cat_columns)\n\n def get_features_without_cache():\n \"\"\"Returns transformed features.\"\"\"\n transformed_features = _get_transformed_features_and_merge_with_previously_transformed(\n features, other_columns, sorted_feature_columns, bucket_boundaries_dict,\n cat_transformed)\n\n return transformed_features, control_flow_ops.no_op()\n\n def get_features_with_cache():\n \"\"\"Either returns from cache or transforms and caches features.\"\"\"\n\n def _cache_features_and_return():\n \"\"\"Caches transformed features.\n\n The intention is to hide get_transformed_features() from the graph by\n caching the result except the first step, since bucketize operation\n (inside get_transformed_features) is expensive.\n\n Returns:\n input_feature_list: a list of input features.\n cache_flip_op: op to add to graph to make sure the cache update is\n included in the graph.\n \"\"\"\n transformed_features = _get_transformed_features_and_merge_with_previously_transformed(\n features, other_columns, sorted_feature_columns,\n bucket_boundaries_dict, cat_transformed)\n\n cached = [\n state_ops.assign(cached_features[i], transformed_features[i])\n for i in range(num_features)\n ]\n # TODO(youngheek): Try other combination of dependencies so that the\n # function returns a single result, not a tuple.\n with ops.control_dependencies(cached):\n cache_flip_op = are_features_cached.assign(True)\n return cached, cache_flip_op\n\n return _cond(are_features_cached, lambda: (cached_features,\n control_flow_ops.no_op()),\n _cache_features_and_return)\n\n input_feature_list, cache_flip_op = _cond(are_boundaries_ready,\n get_features_without_cache,\n get_features_with_cache)\n\n return input_feature_list, cache_flip_op\n\n\nclass _CacheTrainingStatesUsingHashTable(object):\n \"\"\"Caching logits, etc. 
using MutableHashTable.\"\"\"\n\n def __init__(self, example_ids, logits_dimension):\n \"\"\"Creates a cache with the given configuration.\n\n It maintains a MutableDenseHashTable for all values.\n The API lookup() and insert() would have those specs,\n tree_ids: shape=[batch_size], dtype=int32\n node_ids: shape=[batch_size], dtype=int32\n logits: shape=[batch_size, logits_dimension], dtype=float32\n However in the MutableDenseHashTable, ids are bitcasted into float32 and\n all values are concatenated as a single tensor (of float32).\n\n Hence conversion happens internally before inserting to the HashTable and\n after lookup from it.\n\n Args:\n example_ids: a Rank 1 tensor to be used as a key of the cache.\n logits_dimension: a constant (int) for the dimension of logits.\n\n Raises:\n ValueError: if example_ids is other than int64 or string.\n \"\"\"\n if dtypes.as_dtype(dtypes.int64).is_compatible_with(example_ids.dtype):\n empty_key = -1 << 62\n deleted_key = -1 << 61\n elif dtypes.as_dtype(dtypes.string).is_compatible_with(example_ids.dtype):\n empty_key = ''\n deleted_key = 'NEVER_USED_DELETED_KEY'\n else:\n raise ValueError('Unsupported example_id_feature dtype %s.' %\n example_ids.dtype)\n # Cache holds latest <tree_id, node_id, logits> for each example.\n # tree_id and node_id are both int32 but logits is a float32.\n # To reduce the overhead, we store all of them together as float32 and\n # bitcast the ids to int32.\n self._table_ref = lookup_ops.mutable_dense_hash_table_v2(\n empty_key=empty_key,\n deleted_key=deleted_key,\n value_dtype=dtypes.float32,\n value_shape=[3])\n self._example_ids = ops.convert_to_tensor(example_ids)\n if self._example_ids.shape.ndims not in (None, 1):\n raise ValueError('example_id should have rank 1, but got %s' %\n self._example_ids)\n self._logits_dimension = logits_dimension\n\n def lookup(self):\n \"\"\"Returns cached_tree_ids, cached_node_ids, cached_logits.\"\"\"\n cached_tree_ids, cached_node_ids, cached_logits = array_ops.split(\n lookup_ops.lookup_table_find_v2(\n self._table_ref,\n self._example_ids,\n default_value=[0.0, _DUMMY_NODE_ID, 0.0]),\n [1, 1, self._logits_dimension],\n axis=1)\n cached_tree_ids = array_ops.squeeze(\n array_ops.bitcast(cached_tree_ids, dtypes.int32))\n cached_node_ids = array_ops.squeeze(\n array_ops.bitcast(cached_node_ids, dtypes.int32))\n if self._example_ids.shape.ndims is not None:\n cached_logits.set_shape(\n [self._example_ids.shape[0], self._logits_dimension])\n return (cached_tree_ids, cached_node_ids, cached_logits)\n\n def insert(self, tree_ids, node_ids, logits):\n \"\"\"Inserts values and returns the op.\"\"\"\n insert_op = lookup_ops.lookup_table_insert_v2(\n self._table_ref, self._example_ids,\n array_ops.concat([\n array_ops.expand_dims(\n array_ops.bitcast(tree_ids, dtypes.float32), 1),\n array_ops.expand_dims(\n array_ops.bitcast(node_ids, dtypes.float32), 1),\n logits,\n ],\n axis=1,\n name='value_concat_for_cache_insert'))\n return insert_op\n\n\nclass _CacheTrainingStatesUsingVariables(object):\n \"\"\"Caching logits, etc. 
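# The cache above packs int32 tree/node ids together with float32 logits into\n# one float32 value tensor by reinterpreting the id bits. A numpy sketch of\n# the same bit-level trick (ndarray.view plays the role of array_ops.bitcast):\nimport numpy as np\n\nids = np.array([7, 42], dtype=np.int32)\nas_float = ids.view(np.float32) # bitcast int32 -> float32, no rounding\nround_trip = as_float.view(np.int32) # bitcast back\nassert round_trip.tolist() == [7, 42] # ids survive exactly, unlike a cast\n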
using Variables.\"\"\"\n\n def __init__(self, batch_size, logits_dimension):\n \"\"\"Creates a cache with the given configuration.\n\n It maintains three variables, tree_ids, node_ids, logits, for caching.\n tree_ids: shape=[batch_size], dtype=int32\n node_ids: shape=[batch_size], dtype=int32\n logits: shape=[batch_size, logits_dimension], dtype=float32\n\n Note: this can be used only with the in-memory data setting.\n\n Args:\n batch_size: `int`, the size of the cache.\n logits_dimension: a constant (int) for the dimension of logits.\n \"\"\"\n self._logits_dimension = logits_dimension\n self._tree_ids = _variable(\n array_ops.zeros([batch_size], dtype=dtypes.int32),\n name='tree_ids_cache')\n self._node_ids = _variable(\n _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),\n name='node_ids_cache')\n self._logits = _variable(\n array_ops.zeros([batch_size, logits_dimension], dtype=dtypes.float32),\n name='logits_cache')\n\n def lookup(self):\n \"\"\"Returns cached_tree_ids, cached_node_ids, cached_logits.\"\"\"\n return (self._tree_ids, self._node_ids, self._logits)\n\n def insert(self, tree_ids, node_ids, logits):\n \"\"\"Inserts values and returns the op.\"\"\"\n return control_flow_ops.group([\n self._tree_ids.assign(tree_ids),\n self._node_ids.assign(node_ids),\n self._logits.assign(logits)\n ],\n name='cache_insert')\n\n\nclass _StopAtAttemptsHook(session_run_hook.SessionRunHook):\n \"\"\"Hook that requests stop at the number of attempts.\"\"\"\n\n def __init__(self, num_finalized_trees_tensor, num_attempted_layers_tensor,\n max_trees, max_depth):\n self._num_finalized_trees_tensor = num_finalized_trees_tensor\n self._num_attempted_layers_tensor = num_attempted_layers_tensor\n self._max_trees = max_trees\n self._max_depth = max_depth\n\n def before_run(self, run_context):\n return session_run_hook.SessionRunArgs(\n [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])\n\n def after_run(self, run_context, run_values):\n # num_* tensors should be retrieved in a separate session from the training\n # one, in order to read the values after growing.\n # So, if it's approaching the limit, get the actual values with an\n # additional session run.\n num_finalized_trees, num_attempted_layers = run_values.results\n if (num_finalized_trees >= self._max_trees - 1 or\n num_attempted_layers > 2 * self._max_trees * self._max_depth - 1):\n num_finalized_trees, num_attempted_layers = run_context.session.run(\n [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])\n if (num_finalized_trees >= self._max_trees or\n num_attempted_layers > 2 * self._max_trees * self._max_depth):\n run_context.request_stop()\n\n\ndef _get_max_splits(tree_hparams):\n \"\"\"Calculates the max possible number of splits based on tree params.\"\"\"\n # Maximum number of splits possible in a tree of depth D is 2^D - 1, which\n # is what (1 << D) - 1 computes.\n max_splits = (1 << tree_hparams.max_depth) - 1\n return max_splits\n\n\nclass _EnsembleGrower(object):\n \"\"\"Abstract base class for different types of ensemble growers.\n\n Use it to receive training ops for growing and centering bias, depending\n on the implementation (for example, in memory or accumulator-based\n distributed):\n grower = ...create subclass grower(tree_ensemble, tree_hparams)\n grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,\n last_layer_nodes_range)\n training_ops.append(grow_op)\n \"\"\"\n\n def __init__(self, tree_ensemble, quantile_accumulator, tree_hparams,\n feature_ids_list):\n \"\"\"Initializes a grower object.\n\n Args:\n 
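# Worked arithmetic for _get_max_splits: a binary tree grown to depth D has at\n# most 2**D - 1 internal (split) nodes, which is what (1 << D) - 1 computes.\n# The _StopAtAttemptsHook above similarly bounds training at\n# 2 * max_trees * max_depth attempted layers.\nfor depth, expected_splits in [(1, 1), (2, 3), (6, 63)]:\n assert (1 << depth) - 1 == expected_splits\n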
tree_ensemble: A TreeEnsemble variable.\n quantile_accumulator: A QuantileAccumulator variable.\n tree_hparams: TODO. collections.namedtuple for hyper parameters.\n feature_ids_list: a list of lists of feature ids for each bucket size.\n\n Raises:\n ValueError: when pruning mode is invalid or pruning is used and no tree\n complexity is set.\n \"\"\"\n self._tree_ensemble = tree_ensemble\n self._tree_hparams = tree_hparams\n self._quantile_accumulator = quantile_accumulator\n self._feature_ids_list = feature_ids_list\n # pylint: disable=protected-access\n self._pruning_mode_parsed = boosted_trees_ops.PruningMode.from_str(\n tree_hparams.pruning_mode)\n\n if tree_hparams.tree_complexity > 0:\n if self._pruning_mode_parsed == boosted_trees_ops.PruningMode.NO_PRUNING:\n raise ValueError(\n 'Tree complexity has no effect unless a pruning mode is chosen.')\n else:\n if self._pruning_mode_parsed != boosted_trees_ops.PruningMode.NO_PRUNING:\n raise ValueError('For pruning, tree_complexity must be positive.')\n # pylint: enable=protected-access\n\n @abc.abstractmethod\n def accumulate_quantiles(self, float_features, weights, are_boundaries_ready):\n \"\"\"Accumulate quantile information for float features.\n\n Args:\n float_features: float features.\n weights: weights Tensor.\n are_boundaries_ready: bool variable.\n\n Returns:\n An operation for accumulating quantiles.\n \"\"\"\n\n @abc.abstractmethod\n def center_bias(self, center_bias_var, gradients, hessians):\n \"\"\"Centers bias, if ready, based on statistics.\n\n Args:\n center_bias_var: A variable that will be updated when bias centering\n has finished.\n gradients: A rank 2 tensor of gradients.\n hessians: A rank 2 tensor of hessians.\n\n Returns:\n An operation for centering bias.\n \"\"\"\n\n @abc.abstractmethod\n def grow_tree(self, stats_summaries_list, last_layer_nodes_range):\n \"\"\"Grows a tree, if ready, based on provided statistics.\n\n Args:\n stats_summaries_list: List of stats summary tensors, representing sums of\n gradients and hessians for each feature bucket.\n last_layer_nodes_range: A tensor representing ids of the nodes in the\n current layer, to be split.\n\n Returns:\n An op for growing a tree.\n \"\"\"\n\n def chief_init_op(self):\n \"\"\"Ops that chief needs to run to initialize the state.\"\"\"\n return control_flow_ops.no_op()\n\n # ============= Helper methods ===========\n\n def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):\n \"\"\"Updates the ensembles and cache (if needed) with logits prior.\"\"\"\n continue_centering = boosted_trees_ops.center_bias(\n self._tree_ensemble.resource_handle,\n mean_gradients=mean_gradients,\n mean_hessians=mean_hessians,\n l1=self._tree_hparams.l1,\n l2=self._tree_hparams.l2)\n return center_bias_var.assign(continue_centering)\n\n def _grow_tree_from_stats_summaries(self, stats_summaries_list,\n last_layer_nodes_range):\n \"\"\"Updates ensemble based on the best gains from stats summaries.\"\"\"\n node_ids_per_feature = []\n gains_list = []\n thresholds_list = []\n left_node_contribs_list = []\n right_node_contribs_list = []\n all_feature_ids = []\n assert len(stats_summaries_list) == len(self._feature_ids_list)\n\n max_splits = _get_max_splits(self._tree_hparams)\n\n for i, feature_ids in enumerate(self._feature_ids_list):\n (numeric_node_ids_per_feature, numeric_gains_list,\n numeric_thresholds_list, numeric_left_node_contribs_list,\n numeric_right_node_contribs_list) = (\n boosted_trees_ops.calculate_best_gains_per_feature(\n 
node_id_range=last_layer_nodes_range,\n stats_summary_list=stats_summaries_list[i],\n l1=self._tree_hparams.l1,\n l2=self._tree_hparams.l2,\n tree_complexity=self._tree_hparams.tree_complexity,\n min_node_weight=self._tree_hparams.min_node_weight,\n max_splits=max_splits))\n\n all_feature_ids += feature_ids\n node_ids_per_feature += numeric_node_ids_per_feature\n gains_list += numeric_gains_list\n thresholds_list += numeric_thresholds_list\n left_node_contribs_list += numeric_left_node_contribs_list\n right_node_contribs_list += numeric_right_node_contribs_list\n\n grow_op = boosted_trees_ops.update_ensemble(\n # Confirm if local_tree_ensemble or tree_ensemble should be used.\n self._tree_ensemble.resource_handle,\n feature_ids=all_feature_ids,\n node_ids=node_ids_per_feature,\n gains=gains_list,\n thresholds=thresholds_list,\n left_node_contribs=left_node_contribs_list,\n right_node_contribs=right_node_contribs_list,\n learning_rate=self._tree_hparams.learning_rate,\n max_depth=self._tree_hparams.max_depth,\n pruning_mode=self._pruning_mode_parsed)\n return grow_op\n\n\nclass _InMemoryEnsembleGrower(_EnsembleGrower):\n \"\"\"An in-memory ensemble grower.\"\"\"\n\n def __init__(self, tree_ensemble, quantile_accumulator, tree_hparams,\n feature_ids_list):\n\n super(_InMemoryEnsembleGrower, self).__init__(\n tree_ensemble=tree_ensemble,\n quantile_accumulator=quantile_accumulator,\n tree_hparams=tree_hparams,\n feature_ids_list=feature_ids_list)\n\n def accumulate_quantiles(self, float_features, weights, are_boundaries_ready):\n summary_op = self._quantile_accumulator.add_summaries(\n float_features, weights)\n with ops.control_dependencies([summary_op]):\n flush = self._quantile_accumulator.flush()\n with ops.control_dependencies([flush]):\n return are_boundaries_ready.assign(True).op\n\n def center_bias(self, center_bias_var, gradients, hessians):\n # For in memory, we already have a full batch of gradients and hessians,\n # so just take a mean and proceed with centering.\n mean_gradients = array_ops.expand_dims(\n math_ops.reduce_mean(gradients, 0), 0)\n mean_hessians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)\n return self._center_bias_fn(center_bias_var, mean_gradients, mean_hessians)\n\n def grow_tree(self, stats_summaries_list, last_layer_nodes_range):\n # For in memory, we already have full data in one batch, so we can grow the\n # tree immediately.\n return self._grow_tree_from_stats_summaries(stats_summaries_list,\n last_layer_nodes_range)\n\n\nclass _AccumulatorEnsembleGrower(_EnsembleGrower):\n \"\"\"An accumulator-based ensemble grower.\"\"\"\n\n def __init__(self, tree_ensemble, quantile_accumulator, tree_hparams,\n stamp_token, n_batches_per_layer, bucket_size_list, is_chief,\n center_bias, feature_ids_list):\n super(_AccumulatorEnsembleGrower, self).__init__(\n tree_ensemble=tree_ensemble,\n quantile_accumulator=quantile_accumulator,\n tree_hparams=tree_hparams,\n feature_ids_list=feature_ids_list)\n self._stamp_token = stamp_token\n self._n_batches_per_layer = n_batches_per_layer\n self._bucket_size_list = bucket_size_list\n self._is_chief = is_chief\n self._growing_accumulators = []\n self._chief_init_ops = []\n max_splits = _get_max_splits(self._tree_hparams)\n for i, feature_ids in enumerate(self._feature_ids_list):\n accumulator = _accumulator(\n dtype=dtypes.float32,\n # The stats consist of grads and hessians (the last dimension).\n shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],\n 
shared_name='numeric_stats_summary_accumulator_' + str(i))\n self._chief_init_ops.append(\n accumulator.set_global_step(self._stamp_token))\n self._growing_accumulators.append(accumulator)\n self._center_bias = center_bias\n if center_bias:\n self._bias_accumulator = _accumulator(\n dtype=dtypes.float32,\n # The stats consist of grad and hessian means only.\n # TODO(nponomareva): this will change for a multiclass\n shape=[2, 1],\n shared_name='bias_accumulator')\n self._chief_init_ops.append(\n self._bias_accumulator.set_global_step(self._stamp_token))\n\n def accumulate_quantiles(self, float_features, weights, are_boundaries_ready):\n summary_op = self._quantile_accumulator.add_summaries(\n float_features, weights)\n cond_accum = _accumulator(\n dtype=dtypes.float32, shape={}, shared_name='quantile_summary_accum')\n cond_accum_step = cond_accum.set_global_step(self._stamp_token)\n apply_grad = cond_accum.apply_grad(\n array_ops.constant(0.), self._stamp_token)\n update_quantile_op = control_flow_ops.group(summary_op, cond_accum_step,\n apply_grad)\n if not self._is_chief:\n return update_quantile_op\n\n with ops.control_dependencies([update_quantile_op]):\n\n def flush_fn():\n grad = cond_accum.take_grad(1)\n flush_op = self._quantile_accumulator.flush()\n boundaries_ready_op = are_boundaries_ready.assign(True).op\n return control_flow_ops.group(flush_op, grad, boundaries_ready_op)\n\n finalize_quantile_op = _cond(\n math_ops.greater_equal(cond_accum.num_accumulated(),\n self._n_batches_per_layer),\n flush_fn,\n control_flow_ops.no_op,\n name='wait_until_quantiles_accumulated')\n return finalize_quantile_op\n\n def center_bias(self, center_bias_var, gradients, hessians):\n # For the non-in-memory situation, we need to accumulate enough batches\n # first before proceeding with centering bias.\n\n # Create an accumulator.\n if not self._center_bias:\n raise RuntimeError('center_bias called but bias centering is disabled.')\n bias_dependencies = []\n grads_and_hess = array_ops.stack([gradients, hessians], axis=0)\n grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)\n\n apply_grad = self._bias_accumulator.apply_grad(grads_and_hess,\n self._stamp_token)\n bias_dependencies.append(apply_grad)\n\n # Center bias if enough batches were processed.\n with ops.control_dependencies(bias_dependencies):\n if not self._is_chief:\n return control_flow_ops.no_op()\n\n def _set_accumulators_stamp():\n return control_flow_ops.group([\n acc.set_global_step(self._stamp_token + 1)\n for acc in self._growing_accumulators\n ])\n\n def center_bias_from_accumulator():\n accumulated = array_ops.unstack(\n self._bias_accumulator.take_grad(1), axis=0)\n center_bias_op = self._center_bias_fn(\n center_bias_var, array_ops.expand_dims(accumulated[0], 0),\n array_ops.expand_dims(accumulated[1], 0))\n with ops.control_dependencies([center_bias_op]):\n return _cond(center_bias_var, control_flow_ops.no_op,\n _set_accumulators_stamp)\n\n center_bias_op = _cond(\n math_ops.greater_equal(self._bias_accumulator.num_accumulated(),\n self._n_batches_per_layer),\n center_bias_from_accumulator,\n control_flow_ops.no_op,\n name='wait_until_n_batches_for_bias_accumulated')\n return center_bias_op\n\n def grow_tree(self, stats_summaries_list, last_layer_nodes_range):\n dependencies = []\n for i in range(len(self._feature_ids_list)):\n stats_summaries = stats_summaries_list[i]\n apply_grad = self._growing_accumulators[i].apply_grad(\n array_ops.stack(stats_summaries, axis=0), self._stamp_token)\n 
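# A pure-Python sketch (an illustrative assumption, not the actual\n# ConditionalAccumulator API) of the gating pattern used throughout this\n# class: workers keep adding per-batch statistics, and the chief only acts\n# once n_batches_per_layer contributions have arrived, draining the\n# accumulator in the process.\nclass ToyAccumulator(object):\n def __init__(self):\n self.total, self.count = 0.0, 0\n\n def apply_grad(self, value):\n self.total += value\n self.count += 1\n\n def take_grad(self):\n total, self.total, self.count = self.total, 0.0, 0\n return total\n\nacc, n_batches_per_layer, grown = ToyAccumulator(), 3, []\nfor batch_stat in [1.0, 2.0, 3.0, 4.0]:\n acc.apply_grad(batch_stat)\n if acc.count >= n_batches_per_layer:\n grown.append(acc.take_grad()) # e.g. grow one tree layer from pooled stats\nassert grown == [6.0] and acc.count == 1\n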
dependencies.append(apply_grad)\n\n # Grow the tree if enough batches are accumulated.\n with ops.control_dependencies(dependencies):\n if not self._is_chief:\n return control_flow_ops.no_op()\n\n min_accumulated = math_ops.reduce_min(\n array_ops.stack(\n [acc.num_accumulated() for acc in self._growing_accumulators]))\n\n def grow_tree_from_accumulated_summaries_fn():\n \"\"\"Updates tree with the best layer from accumulated summaries.\"\"\"\n # Take out the accumulated summaries from the accumulator and grow.\n stats_summaries_list = [\n array_ops.unstack(accumulator.take_grad(1), axis=0)\n for accumulator in self._growing_accumulators\n ]\n grow_op = self._grow_tree_from_stats_summaries(stats_summaries_list,\n last_layer_nodes_range)\n return grow_op\n\n grow_model = _cond(\n math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),\n grow_tree_from_accumulated_summaries_fn,\n control_flow_ops.no_op,\n name='wait_until_n_batches_accumulated')\n return grow_model\n\n def chief_init_op(self):\n \"\"\"Ops that chief needs to run to initialize the state.\"\"\"\n return control_flow_ops.group(self._chief_init_ops)\n\n\ndef _bt_model_fn(features,\n labels,\n mode,\n head,\n feature_columns,\n tree_hparams,\n n_batches_per_layer,\n config,\n closed_form_grad_and_hess_fn=None,\n example_id_column_name=None,\n weight_column=None,\n train_in_memory=False,\n name='boosted_trees'):\n \"\"\"Gradient Boosted Trees model_fn.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n head: A `head_lib._Head` instance.\n feature_columns: Iterable of `fc_old._FeatureColumn` model inputs.\n tree_hparams: TODO. collections.namedtuple for hyper parameters.\n n_batches_per_layer: A `Tensor` of `int64`. Each layer is built after at\n least n_batches_per_layer accumulations.\n config: `RunConfig` object to configure the runtime settings.\n closed_form_grad_and_hess_fn: a function that accepts logits and labels and\n returns gradients and hessians. By default, they are created by\n tf.gradients() from the loss.\n example_id_column_name: Name of the feature for a unique ID per example.\n Currently experimental -- not exposed to public API.\n weight_column: A string or a `_NumericColumn` created by\n `tf.fc_old.numeric_column` defining feature column representing weights.\n It is used to downweight or boost examples during training. It will be\n multiplied by the loss of the example. If it is a string, it is used as a\n key to fetch weight tensor from the `features`. 
If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then\n weight_column.normalizer_fn is applied on it to get weight tensor.\n train_in_memory: `bool`, when true, it assumes the dataset is in memory,\n i.e., input_fn should return the entire dataset as a single batch,\n n_batches_per_layer should be set as 1, num_worker_replicas should be 1,\n and num_ps_replicas should be 0 in `tf.Estimator.RunConfig`.\n name: Name to use for the model.\n\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: mode or params are invalid, or features has the wrong type.\n \"\"\"\n sorted_feature_columns = sorted(feature_columns, key=lambda tc: tc.name)\n float_columns = _get_float_feature_columns(sorted_feature_columns)\n\n with ops.name_scope(name) as name:\n # Prepare.\n global_step = training_util.get_or_create_global_step()\n # Create Ensemble resources.\n tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)\n\n # Create Quantile accumulator resource.\n eps = tree_hparams.quantile_sketch_epsilon\n num_quantiles = int(1. / eps)\n bucket_boundaries_dict = {}\n quantile_accumulator = None\n\n if float_columns:\n num_float_features = _calculate_num_features(float_columns)\n quantile_accumulator = boosted_trees_ops.QuantileAccumulator(\n epsilon=eps,\n num_streams=num_float_features,\n num_quantiles=num_quantiles,\n name=_QUANTILE_ACCUMULATOR_RESOURCE_NAME)\n bucket_boundaries = quantile_accumulator.get_bucket_boundaries()\n bucket_boundaries_dict = _get_float_boundaries_dict(\n float_columns, bucket_boundaries)\n are_boundaries_ready_initial = False\n else:\n are_boundaries_ready_initial = True\n\n bucket_size_list, feature_ids_list = _group_features_by_num_buckets(\n sorted_feature_columns, num_quantiles)\n\n # Create logits.\n if mode != ModeKeys.TRAIN:\n input_feature_list = _get_transformed_features(features,\n sorted_feature_columns,\n bucket_boundaries_dict)\n logits = boosted_trees_ops.predict(\n # For non-TRAIN mode, ensemble doesn't change after initialization,\n # so no local copy is needed; using tree_ensemble directly.\n tree_ensemble_handle=tree_ensemble.resource_handle,\n bucketized_features=input_feature_list,\n logits_dimension=head.logits_dimension)\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=control_flow_ops.no_op,\n logits=logits)\n\n # ============== Training graph ==============\n center_bias = tree_hparams.center_bias\n is_single_machine = (config.num_worker_replicas <= 1)\n\n if train_in_memory:\n assert n_batches_per_layer == 1, (\n 'When train_in_memory is enabled, input_fn should return the entire '\n 'dataset as a single batch, and n_batches_per_layer should be set as '\n '1.')\n if (not config.is_chief or config.num_worker_replicas > 1 or\n config.num_ps_replicas > 0):\n raise ValueError('train_in_memory is supported only for '\n 'non-distributed training.')\n worker_device = control_flow_ops.no_op().device\n # Extract input features and set up cache for training.\n training_state_cache = None\n\n are_boundaries_ready = _variable(\n initial_value=are_boundaries_ready_initial,\n name='are_boundaries_ready',\n trainable=False)\n\n if train_in_memory:\n # cache transformed features as well for in-memory training.\n batch_size = array_ops.shape(labels)[0]\n\n def _split_into_cat_and_other_columns():\n cat_columns = []\n other_columns = []\n for fc in sorted_feature_columns:\n if isinstance(\n fc,\n (feature_column_lib.IndicatorColumn, fc_old._IndicatorColumn)):\n 
cat_columns.append(fc)\n else:\n other_columns.append(fc)\n return cat_columns, other_columns\n\n # Split columns into categorical and other columns.\n cat_columns, other_columns = _split_into_cat_and_other_columns()\n\n input_feature_list, input_cache_op = _cache_transformed_features(\n features, sorted_feature_columns, cat_columns, other_columns,\n batch_size, bucket_boundaries_dict, are_boundaries_ready)\n\n training_state_cache = _CacheTrainingStatesUsingVariables(\n batch_size, head.logits_dimension)\n else:\n input_feature_list = _get_transformed_features(features,\n sorted_feature_columns,\n bucket_boundaries_dict)\n if example_id_column_name:\n example_ids = features[example_id_column_name]\n training_state_cache = _CacheTrainingStatesUsingHashTable(\n example_ids, head.logits_dimension)\n\n if training_state_cache:\n cached_tree_ids, cached_node_ids, cached_logits = (\n training_state_cache.lookup())\n else:\n # Always start from the beginning when no cache is set up.\n batch_size = array_ops.shape(labels)[0]\n cached_tree_ids, cached_node_ids, cached_logits = (\n array_ops.zeros([batch_size], dtype=dtypes.int32),\n _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),\n array_ops.zeros([batch_size, head.logits_dimension],\n dtype=dtypes.float32))\n\n if is_single_machine:\n local_tree_ensemble = tree_ensemble\n ensemble_reload = control_flow_ops.no_op()\n else:\n # Have a local copy of ensemble for the distributed setting.\n with ops.device(worker_device):\n local_tree_ensemble = boosted_trees_ops.TreeEnsemble(\n name=name + '_local', is_local=True)\n # TODO(soroush): Do partial updates if this becomes a bottleneck.\n ensemble_reload = local_tree_ensemble.deserialize(\n *tree_ensemble.serialize())\n with ops.control_dependencies([ensemble_reload]):\n (stamp_token, num_trees, num_finalized_trees, num_attempted_layers,\n last_layer_nodes_range) = local_tree_ensemble.get_states()\n partial_logits, tree_ids, node_ids = boosted_trees_ops.training_predict(\n tree_ensemble_handle=local_tree_ensemble.resource_handle,\n cached_tree_ids=cached_tree_ids,\n cached_node_ids=cached_node_ids,\n bucketized_features=input_feature_list,\n logits_dimension=head.logits_dimension)\n logits = cached_logits + partial_logits\n\n if train_in_memory:\n grower = _InMemoryEnsembleGrower(tree_ensemble, quantile_accumulator,\n tree_hparams, feature_ids_list)\n else:\n grower = _AccumulatorEnsembleGrower(tree_ensemble, quantile_accumulator,\n tree_hparams, stamp_token,\n n_batches_per_layer, bucket_size_list,\n config.is_chief, center_bias,\n feature_ids_list)\n\n summary.scalar('ensemble/num_trees', num_trees)\n summary.scalar('ensemble/num_finalized_trees', num_finalized_trees)\n summary.scalar('ensemble/num_attempted_layers', num_attempted_layers)\n\n # Variable that determines whether bias centering is needed.\n center_bias_var = _variable(\n initial_value=center_bias, name='center_bias_needed', trainable=False)\n if weight_column is None:\n weights = array_ops.constant(1., shape=[1])\n else:\n if isinstance(weight_column, six.string_types):\n weight_column = feature_column_lib.numeric_column(\n key=weight_column, shape=(1,))\n weights = _get_transformed_features(features, [weight_column])[0]\n\n # Create training graph.\n def _train_op_fn(loss):\n \"\"\"Run one training iteration.\"\"\"\n\n def _update_quantile_fn():\n \"\"\"Accumulates quantiles.\"\"\"\n with ops.name_scope('UpdateQuantile'):\n float_features = _get_transformed_features(features, float_columns)\n return 
grower.accumulate_quantiles(float_features, weights,\n are_boundaries_ready)\n\n def _grow_tree_fn():\n \"\"\"Grow tree.\"\"\"\n grow_op = [input_cache_op] if train_in_memory else []\n if training_state_cache:\n # Cache logits only after center_bias is complete,\n # if it's in progress.\n def insert_fn():\n return training_state_cache.insert(tree_ids, node_ids, logits)\n\n grow_op.append(\n _cond(center_bias_var, control_flow_ops.no_op, insert_fn))\n\n if closed_form_grad_and_hess_fn:\n gradients, hessians = closed_form_grad_and_hess_fn(logits, labels)\n else:\n gradients = gradients_impl.gradients(\n loss, logits, name='Gradients')[0]\n hessians = gradients_impl.gradients(\n gradients, logits, name='Hessians')[0]\n\n # TODO(youngheek): perhaps storage could be optimized by storing stats\n # with the dimension max_splits_per_layer, instead of max_splits (for\n # the entire tree).\n max_splits = _get_max_splits(tree_hparams)\n\n stats_summaries_list = []\n for i, feature_ids in enumerate(feature_ids_list):\n num_buckets = bucket_size_list[i]\n summaries = [\n array_ops.squeeze(\n boosted_trees_ops.make_stats_summary(\n node_ids=node_ids,\n gradients=gradients,\n hessians=hessians,\n bucketized_features_list=[input_feature_list[f]],\n max_splits=max_splits,\n num_buckets=num_buckets),\n axis=0) for f in feature_ids\n ]\n stats_summaries_list.append(summaries)\n if center_bias:\n update_model = _cond(\n center_bias_var,\n functools.partial(\n grower.center_bias,\n center_bias_var,\n gradients,\n hessians,\n ),\n functools.partial(grower.grow_tree, stats_summaries_list,\n last_layer_nodes_range))\n else:\n update_model = grower.grow_tree(stats_summaries_list,\n last_layer_nodes_range)\n grow_op.append(update_model)\n\n with ops.control_dependencies([update_model]):\n increment_global = state_ops.assign_add(global_step, 1).op\n grow_op.append(increment_global)\n\n return control_flow_ops.group(grow_op, name='grow_op')\n\n if not float_columns:\n return _grow_tree_fn()\n else:\n return _cond(are_boundaries_ready, _grow_tree_fn, _update_quantile_fn)\n\n estimator_spec = head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n # Add an early stop hook.\n estimator_spec = estimator_spec._replace(\n training_hooks=estimator_spec.training_hooks +\n (_StopAtAttemptsHook(num_finalized_trees, num_attempted_layers,\n tree_hparams.n_trees, tree_hparams.max_depth),),\n training_chief_hooks=[GrowerInitializationHook(grower.chief_init_op())] +\n list(estimator_spec.training_chief_hooks))\n return estimator_spec\n\n\nclass GrowerInitializationHook(session_run_hook.SessionRunHook):\n \"\"\"A SessionRunHook handles initialization of `_EnsembleGrower`.\"\"\"\n\n def __init__(self, init_op):\n self._init_op = init_op\n\n def after_create_session(self, session, coord):\n session.run(self._init_op)\n\n\ndef _create_classification_head(n_classes,\n weight_column=None,\n label_vocabulary=None):\n \"\"\"Creates a classification head. 
Refer to canned.head for details on args.\"\"\"\n # TODO(nponomareva): Support multi-class cases.\n if n_classes == 2:\n # pylint: disable=protected-access\n return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n # pylint: enable=protected-access\n else:\n raise ValueError('For now only binary classification is supported. '\n 'n_classes given as {}'.format(n_classes))\n\n\ndef _create_classification_head_and_closed_form(n_classes, weight_column,\n label_vocabulary):\n \"\"\"Creates a head for classifier and the closed form gradients/hessians.\"\"\"\n head = _create_classification_head(n_classes, weight_column, label_vocabulary)\n if (n_classes == 2 and head.logits_dimension == 1 and\n weight_column is None and label_vocabulary is None):\n # Use the closed-form gradients/hessians for the 2-class case.\n def _grad_and_hess_for_logloss(logits, labels):\n \"\"\"A closed form gradient and hessian for logistic loss.\"\"\"\n # TODO(youngheek): add weights handling.\n predictions = math_ops.reciprocal(math_ops.exp(-logits) + 1.0)\n normalizer = math_ops.reciprocal(\n math_ops.cast(array_ops.size(predictions), dtypes.float32))\n labels = math_ops.cast(labels, dtypes.float32)\n labels = head_lib._check_dense_labels_match_logits_and_reshape( # pylint: disable=protected-access\n labels, logits, head.logits_dimension)\n gradients = (predictions - labels) * normalizer\n hessians = predictions * (1.0 - predictions) * normalizer\n return gradients, hessians\n\n closed_form = _grad_and_hess_for_logloss\n else:\n closed_form = None\n return (head, closed_form)
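\n\n\n# --- Editor's illustrative sketch (not part of the original module). The closed\n# form above evaluates, per example, gradient g = (p - labels) / n and hessian\n# h = p * (1 - p) / n, where p = sigmoid(logits) and n is the batch size (the\n# SUM_OVER_BATCH_SIZE reduction). A minimal NumPy recomputation of those\n# formulas, assuming `np` is numpy as imported elsewhere in this module:\ndef _demo_logloss_grad_and_hess():\n \"\"\"Recomputes the closed-form logistic-loss gradients/hessians in NumPy.\"\"\"\n logits = np.array([-2.0, 0.0, 3.0])\n labels = np.array([0.0, 1.0, 1.0])\n predictions = 1.0 / (1.0 + np.exp(-logits))  # sigmoid\n normalizer = 1.0 / logits.size  # SUM_OVER_BATCH_SIZE reduction\n gradients = (predictions - labels) * normalizer\n hessians = predictions * (1.0 - predictions) * normalizer\n return gradients, hessians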
\n\n\ndef _create_regression_head(label_dimension, weight_column=None):\n if label_dimension != 1:\n raise ValueError('For now only 1 dimension regression is supported. '\n 'label_dimension given as {}'.format(label_dimension))\n # pylint: disable=protected-access\n return head_lib._regression_head(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n # pylint: enable=protected-access\n\n\ndef _compute_feature_importances_per_tree(tree, num_features):\n \"\"\"Computes the importance of each feature in the tree.\"\"\"\n importances = np.zeros(num_features)\n\n for node in tree.nodes:\n node_type = node.WhichOneof('node')\n if node_type == 'bucketized_split':\n feature_id = node.bucketized_split.feature_id\n importances[feature_id] += node.metadata.gain\n elif node_type == 'leaf':\n assert node.metadata.gain == 0\n else:\n raise ValueError('Unexpected split type %s' % node_type)\n\n return importances\n\n\ndef _compute_feature_importances(tree_ensemble, num_features, normalize):\n \"\"\"Computes gain-based feature importances.\n\n The higher the value, the more important the feature.\n\n Args:\n tree_ensemble: a trained tree ensemble, instance of proto\n boosted_trees.TreeEnsemble.\n num_features: The total number of feature ids.\n normalize: If True, normalize the feature importances.\n\n Returns:\n feature_importances: A list of corresponding feature importances indexed by\n the original feature ids.\n\n Raises:\n AssertionError: When normalize = True, if feature importances\n contain a negative value, or if normalization is not possible\n (e.g. ensemble is empty or trees contain only a root node).\n \"\"\"\n tree_importances = [\n _compute_feature_importances_per_tree(tree, num_features)\n for tree in tree_ensemble.trees\n ]\n tree_importances = np.array(tree_importances)\n tree_weights = np.array(tree_ensemble.tree_weights).reshape(-1, 1)\n feature_importances = np.sum(tree_importances * tree_weights, axis=0)\n if normalize:\n assert np.all(feature_importances >= 0), ('feature_importances '\n 'must be non-negative.')\n normalizer = np.sum(feature_importances)\n assert normalizer > 0, 'Trees are all empty or contain only a root node.'\n feature_importances /= normalizer\n\n return feature_importances
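\n\n\n# --- Editor's illustrative sketch (not part of the original module). The\n# aggregation above is a tree-weight-weighted sum of per-tree gain totals,\n# optionally normalized to sum to 1. A tiny NumPy example with hypothetical\n# numbers (two trees, three features):\ndef _demo_feature_importance_aggregation():\n \"\"\"Aggregates per-tree gains the same way _compute_feature_importances does.\"\"\"\n tree_importances = np.array([[2.0, 0.0, 1.0],  # summed gains per feature, tree 0\n [0.0, 4.0, 1.0]])  # summed gains per feature, tree 1\n tree_weights = np.array([1.0, 0.5]).reshape(-1, 1)\n feature_importances = np.sum(tree_importances * tree_weights, axis=0)\n # -> [2.0, 2.0, 1.5]; normalizing divides by 5.5 so the result sums to 1.\n return feature_importances / np.sum(feature_importances)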
\n\n\ndef _bt_explanations_fn(features,\n head,\n sorted_feature_columns,\n quantile_sketch_epsilon,\n name='boosted_trees'):\n \"\"\"Gradient Boosted Trees predict with explanations model_fn.\n\n Args:\n features: dict of `Tensor`.\n head: A `head_lib._Head` instance.\n sorted_feature_columns: Sorted iterable of `fc_old._FeatureColumn` model\n inputs.\n quantile_sketch_epsilon: float between 0 and 1. Error bound for quantile\n computation. This is only used for float feature columns, and the number\n of buckets generated per float feature is 1/quantile_sketch_epsilon.\n name: Name used for the model.\n\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: mode or params are invalid, or features has the wrong type.\n \"\"\"\n mode = ModeKeys.PREDICT\n with ops.name_scope(name) as name:\n # Create Ensemble resources.\n tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)\n\n # pylint: disable=protected-access\n float_columns = _get_float_feature_columns(sorted_feature_columns)\n num_float_features = _calculate_num_features(float_columns)\n # pylint: enable=protected-access\n num_quantiles = int(1. / quantile_sketch_epsilon)\n if not num_float_features:\n input_feature_list = _get_transformed_features(features,\n sorted_feature_columns)\n else:\n # Create Quantile accumulator resource.\n quantile_accumulator = boosted_trees_ops.QuantileAccumulator(\n epsilon=quantile_sketch_epsilon,\n num_streams=num_float_features,\n num_quantiles=num_quantiles,\n name=_QUANTILE_ACCUMULATOR_RESOURCE_NAME)\n bucket_boundaries = quantile_accumulator.get_bucket_boundaries()\n bucket_boundaries_dict = _get_float_boundaries_dict(\n float_columns, bucket_boundaries)\n input_feature_list = _get_transformed_features(features,\n sorted_feature_columns,\n bucket_boundaries_dict)\n logits = boosted_trees_ops.predict(\n # For non-TRAIN mode, ensemble doesn't change after initialization,\n # so no local copy is needed; using tree_ensemble directly.\n tree_ensemble_handle=tree_ensemble.resource_handle,\n bucketized_features=input_feature_list,\n logits_dimension=head.logits_dimension)\n\n estimator_spec = head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=None,\n train_op_fn=control_flow_ops.no_op,\n logits=logits)\n\n debug_op = boosted_trees_ops.example_debug_outputs(\n tree_ensemble.resource_handle,\n bucketized_features=input_feature_list,\n logits_dimension=head.logits_dimension)\n estimator_spec.predictions[boosted_trees_utils._DEBUG_PROTO_KEY] = debug_op # pylint: disable=protected-access\n return estimator_spec\n\n\ndef _get_float_boundaries_dict(float_columns, bucket_boundaries):\n \"\"\"Creates a dict where each key is a column name and each value is that column's bucket boundaries.\"\"\"\n bucket_boundaries_dict = {}\n feature_idx = 0\n for column in float_columns:\n num_column_dimensions = _get_variable_shape(\n column)[0] if _get_variable_shape(column).as_list() else 1\n bucket_boundaries_dict[\n column.name] = bucket_boundaries[feature_idx:feature_idx +\n num_column_dimensions]\n feature_idx += num_column_dimensions\n return bucket_boundaries_dict
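\n\n\n# --- Editor's illustrative sketch (not part of the original module).\n# _get_float_boundaries_dict above walks the float columns in order and assigns\n# each one as many boundary rows as it has dimensions. The same slicing with\n# plain lists and a hypothetical 2-dim column 'a' followed by a 1-dim column 'b':\ndef _demo_boundaries_dict():\n \"\"\"Mimics the per-column boundary slicing with plain Python lists.\"\"\"\n bucket_boundaries = [[0.1, 0.5], [0.2, 0.6], [1.0, 2.0]]  # one row per dimension\n column_dims = [('a', 2), ('b', 1)]\n result, feature_idx = {}, 0\n for name, num_dims in column_dims:\n result[name] = bucket_boundaries[feature_idx:feature_idx + num_dims]\n feature_idx += num_dims\n return result  # {'a': first two rows, 'b': the last row}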
\n\n\nclass _BoostedTreesBase(estimator.Estimator):\n \"\"\"Base class for boosted trees estimators.\n\n This class is intended to keep tree-specific functions (e.g., methods for\n feature importances and directional feature contributions) in one central\n place.\n\n It is not a valid (working) Estimator on its own and should only be used as a\n base class.\n \"\"\"\n\n def __init__(self, model_fn, model_dir, config, feature_columns, head,\n center_bias, is_classification, quantile_sketch_epsilon):\n \"\"\"Initializes a `_BoostedTreesBase` instance.\n\n Args:\n model_fn: Model function. See base class for more detail.\n model_dir: Directory to save model parameters, graph, etc. See base\n class for more detail.\n config: `estimator.RunConfig` configuration object.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n head: A `head_lib._Head` instance.\n center_bias: Whether bias centering needs to occur. Bias centering refers\n to the first node in the very first tree returning the prediction that\n is aligned with the original labels distribution. For example, for\n regression problems, the first node will return the mean of the labels.\n For binary classification problems, it will return a logit for a prior\n probability of label 1.\n is_classification: If the estimator is for classification.\n quantile_sketch_epsilon: float between 0 and 1. Error bound for quantile\n computation. This is only used for float feature columns, and the number\n of buckets generated per float feature is 1/quantile_sketch_epsilon.\n \"\"\"\n # We need it so the global step is also a resource var.\n variable_scope.enable_resource_variables()\n\n super(_BoostedTreesBase, self).__init__(\n model_fn=model_fn, model_dir=model_dir, config=config)\n self._sorted_feature_columns = sorted(\n feature_columns, key=lambda tc: tc.name)\n self._head = head\n self._n_features = _calculate_num_features(self._sorted_feature_columns)\n self._feature_col_names = _generate_feature_col_name_mapping(\n self._sorted_feature_columns)\n self._center_bias = center_bias\n self._is_classification = is_classification\n self._quantile_sketch_epsilon = quantile_sketch_epsilon\n\n def experimental_feature_importances(self, normalize=False):\n \"\"\"Computes gain-based feature importances.\n\n The higher the value, the more important the corresponding feature.\n\n Args:\n normalize: If True, normalize the feature importances.\n\n Returns:\n feature_importances: an OrderedDict, where the keys are the feature column\n names and the values are importances. It is sorted by importance.\n\n Raises:\n ValueError: When attempting to normalize on an empty ensemble\n or an ensemble of trees which have no splits. Or when attempting\n to normalize and feature importances have negative values.\n \"\"\"\n reader = checkpoint_utils.load_checkpoint(self._model_dir)\n serialized = reader.get_tensor('boosted_trees:0_serialized')\n if not serialized:\n raise ValueError('Found empty serialized string for TreeEnsemble. '\n 'You should only call this method after training.')\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n\n importances = _compute_feature_importances(ensemble_proto, self._n_features,\n normalize)\n # pylint:disable=protected-access\n return boosted_trees_utils._sum_by_feature_col_name_and_sort(\n self._feature_col_names, importances)\n # pylint:enable=protected-access\n\n def experimental_predict_with_explanations(self,\n input_fn,\n predict_keys=None,\n hooks=None,\n checkpoint_path=None):\n \"\"\"Computes model explainability outputs per example along with predictions.\n\n Currently supports directional feature contributions (DFCs). For each\n instance, DFCs indicate the aggregate contribution of each feature. See\n https://arxiv.org/abs/1312.1121 and\n http://blog.datadive.net/interpreting-random-forests/ for more details.\n\n Args:\n input_fn: A function that provides input data for predicting as\n minibatches. See [Premade Estimators](\n https://tensorflow.org/guide/premade_estimators#create_input_functions)\n for more information. The function should construct and return one of\n the following:\n * A `tf.data.Dataset` object: Outputs of `Dataset` object must be a\n tuple `(features, labels)` with same constraints as below.\n * A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a\n dictionary of string feature name to `Tensor` and `labels` is a\n `Tensor` or a dictionary of string label name to `Tensor`. Both\n `features` and `labels` are consumed by `model_fn`. They should\n satisfy the expectation of `model_fn` from inputs.\n predict_keys: list of `str`, name of the keys to predict. It is used if\n the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If\n `predict_keys` is used then the rest of the predictions will be filtered\n from the dictionary, with the exception of 'bias' and 'dfc', which will\n always be in the dictionary.
If `None`, returns all keys in prediction\n dict, as well as two new keys 'dfc' and 'bias'.\n hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n callbacks inside the prediction call.\n checkpoint_path: Path of a specific checkpoint to predict. If `None`, the\n latest checkpoint in `model_dir` is used. If there are no checkpoints\n in `model_dir`, prediction is run with newly initialized `Variables`\n instead of ones restored from checkpoint.\n\n Yields:\n Evaluated values of `predictions` tensors. The `predictions` tensors will\n contain at least two keys 'dfc' and 'bias' for model explanations. The\n `dfc` value corresponds to the contribution of each feature to the overall\n prediction for this instance (positive indicating that the feature makes\n it more likely to select class 1 and negative less likely). The `dfc` is\n an OrderedDict, where the keys are the feature column names and the values\n are the contributions. It is sorted by the absolute value of the\n contribution (e.g. OrderedDict([('age', -0.54), ('gender', 0.4), ('fare',\n 0.21)])). The 'bias' value will be the same across all the instances,\n corresponding to the probability (classification) or prediction\n (regression) of the training data distribution.\n\n Raises:\n ValueError: when wrong arguments are given or unsupported functionalities\n are requested.\n \"\"\"\n if not self._center_bias:\n raise ValueError('center_bias must be enabled during estimator '\n 'instantiation when using '\n 'experimental_predict_with_explanations.')\n # pylint: disable=protected-access\n if not self._is_classification:\n identity_inverse_link_fn = self._head._inverse_link_fn in (None,\n tf_identity)\n # pylint:enable=protected-access\n if not identity_inverse_link_fn:\n raise ValueError(\n 'For now only identity inverse_link_fn in regression_head is '\n 'supported for experimental_predict_with_explanations.')\n\n # pylint:disable=unused-argument\n def new_model_fn(features, labels, mode):\n return _bt_explanations_fn(features, self._head,\n self._sorted_feature_columns,\n self._quantile_sketch_epsilon)\n\n # pylint:enable=unused-argument\n est = estimator.Estimator(\n model_fn=new_model_fn,\n model_dir=self.model_dir,\n config=self.config,\n warm_start_from=self._warm_start_settings)\n # Make sure bias and dfc will be in prediction dict.\n user_supplied_predict_keys = predict_keys is not None\n if user_supplied_predict_keys:\n predict_keys = set(predict_keys)\n predict_keys.add(boosted_trees_utils._DEBUG_PROTO_KEY)\n predictions = est.predict(\n input_fn,\n predict_keys=predict_keys,\n hooks=hooks,\n checkpoint_path=checkpoint_path,\n yield_single_examples=True)\n for pred in predictions:\n bias, dfcs = boosted_trees_utils._parse_explanations_from_prediction(\n pred[boosted_trees_utils._DEBUG_PROTO_KEY], self._feature_col_names,\n self._is_classification)\n pred['bias'] = bias\n pred['dfc'] = dfcs\n # Don't need to expose serialized proto to end user.\n del pred[boosted_trees_utils._DEBUG_PROTO_KEY]\n yield pred
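\n\n\n# --- Editor's illustrative usage sketch (not part of the original module; `est`\n# and `input_fn` are hypothetical). After training a boosted-trees estimator\n# with center_bias=True, the two experimental APIs above can be combined:\ndef _demo_explanations_usage(est, input_fn):\n \"\"\"Shows the output shapes of the explainability APIs; illustration only.\"\"\"\n # OrderedDict of feature-column name -> gain importance, sorted by importance.\n importances = est.experimental_feature_importances(normalize=True)\n for pred in est.experimental_predict_with_explanations(input_fn):\n bias = pred['bias']  # identical across examples: the prior prediction\n dfcs = pred['dfc']  # OrderedDict of per-feature contributions\n return importances, bias, dfcs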
\n\n\n# pylint: disable=protected-access\n@estimator_export('estimator.BoostedTreesClassifier')\nclass BoostedTreesClassifier(_BoostedTreesBase):\n \"\"\"A Classifier for TensorFlow Boosted Trees models.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n feature_columns,\n n_batches_per_layer,\n model_dir=None,\n n_classes=_HOLD_FOR_MULTI_CLASS_SUPPORT,\n weight_column=None,\n label_vocabulary=None,\n n_trees=100,\n max_depth=6,\n learning_rate=0.1,\n l1_regularization=0.,\n l2_regularization=0.,\n tree_complexity=0.,\n min_node_weight=0.,\n config=None,\n center_bias=False,\n pruning_mode='none',\n quantile_sketch_epsilon=0.01,\n train_in_memory=False):\n \"\"\"Initializes a `BoostedTreesClassifier` instance.\n\n Example:\n\n ```python\n bucketized_feature_1 = bucketized_column(\n numeric_column('feature_1'), BUCKET_BOUNDARIES_1)\n bucketized_feature_2 = bucketized_column(\n numeric_column('feature_2'), BUCKET_BOUNDARIES_2)\n\n # Need to see a large portion of the data before we can build a layer, for\n # example half of the data: n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE\n classifier = estimator.BoostedTreesClassifier(\n feature_columns=[bucketized_feature_1, bucketized_feature_2],\n n_batches_per_layer=n_batches_per_layer,\n n_trees=100,\n ... <some other params>\n )\n\n def input_fn_train():\n ...\n return dataset\n\n classifier.train(input_fn=input_fn_train)\n\n def input_fn_eval():\n ...\n return dataset\n\n metrics = classifier.evaluate(input_fn=input_fn_eval)\n\n when train_in_memory = True, make sure the input fn is not batched:\n def input_fn_train():\n return tf.data.Dataset.zip(\n (tf.data.Dataset.from_tensors({'f1': f1_array, ...}),\n tf.data.Dataset.from_tensors(label_array)))\n ```\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n n_batches_per_layer: the number of batches to collect statistics per\n layer. The total number of batches is the total number of data divided by\n batch size.\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n n_classes: number of label classes. Default is binary classification.\n Multiclass support is not yet implemented.\n weight_column: A string or a `NumericColumn` created by\n `tf.fc_old.numeric_column` defining feature column representing weights.\n It is used to downweight or boost examples during training. It will be\n multiplied by the loss of the example. If it is a string, it is used as\n a key to fetch weight tensor from the `features`. If it is a\n `NumericColumn`, raw tensor is fetched by key `weight_column.key`, then\n weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are already\n encoded as integer or float within [0, 1] for `n_classes=2` and encoded\n as integer values in {0, 1,..., n_classes-1} for `n_classes` > 2.
Also\n there will be errors if vocabulary is not provided and labels are\n string.\n n_trees: number of trees to be created.\n max_depth: maximum depth of the tree to grow.\n learning_rate: shrinkage parameter to be used when a tree is added to the\n model.\n l1_regularization: regularization multiplier applied to the absolute\n weights of the tree leaves.\n l2_regularization: regularization multiplier applied to the square weights\n of the tree leaves.\n tree_complexity: regularization factor to penalize trees with more leaves.\n min_node_weight: minimum hessian a node must have for a\n split to be considered. The value will be compared with\n sum(leaf_hessian)/(batch_size * n_batches_per_layer).\n config: `RunConfig` object to configure the runtime settings.\n center_bias: Whether bias centering needs to occur. Bias centering refers\n to the first node in the very first tree returning the prediction that\n is aligned with the original labels distribution. For example, for\n regression problems, the first node will return the mean of the labels.\n For binary classification problems, it will return a logit for a prior\n probability of label 1.\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\n pruning (do not split a node if not enough gain is observed) and post\n pruning (build the tree up to a max depth and then prune branches with\n negative gain). For pre and post pruning, you MUST provide\n tree_complexity > 0.\n quantile_sketch_epsilon: float between 0 and 1. Error bound for quantile\n computation. This is only used for float feature columns, and the number\n of buckets generated per float feature is 1/quantile_sketch_epsilon.\n train_in_memory: `bool`, when true, it assumes the dataset is in memory,\n i.e., input_fn should return the entire dataset as a single batch,\n n_batches_per_layer should be set as 1, num_worker_replicas should be 1,\n and num_ps_replicas should be 0 in `tf.Estimator.RunConfig`.\n\n Raises:\n ValueError: when wrong arguments are given or unsupported functionalities\n are requested.\n \"\"\"\n # TODO(nponomareva): Support multi-class cases.\n if n_classes == _HOLD_FOR_MULTI_CLASS_SUPPORT:\n n_classes = 2\n elif n_classes > 2 and pruning_mode is not None:\n raise ValueError('For now pruning is not supported with multi-class.')\n\n head, closed_form = _create_classification_head_and_closed_form(\n n_classes, weight_column, label_vocabulary=label_vocabulary)\n # HParams for the model.\n tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,\n l1_regularization, l2_regularization,\n tree_complexity, min_node_weight, center_bias,\n pruning_mode, quantile_sketch_epsilon)\n\n def _model_fn(features, labels, mode, config):\n return _bt_model_fn(\n features,\n labels,\n mode,\n head,\n feature_columns,\n tree_hparams,\n n_batches_per_layer,\n config,\n closed_form_grad_and_hess_fn=closed_form,\n weight_column=weight_column,\n train_in_memory=train_in_memory)\n\n super(BoostedTreesClassifier, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n feature_columns=feature_columns,\n head=head,\n center_bias=center_bias,\n is_classification=True,\n quantile_sketch_epsilon=quantile_sketch_epsilon)
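\n\n\n# --- Editor's illustrative sketch (not part of the original module). The\n# docstring above sizes n_batches_per_layer so that a large fraction of the\n# data (e.g. half) is seen before each tree layer is grown; with hypothetical\n# dataset and batch sizes:\ndef _demo_n_batches_per_layer(num_examples=100000, batch_size=256, fraction=0.5):\n \"\"\"Number of batches whose statistics are pooled before growing a layer.\"\"\"\n return max(1, int(fraction * num_examples / batch_size))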
\n\n\n@estimator_export('estimator.BoostedTreesRegressor')\nclass BoostedTreesRegressor(_BoostedTreesBase):\n \"\"\"A Regressor for TensorFlow Boosted Trees models.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n feature_columns,\n n_batches_per_layer,\n model_dir=None,\n label_dimension=_HOLD_FOR_MULTI_DIM_SUPPORT,\n weight_column=None,\n n_trees=100,\n max_depth=6,\n learning_rate=0.1,\n l1_regularization=0.,\n l2_regularization=0.,\n tree_complexity=0.,\n min_node_weight=0.,\n config=None,\n center_bias=False,\n pruning_mode='none',\n quantile_sketch_epsilon=0.01,\n train_in_memory=False):\n \"\"\"Initializes a `BoostedTreesRegressor` instance.\n\n Example:\n\n ```python\n bucketized_feature_1 = bucketized_column(\n numeric_column('feature_1'), BUCKET_BOUNDARIES_1)\n bucketized_feature_2 = bucketized_column(\n numeric_column('feature_2'), BUCKET_BOUNDARIES_2)\n\n # Need to see a large portion of the data before we can build a layer, for\n # example half of the data: n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE\n regressor = estimator.BoostedTreesRegressor(\n feature_columns=[bucketized_feature_1, bucketized_feature_2],\n n_batches_per_layer=n_batches_per_layer,\n n_trees=100,\n ... <some other params>\n )\n\n def input_fn_train():\n ...\n return dataset\n\n regressor.train(input_fn=input_fn_train)\n\n def input_fn_eval():\n ...\n return dataset\n\n metrics = regressor.evaluate(input_fn=input_fn_eval)\n ```\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n n_batches_per_layer: the number of batches to collect statistics per\n layer. The total number of batches is the total number of data divided by\n batch size.\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n label_dimension: Number of regression targets per example.\n Multi-dimensional support is not yet implemented.\n weight_column: A string or a `NumericColumn` created by\n `tf.fc_old.numeric_column` defining feature column representing weights.\n It is used to downweight or boost examples during training. It will be\n multiplied by the loss of the example. If it is a string, it is used as\n a key to fetch weight tensor from the `features`. If it is a\n `NumericColumn`, raw tensor is fetched by key `weight_column.key`, then\n weight_column.normalizer_fn is applied on it to get weight tensor.\n n_trees: number of trees to be created.\n max_depth: maximum depth of the tree to grow.\n learning_rate: shrinkage parameter to be used when a tree is added to the\n model.\n l1_regularization: regularization multiplier applied to the absolute\n weights of the tree leaves.\n l2_regularization: regularization multiplier applied to the square weights\n of the tree leaves.\n tree_complexity: regularization factor to penalize trees with more leaves.\n min_node_weight: minimum hessian a node must have for a\n split to be considered. The value will be compared with\n sum(leaf_hessian)/(batch_size * n_batches_per_layer).\n config: `RunConfig` object to configure the runtime settings.\n center_bias: Whether bias centering needs to occur. Bias centering refers\n to the first node in the very first tree returning the prediction that\n is aligned with the original labels distribution.
For example, for\n regression problems, the first node will return the mean of the labels.\n For binary classification problems, it will return a logit for a prior\n probability of label 1.\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\n pruning (do not split a node if not enough gain is observed) and post\n pruning (build the tree up to a max depth and then prune branches with\n negative gain). For pre and post pruning, you MUST provide\n tree_complexity > 0.\n quantile_sketch_epsilon: float between 0 and 1. Error bound for quantile\n computation. This is only used for float feature columns, and the number\n of buckets generated per float feature is 1/quantile_sketch_epsilon.\n train_in_memory: `bool`, when true, it assumes the dataset is in memory,\n i.e., input_fn should return the entire dataset as a single batch,\n n_batches_per_layer should be set as 1, num_worker_replicas should be 1,\n and num_ps_replicas should be 0 in `tf.Estimator.RunConfig`.\n\n Raises:\n ValueError: when wrong arguments are given or unsupported functionalities\n are requested.\n \"\"\"\n # TODO(nponomareva): Extend it to multi-dimension cases.\n if label_dimension == _HOLD_FOR_MULTI_DIM_SUPPORT:\n label_dimension = 1\n elif label_dimension > 1 and pruning_mode is not None:\n raise ValueError('For now pruning is not supported with multi-dimension '\n 'regression.')\n head = _create_regression_head(label_dimension, weight_column)\n\n # HParams for the model.\n tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,\n l1_regularization, l2_regularization,\n tree_complexity, min_node_weight, center_bias,\n pruning_mode, quantile_sketch_epsilon)\n\n def _model_fn(features, labels, mode, config):\n return _bt_model_fn(\n features,\n labels,\n mode,\n head,\n feature_columns,\n tree_hparams,\n n_batches_per_layer,\n config,\n weight_column=weight_column,\n train_in_memory=train_in_memory)\n\n super(BoostedTreesRegressor, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n feature_columns=feature_columns,\n head=head,\n center_bias=center_bias,\n is_classification=False,\n quantile_sketch_epsilon=quantile_sketch_epsilon)\n\n\n@estimator_export('estimator.BoostedTreesEstimator')\nclass BoostedTreesEstimator(_BoostedTreesBase): # pylint: disable=protected-access\n \"\"\"An Estimator for TensorFlow Boosted Trees models.\"\"\"\n\n def __init__(self,\n feature_columns,\n n_batches_per_layer,\n head,\n model_dir=None,\n weight_column=None,\n n_trees=100,\n max_depth=6,\n learning_rate=0.1,\n l1_regularization=0.,\n l2_regularization=0.,\n tree_complexity=0.,\n min_node_weight=0.,\n config=None,\n center_bias=False,\n pruning_mode='none',\n quantile_sketch_epsilon=0.01):\n \"\"\"Initializes a `BoostedTreesEstimator` instance.\n\n Use this interface if you need to provide a custom loss/head.\n For example, the following will be equivalent to using\n BoostedTreesRegressor:\n\n # Create a head with L2 loss\n from tensorflow_estimator.python.estimator.canned import head_lib\n\n head = head_lib._regression_head(label_dimension=1)\n est = boosted_trees.BoostedTreesEstimator(\n feature_columns=...,\n n_batches_per_layer=...,\n head=head,\n n_trees=...,\n max_depth=...)\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model.
All items in the set should be instances of classes derived\n from `FeatureColumn`.\n n_batches_per_layer: the number of batches to collect statistics per\n layer.\n head: the `Head` instance defined for Estimator.\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to downweight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then\n weight_column.normalizer_fn is applied on it to get weight tensor.\n n_trees: number of trees to be created.\n max_depth: maximum depth of the tree to grow.\n learning_rate: shrinkage parameter to be used when a tree is added to the\n model.\n l1_regularization: regularization multiplier applied to the absolute\n weights of the tree leaves.\n l2_regularization: regularization multiplier applied to the square weights\n of the tree leaves.\n tree_complexity: regularization factor to penalize trees with more leaves.\n min_node_weight: minimum hessian a node must have for a split to be\n considered. The value will be compared with sum(leaf_hessian)/\n (batch_size * n_batches_per_layer).\n config: `RunConfig` object to configure the runtime settings.\n center_bias: Whether bias centering needs to occur. Bias centering refers\n to the first node in the very first tree returning the prediction that\n is aligned with the original labels distribution. For example, for\n regression problems, the first node will return the mean of the labels.\n For binary classification problems, it will return a logit for a prior\n probability of label 1.\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\n pruning (do not split a node if not enough gain is observed) and post\n pruning (build the tree up to a max depth and then prune branches with\n negative gain). For pre and post pruning, you MUST provide\n tree_complexity > 0.\n quantile_sketch_epsilon: float between 0 and 1. Error bound for quantile\n computation. This is only used for float feature columns, and the number\n of buckets generated per float feature is 1/quantile_sketch_epsilon.\n\n Raises:\n ValueError: when wrong arguments are given or unsupported functionalities\n are requested.\n \"\"\"\n # HParams for the model.\n # pylint: disable=protected-access\n tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,\n l1_regularization, l2_regularization,\n tree_complexity, min_node_weight, center_bias,\n pruning_mode, quantile_sketch_epsilon)\n\n def _model_fn(features, labels, mode, config):\n return _bt_model_fn(\n features,\n labels,\n mode,\n head,\n feature_columns,\n tree_hparams,\n n_batches_per_layer,\n config=config)\n\n def _is_classification_head(head):\n \"\"\"Infers if the head is a classification head.\"\"\"\n # Check using all classification heads defined in canned/head.py.
However, it\n # is not a complete list - it does not check for other classification heads\n # not defined in the head library.\n # pylint: disable=protected-access\n return isinstance(\n head, (head_lib._BinaryLogisticHeadWithSigmoidCrossEntropyLoss,\n head_lib._MultiClassHeadWithSoftmaxCrossEntropyLoss))\n # pylint: enable=protected-access\n\n super(BoostedTreesEstimator, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n feature_columns=feature_columns,\n head=head,\n center_bias=center_bias,\n is_classification=_is_classification_head(head),\n quantile_sketch_epsilon=quantile_sketch_epsilon)\n # pylint: enable=protected-access\n\n\ndef _get_variable_shape(column):\n \"\"\"Returns the variable shape of the provided column.\"\"\"\n if feature_column_lib.is_feature_column_v2([column]):\n return column.variable_shape\n else:\n return column._variable_shape\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for v2 version of dnn_linear_combined.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\n\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.example import feature_pb2\nfrom tensorflow.python.feature_column import feature_column\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import input as input_lib\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator.canned import dnn_linear_combined\nfrom tensorflow_estimator.python.estimator.canned import dnn_testing_utils\nfrom tensorflow_estimator.python.estimator.canned import linear_testing_utils\nfrom tensorflow_estimator.python.estimator.canned import prediction_keys\nfrom tensorflow_estimator.python.estimator.export import export\nfrom tensorflow_estimator.python.estimator.inputs import numpy_io\nfrom tensorflow_estimator.python.estimator.inputs import pandas_io\n\n\ntry:\n # pylint: disable=g-import-not-at-top\n import pandas as pd\n HAS_PANDAS = True\nexcept IOError:\n # Pandas writes a temporary file during import.
If it fails, don't use pandas.\n HAS_PANDAS = False\nexcept ImportError:\n HAS_PANDAS = False\n\n\nclass DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)\n\n def _dnn_only_model_fn(self,\n features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n config=None):\n return dnn_linear_combined._dnn_linear_combined_model_fn_v2(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n linear_feature_columns=[],\n dnn_hidden_units=hidden_units,\n dnn_feature_columns=feature_columns,\n dnn_optimizer=optimizer,\n dnn_activation_fn=activation_fn,\n dnn_dropout=dropout,\n config=config)\n\n\n# A function that mimics linear-regressor init so the same tests can be reused.\ndef _linear_regressor_fn(feature_columns,\n model_dir=None,\n label_dimension=1,\n weight_column=None,\n optimizer='Ftrl',\n config=None,\n sparse_combiner='sum'):\n return dnn_linear_combined.DNNLinearCombinedRegressorV2(\n model_dir=model_dir,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n label_dimension=label_dimension,\n weight_column=weight_column,\n config=config,\n linear_sparse_combiner=sparse_combiner)\n\n\nclass LinearOnlyRegressorEvaluationV2Test(\n linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(\n self, _linear_regressor_fn, fc_lib=feature_column_v2)\n\n\nclass LinearOnlyRegressorPredictV2Test(\n linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearRegressorPredictTest.__init__(\n self, _linear_regressor_fn, fc_lib=feature_column_v2)\n\n\nclass LinearOnlyRegressorIntegrationV2Test(\n linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(\n self, _linear_regressor_fn, fc_lib=feature_column_v2)\n\n\nclass LinearOnlyRegressorTrainingV2Test(\n linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(\n self, _linear_regressor_fn, fc_lib=feature_column_v2)\n\n\ndef _linear_classifier_fn(feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Ftrl',\n config=None,\n sparse_combiner='sum'):\n return dnn_linear_combined.DNNLinearCombinedClassifierV2(\n model_dir=model_dir,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n n_classes=n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n config=config,\n linear_sparse_combiner=sparse_combiner)\n\n\nclass LinearOnlyClassifierTrainingV2Test(\n linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n
test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(\n self,\n linear_classifier_fn=_linear_classifier_fn,\n fc_lib=feature_column_v2)\n\n\nclass LinearOnlyClassifierClassesEvaluationV2Test(\n linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(\n self,\n linear_classifier_fn=_linear_classifier_fn,\n fc_lib=feature_column_v2)\n\n\nclass LinearOnlyClassifierPredictV2Test(\n linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearClassifierPredictTest.__init__(\n self,\n linear_classifier_fn=_linear_classifier_fn,\n fc_lib=feature_column_v2)\n\n\nclass LinearOnlyClassifierIntegrationV2Test(\n linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(\n self,\n linear_classifier_fn=_linear_classifier_fn,\n fc_lib=feature_column_v2)\n\n\n@parameterized.parameters((feature_column_v2,))\nclass DNNLinearCombinedRegressorIntegrationTest(test.TestCase):\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n writer_cache.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def _test_complete_flow_helper(\n self, linear_feature_columns, dnn_feature_columns, feature_spec,\n train_input_fn, eval_input_fn, predict_input_fn, input_dimension,\n label_dimension, batch_size):\n est = dnn_linear_combined.DNNLinearCombinedRegressorV2(\n linear_feature_columns=linear_feature_columns,\n dnn_hidden_units=(2, 2),\n dnn_feature_columns=dnn_feature_columns,\n label_dimension=label_dimension,\n model_dir=self._model_dir)\n\n # TRAIN\n num_steps = 10\n est.train(train_input_fn, steps=num_steps)\n\n # EVALUATE\n scores = est.evaluate(eval_input_fn)\n self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])\n self.assertIn('loss', six.iterkeys(scores))\n\n # PREDICT\n predictions = np.array([\n x[prediction_keys.PredictionKeys.PREDICTIONS]\n for x in est.predict(predict_input_fn)\n ])\n self.assertAllEqual((batch_size, label_dimension), predictions.shape)\n\n # EXPORT\n serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(\n feature_spec)\n export_dir = est.export_saved_model(tempfile.mkdtemp(),\n serving_input_receiver_fn)\n self.assertTrue(gfile.Exists(export_dir))\n\n def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,\n input_dimension, label_dimension, batch_size,\n fc_impl):\n linear_feature_columns = [\n fc_impl.numeric_column('x', shape=(input_dimension,))\n ]\n dnn_feature_columns = [\n fc_impl.numeric_column('x', shape=(input_dimension,))\n ]\n feature_columns = linear_feature_columns + dnn_feature_columns\n feature_spec = feature_column_v2.make_parse_example_spec_v2(feature_columns)\n self._test_complete_flow_helper(linear_feature_columns, dnn_feature_columns,\n feature_spec, train_input_fn, eval_input_fn,\n predict_input_fn, input_dimension,\n label_dimension, batch_size)\n\n def _test_complete_flow_dnn_fc_v1(self, train_input_fn, eval_input_fn,\n predict_input_fn,
input_dimension,\n label_dimension, batch_size, fc_impl):\n del fc_impl\n linear_feature_columns = [\n feature_column_v2.numeric_column('x', shape=(input_dimension,))\n ]\n dnn_feature_columns = [\n feature_column._numeric_column('x', shape=(input_dimension,))\n ]\n feature_columns = linear_feature_columns + dnn_feature_columns\n feature_spec = feature_column.make_parse_example_spec(feature_columns)\n self._test_complete_flow_helper(linear_feature_columns, dnn_feature_columns,\n feature_spec, train_input_fn, eval_input_fn,\n predict_input_fn, input_dimension,\n label_dimension, batch_size)\n\n def _test_complete_flow_linear_fc_v1(self, train_input_fn, eval_input_fn,\n predict_input_fn, input_dimension,\n label_dimension, batch_size, fc_impl):\n del fc_impl\n linear_feature_columns = [\n feature_column._numeric_column('x', shape=(input_dimension,))\n ]\n dnn_feature_columns = [\n feature_column_v2.numeric_column('x', shape=(input_dimension,))\n ]\n feature_columns = linear_feature_columns + dnn_feature_columns\n feature_spec = feature_column.make_parse_example_spec(feature_columns)\n self._test_complete_flow_helper(linear_feature_columns, dnn_feature_columns,\n feature_spec, train_input_fn, eval_input_fn,\n predict_input_fn, input_dimension,\n label_dimension, batch_size)\n\n def _test_numpy_input_fn_helper(self, fc_impl, fn_to_run):\n \"\"\"Tests complete flow with numpy_input_fn.\"\"\"\n label_dimension = 2\n batch_size = 10\n data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)\n data = data.reshape(batch_size, label_dimension)\n # learn y = x\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=data,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=data,\n batch_size=batch_size,\n shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n batch_size=batch_size,\n shuffle=False)\n\n fn_to_run(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=label_dimension,\n label_dimension=label_dimension,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n def test_numpy_input_fn_basic(self, fc_impl):\n self._test_numpy_input_fn_helper(fc_impl, self._test_complete_flow)\n\n def test_numpy_input_fn_dnn_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_numpy_input_fn_helper(fc_impl,\n self._test_complete_flow_dnn_fc_v1)\n\n def test_numpy_input_fn_linear_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_numpy_input_fn_helper(fc_impl,\n self._test_complete_flow_linear_fc_v1)\n\n def _test_pandas_input_fn_helper(self, fc_impl, fn_to_run):\n \"\"\"Tests complete flow with pandas_input_fn.\"\"\"\n if not HAS_PANDAS:\n return\n label_dimension = 1\n batch_size = 10\n data = np.linspace(0., 2., batch_size, dtype=np.float32)\n x = pd.DataFrame({'x': data})\n y = pd.Series(data)\n train_input_fn = pandas_io.pandas_input_fn(\n x=x,\n y=y,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = pandas_io.pandas_input_fn(\n x=x,\n y=y,\n batch_size=batch_size,\n shuffle=False)\n predict_input_fn = pandas_io.pandas_input_fn(\n x=x,\n batch_size=batch_size,\n shuffle=False)\n\n fn_to_run(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=label_dimension,\n
label_dimension=label_dimension,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n def test_pandas_input_fn_basic(self, fc_impl):\n self._test_pandas_input_fn_helper(fc_impl, self._test_complete_flow)\n\n def test_pandas_input_fn_dnn_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_pandas_input_fn_helper(fc_impl,\n self._test_complete_flow_dnn_fc_v1)\n\n def test_pandas_input_fn_linear_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_pandas_input_fn_helper(fc_impl,\n self._test_complete_flow_linear_fc_v1)\n\n def _test_input_fn_from_parse_example_helper(self, fc_impl, fn_to_run):\n \"\"\"Tests complete flow with input_fn constructed from parse_example.\"\"\"\n label_dimension = 2\n batch_size = 10\n data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)\n data = data.reshape(batch_size, label_dimension)\n\n serialized_examples = []\n for datum in data:\n example = example_pb2.Example(features=feature_pb2.Features(\n feature={\n 'x': feature_pb2.Feature(\n float_list=feature_pb2.FloatList(value=datum)),\n 'y': feature_pb2.Feature(\n float_list=feature_pb2.FloatList(value=datum)),\n }))\n serialized_examples.append(example.SerializeToString())\n\n feature_spec = {\n 'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),\n 'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),\n }\n def _train_input_fn():\n feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n def _eval_input_fn():\n feature_map = parsing_ops.parse_example(\n input_lib.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n def _predict_input_fn():\n feature_map = parsing_ops.parse_example(\n input_lib.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n features.pop('y')\n return features, None\n\n fn_to_run(\n train_input_fn=_train_input_fn,\n eval_input_fn=_eval_input_fn,\n predict_input_fn=_predict_input_fn,\n input_dimension=label_dimension,\n label_dimension=label_dimension,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n def test_input_fn_from_parse_example_basic(self, fc_impl):\n self._test_input_fn_from_parse_example_helper(fc_impl,\n self._test_complete_flow)\n\n def test_input_fn_from_parse_example_dnn_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_input_fn_from_parse_example_helper(\n fc_impl, self._test_complete_flow_dnn_fc_v1)\n\n def test_input_fn_from_parse_example_linear_fc_v1(self, fc_impl):\n with self.assertRaisesRegexp(\n ValueError, r'Received a feature column from TensorFlow v1'):\n self._test_input_fn_from_parse_example_helper(\n fc_impl, self._test_complete_flow_linear_fc_v1)\n\n\n# A function that mimics dnn-classifier init so the same tests can be reused.\ndef _dnn_classifier_fn(hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Adagrad',\n config=None):\n return dnn_linear_combined.DNNLinearCombinedClassifierV2(\n model_dir=model_dir,\n dnn_hidden_units=hidden_units,\n
dnn_feature_columns=feature_columns,\n dnn_optimizer=optimizer,\n n_classes=n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n config=config)\n\n\nclass DNNOnlyClassifierEvaluateV2Test(\n dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(\n self, _dnn_classifier_fn, fc_impl=feature_column_v2)\n\n\nclass DNNOnlyClassifierPredictV2Test(\n dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(\n self, _dnn_classifier_fn, fc_impl=feature_column_v2)\n\n\nclass DNNOnlyClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,\n test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(\n self, _dnn_classifier_fn, fc_impl=feature_column_v2)\n\n\n# A function that mimics dnn-regressor init so the same tests can be reused.\ndef _dnn_regressor_fn(hidden_units,\n feature_columns,\n model_dir=None,\n label_dimension=1,\n weight_column=None,\n optimizer='Adagrad',\n config=None):\n return dnn_linear_combined.DNNLinearCombinedRegressorV2(\n model_dir=model_dir,\n dnn_hidden_units=hidden_units,\n dnn_feature_columns=feature_columns,\n dnn_optimizer=optimizer,\n label_dimension=label_dimension,\n weight_column=weight_column,\n config=config)\n\n\nclass DNNOnlyRegressorEvaluateV2Test(\n dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(\n self, _dnn_regressor_fn, fc_impl=feature_column_v2)\n\n\nclass DNNOnlyRegressorPredictV2Test(\n dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(\n self, _dnn_regressor_fn, fc_impl=feature_column_v2)\n\n\nclass DNNOnlyRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,\n test.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n test.TestCase.__init__(self, methodName)\n dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(\n self, _dnn_regressor_fn, fc_impl=feature_column_v2)\n\n\n@parameterized.parameters((feature_column_v2,))\nclass DNNLinearCombinedClassifierIntegrationTest(test.TestCase):\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n writer_cache.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def _as_label(self, data_in_float):\n return np.rint(data_in_float).astype(np.int64)\n\n def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,\n input_dimension, n_classes, batch_size, fc_impl):\n linear_feature_columns = [\n fc_impl.numeric_column('x', shape=(input_dimension,))\n ]\n dnn_feature_columns = [\n fc_impl.numeric_column('x', shape=(input_dimension,))\n ]\n feature_columns = linear_feature_columns + dnn_feature_columns\n est = dnn_linear_combined.DNNLinearCombinedClassifierV2(\n
linear_feature_columns=linear_feature_columns,\n dnn_hidden_units=(2, 2),\n dnn_feature_columns=dnn_feature_columns,\n n_classes=n_classes,\n model_dir=self._model_dir)\n\n # TRAIN\n num_steps = 10\n est.train(train_input_fn, steps=num_steps)\n\n # EVALUATE\n scores = est.evaluate(eval_input_fn)\n self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])\n self.assertIn('loss', six.iterkeys(scores))\n\n # PREDICT\n predicted_proba = np.array([\n x[prediction_keys.PredictionKeys.PROBABILITIES]\n for x in est.predict(predict_input_fn)\n ])\n self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)\n\n # EXPORT\n feature_spec = feature_column_v2.make_parse_example_spec_v2(feature_columns)\n serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(\n feature_spec)\n export_dir = est.export_saved_model(tempfile.mkdtemp(),\n serving_input_receiver_fn)\n self.assertTrue(gfile.Exists(export_dir))\n\n def test_numpy_input_fn(self, fc_impl):\n \"\"\"Tests complete flow with numpy_input_fn.\"\"\"\n n_classes = 3\n input_dimension = 2\n batch_size = 10\n data = np.linspace(\n 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)\n x_data = data.reshape(batch_size, input_dimension)\n y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))\n # learn y = x\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data},\n y=y_data,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data},\n y=y_data,\n batch_size=batch_size,\n shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data},\n batch_size=batch_size,\n shuffle=False)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n n_classes=n_classes,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n def test_pandas_input_fn(self, fc_impl):\n \"\"\"Tests complete flow with pandas_input_fn.\"\"\"\n if not HAS_PANDAS:\n return\n input_dimension = 1\n n_classes = 2\n batch_size = 10\n data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)\n x = pd.DataFrame({'x': data})\n y = pd.Series(self._as_label(data))\n train_input_fn = pandas_io.pandas_input_fn(\n x=x,\n y=y,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = pandas_io.pandas_input_fn(\n x=x,\n y=y,\n batch_size=batch_size,\n shuffle=False)\n predict_input_fn = pandas_io.pandas_input_fn(\n x=x,\n batch_size=batch_size,\n shuffle=False)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n n_classes=n_classes,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n def test_input_fn_from_parse_example(self, fc_impl):\n \"\"\"Tests complete flow with input_fn constructed from parse_example.\"\"\"\n input_dimension = 2\n n_classes = 3\n batch_size = 10\n data = np.linspace(0., n_classes-1., batch_size * input_dimension,\n dtype=np.float32)\n data = data.reshape(batch_size, input_dimension)\n\n serialized_examples = []\n for datum in data:\n example = example_pb2.Example(features=feature_pb2.Features(\n feature={\n 'x':\n feature_pb2.Feature(float_list=feature_pb2.FloatList(\n value=datum)),\n 'y':\n feature_pb2.Feature(int64_list=feature_pb2.Int64List(\n value=self._as_label(datum[:1]))),\n }))\n serialized_examples.append(example.SerializeToString())\n\n feature_spec = {\n 'x':
parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),\n 'y': parsing_ops.FixedLenFeature([1], dtypes.int64),\n }\n def _train_input_fn():\n feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n def _eval_input_fn():\n feature_map = parsing_ops.parse_example(\n input_lib.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n def _predict_input_fn():\n feature_map = parsing_ops.parse_example(\n input_lib.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = linear_testing_utils.queue_parsed_features(feature_map)\n features.pop('y')\n return features, None\n\n self._test_complete_flow(\n train_input_fn=_train_input_fn,\n eval_input_fn=_eval_input_fn,\n predict_input_fn=_predict_input_fn,\n input_dimension=input_dimension,\n n_classes=n_classes,\n batch_size=batch_size,\n fc_impl=fc_impl)\n\n\n@parameterized.parameters((feature_column_v2,))\nclass DNNLinearCombinedTests(test.TestCase):\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n shutil.rmtree(self._model_dir)\n\n def test_train_op_calls_both_dnn_and_linear(self, fc_impl):\n dnn_opt = gradient_descent_v2.SGD(1.)\n linear_opt = gradient_descent_v2.SGD(1.)\n x_column = fc_impl.numeric_column('x')\n input_fn = numpy_io.numpy_input_fn(\n x={'x': np.array([[0.], [1.]])},\n y=np.array([[0.], [1.]]),\n batch_size=1,\n shuffle=False)\n est = dnn_linear_combined.DNNLinearCombinedClassifierV2(\n linear_feature_columns=[x_column],\n # verifies linear_optimizer is used only for linear part.\n linear_optimizer=linear_opt,\n dnn_hidden_units=(2, 2),\n dnn_feature_columns=[x_column],\n # verifies dnn_optimizer is used only for dnn part.\n dnn_optimizer=dnn_opt,\n model_dir=self._model_dir)\n num_steps = 1\n est.train(input_fn, steps=num_steps)\n # verifies train_op fires linear minimize op\n self.assertEqual(num_steps, est.get_variable_value(\n linear_opt.iterations.name))\n # verifies train_op fires dnn optimizer\n self.assertEqual(num_steps, est.get_variable_value(dnn_opt.iterations.name))\n\n def test_dnn_and_linear_logits_are_added(self, fc_impl):\n with ops.Graph().as_default():\n variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')\n variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')\n variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')\n variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')\n variables_lib.Variable([[5.0]], name='dnn/logits/kernel')\n variables_lib.Variable([6.0], name='dnn/logits/bias')\n variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)\n linear_testing_utils.save_variables_to_ckpt(self._model_dir)\n\n x_column = fc_impl.numeric_column('x')\n est = dnn_linear_combined.DNNLinearCombinedRegressorV2(\n linear_feature_columns=[x_column],\n dnn_hidden_units=[1],\n dnn_feature_columns=[x_column],\n model_dir=self._model_dir)\n input_fn = numpy_io.numpy_input_fn(\n x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)\n # linear logits = 10*1 + 2 = 12\n # dnn logits = (10*3 + 4)*5 + 6 = 176\n # logits = dnn + linear = 176 + 12 = 188\n self.assertAllClose(\n {\n prediction_keys.PredictionKeys.PREDICTIONS: [188.],\n },\n next(est.predict(input_fn=input_fn)))\n\n\n@parameterized.parameters((feature_column_v2,))
\nclass DNNLinearCombinedWarmStartingTest(test.TestCase):\n\n def setUp(self):\n # Create a directory to save our old checkpoint and vocabularies to.\n self._ckpt_and_vocab_dir = tempfile.mkdtemp()\n\n # Make a dummy input_fn.\n def _input_fn():\n features = {\n 'age': [[23.], [31.]],\n 'city': [['Palo Alto'], ['Mountain View']],\n }\n return features, [0, 1]\n\n self._input_fn = _input_fn\n\n def tearDown(self):\n # Clean up checkpoint / vocab dir.\n writer_cache.FileWriterCache.clear()\n shutil.rmtree(self._ckpt_and_vocab_dir)\n\n def test_classifier_basic_warm_starting(self, fc_impl):\n \"\"\"Tests correctness of DNNLinearCombinedClassifier default warm-start.\"\"\"\n age = fc_impl.numeric_column('age')\n city = fc_impl.embedding_column(\n fc_impl.categorical_column_with_vocabulary_list(\n 'city', vocabulary_list=['Mountain View', 'Palo Alto']),\n dimension=5)\n\n # Create a DNNLinearCombinedClassifier and train to save a checkpoint.\n dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifierV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n linear_optimizer='SGD',\n dnn_optimizer='SGD')\n dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second DNNLinearCombinedClassifier, warm-started from the first.\n # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't\n # have accumulator values that change).\n # To avoid an optimizer naming issue during warm start, the dnn_optimizer\n # instance needs to be created before the linear_optimizer, since this is\n # the order pre-defined in the model function.\n # Create a default graph context to make sure the optimizer instance is\n # created within Graph v1 to make it consistent with estimator Graph.\n with ops.Graph().as_default():\n warm_started_dnn_lc_classifier = (\n dnn_linear_combined.DNNLinearCombinedClassifierV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n n_classes=4,\n dnn_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n linear_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n warm_start_from=dnn_lc_classifier.model_dir))\n\n warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)\n for variable_name in warm_started_dnn_lc_classifier.get_variable_names():\n if 'learning_rate' in variable_name:\n self.assertAllClose(\n 0.0,\n warm_started_dnn_lc_classifier.get_variable_value(variable_name))\n else:\n self.assertAllClose(\n dnn_lc_classifier.get_variable_value(variable_name),\n warm_started_dnn_lc_classifier.get_variable_value(variable_name))\n\n def test_regressor_basic_warm_starting(self, fc_impl):\n \"\"\"Tests correctness of DNNLinearCombinedRegressor default warm-start.\"\"\"\n age = fc_impl.numeric_column('age')\n city = fc_impl.embedding_column(\n fc_impl.categorical_column_with_vocabulary_list(\n 'city', vocabulary_list=['Mountain View', 'Palo Alto']),\n dimension=5)\n\n # Create a DNNLinearCombinedRegressor and train to save a checkpoint.\n dnn_lc_regressor = dnn_linear_combined.DNNLinearCombinedRegressorV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n model_dir=self._ckpt_and_vocab_dir,\n linear_optimizer='SGD',\n dnn_optimizer='SGD')\n dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second DNNLinearCombinedRegressor, warm-started from the 
first.\n # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't\n # have accumulator values that change).\n # To avoid an optimizer naming issue during warm start, the dnn_optimizer\n # instance needs to be created before the linear_optimizer, since this is\n # the order pre-defined in the model function.\n # Create a default graph context to make sure the optimizer instance is\n # created within Graph v1 to make it consistent with estimator Graph.\n with ops.Graph().as_default():\n warm_started_dnn_lc_regressor = (\n dnn_linear_combined.DNNLinearCombinedRegressorV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n dnn_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n linear_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n warm_start_from=dnn_lc_regressor.model_dir))\n\n warm_started_dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)\n for variable_name in warm_started_dnn_lc_regressor.get_variable_names():\n if 'learning_rate' in variable_name:\n self.assertAllClose(\n 0.0,\n warm_started_dnn_lc_regressor.get_variable_value(variable_name))\n else:\n self.assertAllClose(\n dnn_lc_regressor.get_variable_value(variable_name),\n warm_started_dnn_lc_regressor.get_variable_value(variable_name))\n\n def test_warm_starting_selective_variables(self, fc_impl):\n \"\"\"Tests selecting variables to warm-start.\"\"\"\n age = fc_impl.numeric_column('age')\n city = fc_impl.embedding_column(\n fc_impl.categorical_column_with_vocabulary_list(\n 'city', vocabulary_list=['Mountain View', 'Palo Alto']),\n dimension=5)\n\n # Create a DNNLinearCombinedClassifier and train to save a checkpoint.\n dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifierV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n linear_optimizer='SGD',\n dnn_optimizer='SGD')\n dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second DNNLinearCombinedClassifier, warm-started from the first.\n # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't\n # have accumulator values that change).\n warm_started_dnn_lc_classifier = (\n dnn_linear_combined.DNNLinearCombinedClassifierV2(\n linear_feature_columns=[age],\n dnn_feature_columns=[city],\n dnn_hidden_units=[256, 128],\n n_classes=4,\n linear_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n dnn_optimizer=gradient_descent_v2.SGD(\n learning_rate=0.0),\n # The provided regular expression will only warm-start the deep\n # portion of the model.\n warm_start_from=estimator.WarmStartSettings(\n ckpt_to_initialize_from=dnn_lc_classifier.model_dir,\n vars_to_warm_start='.*(dnn).*')))\n\n warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)\n for variable_name in warm_started_dnn_lc_classifier.get_variable_names():\n if 'dnn' in variable_name:\n if 'learning_rate' in variable_name:\n self.assertAllClose(\n 0.0,\n warm_started_dnn_lc_classifier.get_variable_value(variable_name))\n else:\n self.assertAllClose(\n dnn_lc_classifier.get_variable_value(variable_name),\n warm_started_dnn_lc_classifier.get_variable_value(variable_name))\n elif 'linear' in variable_name:\n linear_values = warm_started_dnn_lc_classifier.get_variable_value(\n variable_name)\n # Since they're not warm-started, the linear weights will be\n # zero-initialized.\n self.assertAllClose(np.zeros_like(linear_values), 
linear_values)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
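A quick plain-Python check of the logit arithmetic asserted in test_dnn_and_linear_logits_are_added above (a standalone sketch; the variable names are illustrative, not part of the estimator API):

x = 10.0
linear_logits = x * 1.0 + 2.0             # linear weights [[1.]] and bias [2.] -> 12
dnn_logits = (x * 3.0 + 4.0) * 5.0 + 6.0  # hidden kernel/bias, then logits kernel/bias -> 176
assert linear_logits + dnn_logits == 188.0  # the combined head sums both logits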
[ [ "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.training.training_util._increment_global_step", "tensorflow.python.platform.test.TestCase.setUp", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.summary.summary.scalar", "tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.Variable", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.training.session_run_hook.SessionRunContext", "tensorflow.python.training.monitored_session.SingularMonitoredSession", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.training.checkpoint_utils.load_variable", "tensorflow.python.client.session.Session", "tensorflow.python.training.training_util.create_global_step", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.get", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.platform.gfile.Glob", "tensorflow.python.platform.test.mock.patch.object", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.data_flow_ops.ConditionalAccumulator", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.feature_column.feature_column_v2._StateManagerImpl", "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.core.kernels.boosted_trees.boosted_trees_pb2.TreeEnsemble", "tensorflow.python.ops.boosted_trees_ops.PruningMode.from_str", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.summary.summary.scalar", "tensorflow.python.ops.math_ops.exp", "numpy.all", "tensorflow.python.ops.boosted_trees_ops.example_debug_outputs", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.control_flow_v2_toggles.control_flow_v2_enabled", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.boosted_trees_ops.update_ensemble", "tensorflow.python.ops.boosted_trees_ops.TreeEnsemble", "tensorflow.python.training.checkpoint_utils.load_checkpoint", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.util.tf_export.estimator_export", "tensorflow.python.ops.boosted_trees_ops.make_stats_summary", "tensorflow.python.ops.lookup_ops.mutable_dense_hash_table_v2", "tensorflow.python.ops.boosted_trees_ops.predict", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.array_ops.bitcast", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.control_dependencies", "numpy.zeros", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.feature_column.feature_column_v2._transform_features_v2", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.lookup_ops.lookup_table_find_v2", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.boosted_trees_ops.QuantileAccumulator", 
"tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.boosted_trees_ops.boosted_trees_bucketize", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.variable_scope.variable", "numpy.array", "numpy.sum", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.feature_column.feature_column_lib.is_feature_column_v2", "tensorflow.python.feature_column.feature_column_lib.numeric_column", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.ops.cond_v2.cond_v2", "tensorflow.python.ops.control_flow_v2_toggles.disable_control_flow_v2", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.boosted_trees_ops.center_bias", "tensorflow.python.ops.boosted_trees_ops.calculate_best_gains_per_feature", "tensorflow.python.feature_column.feature_column._transform_features", "tensorflow.python.ops.variable_scope.enable_resource_variables", "tensorflow.python.training.session_run_hook.SessionRunArgs", "tensorflow.python.ops.boosted_trees_ops.training_predict", "tensorflow.python.ops.control_flow_v2_toggles.enable_control_flow_v2" ], [ "tensorflow.python.feature_column.feature_column_v2.numeric_column", "pandas.Series", "numpy.linspace", "tensorflow.python.training.input.limit_epochs", "pandas.DataFrame", "tensorflow.python.ops.variables.Variable", "tensorflow.python.platform.test.TestCase.__init__", "tensorflow.python.platform.gfile.Exists", "numpy.zeros_like", "numpy.reshape", "tensorflow.python.feature_column.feature_column_v2.make_parse_example_spec_v2", "tensorflow.python.platform.test.main", "tensorflow.python.ops.parsing_ops.FixedLenFeature", "tensorflow.python.keras.optimizer_v2.gradient_descent.SGD", "numpy.rint", "tensorflow.python.feature_column.feature_column._numeric_column", "tensorflow.python.feature_column.feature_column.make_parse_example_spec", "numpy.array", "tensorflow.core.example.feature_pb2.FloatList", "tensorflow.python.framework.ops.Graph", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear", "tensorflow.python.ops.parsing_ops.parse_example" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "1.4", "1.13", "2.3", "2.4", "2.2", "2.9", "1.5", "1.7", "2.5", "1.0", "2.8", "1.2", "2.10" ] } ]
clementpoiret/sparseml
[ "8442a6ef8ba11fb02f5e51472dd68b72438539b9", "8442a6ef8ba11fb02f5e51472dd68b72438539b9", "8442a6ef8ba11fb02f5e51472dd68b72438539b9", "8442a6ef8ba11fb02f5e51472dd68b72438539b9" ]
[ "tests/sparseml/pytorch/models/classification/test_mobilenet.py", "src/sparseml/tensorflow_v1/utils/nets_utils.py", "tests/sparseml/onnx/utils/test_data.py", "src/sparseml/tensorflow_v1/optim/mask_creator_pruning.py" ]
[ "# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom typing import Union\n\nimport pytest\nimport torch\n\nfrom sparseml.pytorch.models import ModelRegistry, mobilenet\nfrom tests.sparseml.pytorch.models.utils import compare_model\n\n\[email protected](\n os.getenv(\"NM_ML_SKIP_PYTORCH_TESTS\", False),\n reason=\"Skipping pytorch tests\",\n)\[email protected](\n os.getenv(\"NM_ML_SKIP_MODEL_TESTS\", False),\n reason=\"Skipping model tests\",\n)\[email protected](\n \"key,pretrained,test_input\",\n [\n (\"mobilenet\", False, True),\n (\"mobilenet\", True, False),\n (\"mobilenet\", \"base\", False),\n (\"mobilenet\", \"pruned-conservative\", False),\n (\"mobilenet\", \"pruned-moderate\", False),\n ],\n)\ndef test_mobilenets(key: str, pretrained: Union[bool, str], test_input: bool):\n model = ModelRegistry.create(key, pretrained)\n diff_model = mobilenet()\n\n if pretrained:\n compare_model(model, diff_model, same=False)\n match_model = ModelRegistry.create(key, pretrained)\n compare_model(model, match_model, same=True)\n\n if test_input:\n input_shape = ModelRegistry.input_shape(key)\n batch = torch.randn(1, *input_shape)\n out = model(batch)\n assert isinstance(out, tuple)\n for tens in out:\n assert tens.shape[0] == 1\n assert tens.shape[1] == 1000\n", "# Copyright (c) 2021 - present / Neuralmagic, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUtility functions for working with tensorflow_v1 slim's nets_factory\n\"\"\"\n\nimport functools\nimport logging\nfrom typing import Callable, Dict\n\nfrom sparseml.tensorflow_v1.utils import tf_compat as tf\n\n\ntry:\n from nets import cyclegan, dcgan, nets_factory\nexcept Exception:\n nets_factory = None\n dcgan = None\n cyclegan = None\n logging.warning(\"TensorFlow slim nets not found in system\")\n\ntry:\n from tensorflow.contrib import layers as contrib_layers\n from tensorflow.contrib import slim\nexcept Exception:\n slim = None\n contrib_layers = None\n logging.warning(\"TensorFlow slim not found in system\")\n\n\n__all__ = [\n \"get_network_fn\",\n \"get_gan_network_fn\",\n \"get_model_scope\",\n \"mobilenet_v1_arg_scope\",\n]\n\n\ndef _gans_constructors() -> Dict[str, Callable]:\n return {\n \"cyclegan\": cyclegan.cyclegan_generator_resnet,\n \"dcgan_generator\": dcgan.generator,\n \"dcgan_discriminator\": dcgan.discriminator,\n }\n\n\ndef _check_slim_availability():\n if nets_factory is None or slim is None:\n raise ValueError(\n \"TensorFlow slim not setup in environment, please install first\"\n )\n\n\ndef get_network_fn(\n name: str,\n num_classes: int,\n weight_decay: float = 0.0,\n is_training: bool = False,\n arg_scope_vars: Dict = None,\n):\n \"\"\"\n Modified from slim/nets/nets_factory\n Returns a network_fn such as `logits, end_points = network_fn(images)`.\n\n :param name: The name of the network.\n :param num_classes: The number of classes to use for classification. If 0 or None,\n the logits layer is omitted and its input features are returned instead.\n :param weight_decay: The l2 coefficient for the model weights.\n :param is_training: `True` if the model is being used for training otherwise `False`\n :param arg_scope_vars: arg_scope_vars to be passed to the slim arg_scope\n :return network_fn: A function that applies the model to a batch of images. It has\n the following signature: net, end_points = network_fn(images)\n The `images` input is a tensor of shape [batch_size, height, width, 3 or\n 1] with height = width = network_fn.default_image_size. (The\n permissibility and treatment of other sizes depends on the network_fn.)\n The returned `end_points` are a dictionary of intermediate activations.\n The returned `net` is the topmost layer, depending on `num_classes`:\n If `num_classes` was a non-zero integer, `net` is a logits tensor\n of shape [batch_size, num_classes].\n If `num_classes` was 0 or `None`, `net` is a tensor with the input\n to the logits layer of shape [batch_size, 1, 1, num_features] or\n [batch_size, num_features]. 
Dropout has not been applied to this\n (even if the network's original classification does); it remains for\n the caller to do this or not.\n :raises ValueError: If network `name` is not recognized.\n \"\"\"\n _check_slim_availability()\n\n if not arg_scope_vars:\n arg_scope_vars = {}\n\n if \"gan\" in name.lower():\n return get_gan_network_fn(name, is_training)\n if name not in nets_factory.networks_map:\n raise ValueError(\"Name of network unknown %s\" % name)\n func = nets_factory.networks_map[name]\n arg_scope_vars[\"weight_decay\"] = weight_decay\n\n @functools.wraps(func)\n def network_fn(images, **kwargs):\n with slim.arg_scope(get_model_scope(name, arg_scope_vars=arg_scope_vars)):\n return func(\n images, num_classes=num_classes, is_training=is_training, **kwargs\n )\n\n if hasattr(func, \"default_image_size\"):\n network_fn.default_image_size = func.default_image_size\n\n return network_fn\n\n\ndef get_gan_network_fn(\n name: str,\n is_training: bool = False,\n):\n \"\"\"\n Returns network_fn for a GAN sub-model\n\n :param name: The name of the network.\n :param is_training: `True` if the model is being used for training otherwise `False`\n :return network_fn: Function that will run a gan sub-model\n :raises ValueError: If network `name` is not recognized.\n \"\"\"\n _check_slim_availability()\n\n if name not in _gans_constructors():\n raise ValueError(\"Name of GAN network unknown %s\" % name)\n\n func = _gans_constructors()[name]\n\n def network_fn(inputs, **kwargs):\n if name == \"dcgan_generator\":\n kwargs[\"final_size\"] = 16\n return func(inputs, is_training=is_training, **kwargs)\n\n return network_fn\n\n\ndef get_model_scope(model_name: str, arg_scope_vars: Dict = None):\n \"\"\"\n :param model_name: name of the model to create an arg scope for\n :param arg_scope_vars:\n :return: arg_scope_vars to be passed to the slim arg_scope\n \"\"\"\n _check_slim_availability()\n\n if arg_scope_vars is None:\n arg_scope_vars = {}\n\n arg_scope = nets_factory.arg_scopes_map[model_name](**arg_scope_vars)\n if model_name == \"mobilenet_v1\":\n arg_scope = mobilenet_v1_arg_scope(**arg_scope_vars)\n return arg_scope\n\n\ndef mobilenet_v1_arg_scope(\n is_training: bool = True,\n weight_decay: float = 0.00004,\n stddev: float = 0.09,\n regularize_depthwise: bool = False,\n batch_norm_decay: float = 0.9997,\n batch_norm_epsilon: float = 0.001,\n batch_norm_updates_collections: tf.GraphKeys = tf.GraphKeys.UPDATE_OPS,\n normalizer_fn: Callable = slim.batch_norm if slim else None,\n):\n \"\"\"\n Adapted from slim to allow for Xavier initializer\n Defines the default MobilenetV1 arg scope.\n\n :param is_training: Whether or not we're training the model. 
If this is set to\n None, the parameter is not added to the batch_norm arg_scope.\n :param weight_decay: The weight decay to use for regularizing the model.\n :param stddev: The standard deviation of the truncated normal weight initializer.\n :param regularize_depthwise: Whether or not to apply regularization on depthwise.\n :param batch_norm_decay: Decay for batch norm moving average.\n :param batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n :param batch_norm_updates_collections: Collection for the update ops for\n batch norm.\n :param normalizer_fn: Normalization function to apply after convolution.\n :return: An `arg_scope` to use for the mobilenet v1 model.\n \"\"\"\n _check_slim_availability()\n\n batch_norm_params = {\n \"center\": True,\n \"scale\": True,\n \"decay\": batch_norm_decay,\n \"epsilon\": batch_norm_epsilon,\n \"updates_collections\": batch_norm_updates_collections,\n }\n if is_training is not None:\n batch_norm_params[\"is_training\"] = is_training\n\n # Set weight_decay for weights in Conv and DepthSepConv layers.\n weights_init = tf.keras.initializers.glorot_normal()\n regularizer = contrib_layers.l2_regularizer(weight_decay)\n if regularize_depthwise:\n depthwise_regularizer = regularizer\n else:\n depthwise_regularizer = None\n with slim.arg_scope(\n [slim.conv2d, slim.separable_conv2d],\n weights_initializer=weights_init,\n activation_fn=tf.nn.relu6,\n normalizer_fn=normalizer_fn,\n ):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\n with slim.arg_scope(\n [slim.separable_conv2d], weights_regularizer=depthwise_regularizer\n ) as sc:\n return sc\n", "# Copyright (c) 2021 - present / Neuralmagic, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport os\nimport tempfile\nfrom typing import Dict, NamedTuple, Tuple, Union\n\nimport numpy\nimport pytest\n\nfrom sparseml.onnx.utils import DataLoader\nfrom sparsezoo import Zoo\n\n\nDataloaderModelFixture = NamedTuple(\n \"DataloaderModelFixture\",\n [\n (\"model_path\", str),\n (\"data_shape\", Dict[str, Tuple[int, ...]]),\n (\"label_shape\", Union[None, Dict[str, Tuple[int, ...]]]),\n (\"data_types\", numpy.dtype),\n ],\n)\n\n\n@pytest.fixture(\n params=[\n (\n {\n \"domain\": \"cv\",\n \"sub_domain\": \"classification\",\n \"architecture\": \"resnet_v1\",\n \"sub_architecture\": \"50\",\n \"framework\": \"pytorch\",\n \"repo\": \"sparseml\",\n \"dataset\": \"imagenet\",\n \"training_scheme\": None,\n \"sparse_name\": \"base\",\n \"sparse_category\": \"none\",\n \"sparse_target\": None,\n },\n {\"input\": (1, 3, 224, 224)},\n {\"output_0\": (1, 1000), \"output_1\": (1, 1000)},\n {\"input\": numpy.dtype(\"float32\")},\n ),\n (\n {\n \"domain\": \"cv\",\n \"sub_domain\": \"classification\",\n \"architecture\": \"mobilenet_v1\",\n \"sub_architecture\": \"1.0\",\n \"framework\": \"pytorch\",\n \"repo\": \"sparseml\",\n \"dataset\": \"imagenet\",\n \"training_scheme\": None,\n \"sparse_name\": \"base\",\n \"sparse_category\": \"none\",\n \"sparse_target\": None,\n },\n {\"input\": (1, 3, 224, 224)},\n {\"output_0\": (1, 1000)},\n {\"input\": numpy.dtype(\"float32\")},\n ),\n ]\n)\ndef dataloader_models(request) -> DataloaderModelFixture:\n model_args, input_shapes, output_shapes, data_types = request.param\n model = Zoo.load_model(**model_args)\n model_path = model.onnx_file.downloaded_path()\n\n return DataloaderModelFixture(model_path, input_shapes, output_shapes, data_types)\n\n\ndef _test_dataloader(\n dataloader: DataLoader,\n data_shapes: Dict[str, Tuple[int, ...]],\n label_shapes: Union[None, Dict[str, Tuple[int, ...]]],\n batch_size: int,\n iter_steps: int,\n num_samples: int,\n data_types: Dict[str, numpy.dtype] = None,\n):\n assert dataloader.batch_size == batch_size\n assert dataloader.iter_steps == iter_steps\n assert dataloader.infinite == (iter_steps == -1)\n if dataloader.iter_steps > 0:\n assert len(dataloader) == iter_steps\n elif dataloader.iter_steps < 0:\n assert len(dataloader) == 0\n else:\n assert len(dataloader) == math.ceil(num_samples / float(batch_size))\n\n iterations = 0\n for data, label in dataloader:\n if dataloader.infinite and iterations == iter_steps + 5:\n break\n for key in data_shapes:\n if data_types is not None and key in data_types:\n assert data[key].dtype == data_types[key]\n assert data[key].shape == (batch_size,) + data_shapes[key]\n\n if label_shapes is None:\n assert label is None\n else:\n for key in label_shapes:\n assert label[key].shape == (batch_size,) + label_shapes[key]\n iterations += 1\n assert (dataloader.infinite and iterations == iter_steps + 5) or (\n iterations == len(dataloader)\n )\n\n\n@pytest.mark.parametrize(\n 
\"data_shapes,label_shapes,batch_size,iter_steps,num_samples,data_types\",\n [\n ({\"0000\": (3, 16, 16)}, None, 3, 1, 30, None),\n ({\"0000\": (3, 16, 16)}, None, 3, 2, 30, None),\n ({\"0000\": (3, 16, 16)}, None, 3, 2, 30, {\"0000\": numpy.dtype(\"int\")}),\n (\n {\"0000\": (3, 16, 16), \"0001\": (4, 20, 20)},\n None,\n 3,\n 2,\n 30,\n {\"0000\": numpy.int64, \"0001\": numpy.float},\n ),\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000, 1)}, 3, 20, 30, None),\n (\n {\"0000\": (3, 16, 16), \"0001\": (4, 20, 20)},\n {\"0000\": (1000, 1), \"0001\": (1,)},\n 3,\n 20,\n 30,\n None,\n ),\n ({\"0000\": (3, 16, 16)}, None, 3, 0, 30, None),\n ({\"0000\": (3, 16, 16)}, None, 3, -1, 30, None),\n ],\n)\ndef test_dataloader_from_random(\n data_shapes: Dict[str, Tuple[int, ...]],\n label_shapes: Union[None, Dict[str, Tuple[int, ...]]],\n batch_size: int,\n iter_steps: int,\n num_samples: int,\n data_types: Dict[str, numpy.dtype],\n):\n dataloader = DataLoader.from_random(\n data_shapes, label_shapes, batch_size, iter_steps, num_samples, data_types\n )\n _test_dataloader(\n dataloader,\n data_shapes,\n label_shapes,\n batch_size,\n iter_steps,\n num_samples,\n data_types,\n )\n\n\[email protected](\n \"batch_size,iter_steps,num_samples,create_labels,strip_first_dim\",\n [\n (10, 0, 100, False, True),\n (10, 0, 98, False, True),\n (10, -1, 100, False, True),\n (10, 10, 100, False, True),\n (10, 0, 100, True, True),\n (10, 0, 100, True, False),\n ],\n)\ndef test_dataloader_from_model(\n dataloader_models: DataloaderModelFixture,\n batch_size: int,\n iter_steps: int,\n num_samples: int,\n create_labels: bool,\n strip_first_dim: bool,\n):\n dataloader = DataLoader.from_model_random(\n dataloader_models.model_path,\n batch_size,\n iter_steps,\n num_samples,\n create_labels,\n strip_first_dim,\n )\n\n data_shapes = dict(dataloader_models.data_shape)\n label_shapes = dict(dataloader_models.label_shape)\n if strip_first_dim:\n for key in data_shapes:\n data_shapes[key] = data_shapes[key][1:]\n\n for key in label_shapes:\n label_shapes[key] = label_shapes[key][1:]\n\n if not create_labels:\n label_shapes = None\n\n _test_dataloader(\n dataloader,\n data_shapes,\n label_shapes,\n batch_size,\n iter_steps,\n num_samples,\n dataloader_models.data_types,\n )\n\n\[email protected](\n \"data_shape,label_shape,samples,batch_size,iter_steps\",\n [\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000,)}, 100, 3, 0),\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000,)}, 99, 3, 0),\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000,)}, 99, 3, 34),\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000,)}, 100, 3, -1),\n ({\"0000\": (3, 16, 16)}, {\"0000\": (1000,)}, 100, 3, 3),\n ({\"0000\": (3, 16, 16)}, None, 100, 3, 0),\n (\n {\"0000\": (3, 16, 16), \"0001\": (3, 16, 16)},\n {\"0000\": (1000,), \"0001\": (1000,)},\n 100,\n 3,\n 0,\n ),\n ],\n)\ndef test_dataloader(\n data_shape: Dict[str, Tuple[int, ...]],\n label_shape: Union[None, Dict[str, Tuple[int, ...]]],\n samples: int,\n batch_size: int,\n iter_steps: int,\n):\n with tempfile.TemporaryDirectory() as tempdir:\n data_glob = os.path.join(tempdir, \"inp_*.npz\")\n label_glob = (\n os.path.join(tempdir, \"out_*.npz\") if label_shape is not None else None\n )\n for i in range(samples):\n data_path = os.path.join(tempdir, \"inp_{}.npz\".format(i))\n data = {}\n for key in data_shape:\n data[key] = numpy.random.randn(*data_shape[key])\n\n numpy.savez(data_path, **data)\n\n if label_shape is not None:\n label_path = os.path.join(tempdir, \"out_{}.npz\".format(i))\n label = {}\n for key in 
label_shape:\n label[key] = numpy.random.randn(*label_shape[key])\n\n numpy.savez(label_path, **label)\n\n dataloader = DataLoader(data_glob, label_glob, batch_size, iter_steps)\n _test_dataloader(\n dataloader, data_shape, label_shape, batch_size, iter_steps, samples\n )\n", "# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClasses for defining sparsity masks based on model parameters.\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, Iterable, List, Tuple, Union\n\nimport numpy\n\nfrom sparseml.tensorflow_v1.utils import tf_compat\n\n\n__all__ = [\n \"PruningMaskCreator\",\n \"UnstructuredPruningMaskCreator\",\n \"GroupedPruningMaskCreator\",\n \"DimensionPruningMaskCreator\",\n \"BlockPruningMaskCreator\",\n \"load_mask_creator\",\n]\n\n\nclass PruningMaskCreator(ABC):\n \"\"\"\n Base abstract class for a sparsity mask creator.\n Subclasses should define all methods for creating masks and their initializers\n \"\"\"\n\n @abstractmethod\n def get_mask_initializer(\n self,\n tensor: tf_compat.Tensor,\n ) -> Callable[[], tf_compat.Tensor]:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :return: Tensor initializer function for this sparsity mask\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def create_sparsity_mask(\n self,\n tensor: tf_compat.Tensor,\n sparsity: tf_compat.Tensor,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :param sparsity: the target sparsity to use for assigning the masks\n :return: A sparsity mask close to the set sparsity based on the values of\n the input tensor\n \"\"\"\n raise NotImplementedError()\n\n\nclass UnstructuredPruningMaskCreator(PruningMaskCreator):\n \"\"\"\n Class for creating unstructured sparsity masks.\n Masks will be created using unstructured sparsity by pruning weights ranked\n by their magnitude.\n \"\"\"\n\n def get_mask_initializer(\n self,\n tensor: tf_compat.Tensor,\n ) -> Callable[[], tf_compat.Tensor]:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :return: Initializer for tensor where an element is 1.0 for nonzero weights\n and zero for all other weights\n :raise: ValueError If the dtype is not numeric or boolean\n \"\"\"\n\n def non_zero_mask_initializer(\n shape: tf_compat.TensorShape,\n dtype: tf_compat.DType = tf_compat.float32,\n partition_info: Any = None, # unused variable for compatibility\n ) -> tf_compat.Tensor:\n dtype = tf_compat.as_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == tf_compat.string:\n raise ValueError(\"Expected numeric or boolean dtype, got %s.\" % dtype)\n\n return tf_compat.cast(tf_compat.not_equal(tensor, 0.0), dtype=dtype)\n\n return non_zero_mask_initializer\n\n def create_sparsity_mask(\n self,\n tensor: tf_compat.Tensor,\n sparsity: tf_compat.Tensor,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :param sparsity: the target sparsity to use for 
assigning the masks\n :return: A sparsity mask close to the set sparsity based on the values of\n the input tensor\n \"\"\"\n abs_var = tf_compat.abs(tensor) # Magnitudes of weights\n sparse_threshold_index = tf_compat.cast(\n tf_compat.round(\n tf_compat.cast(tf_compat.size(abs_var), tf_compat.float32) * sparsity\n ),\n tf_compat.int32,\n )\n sparse_threshold_index = tf_compat.minimum(\n tf_compat.maximum(sparse_threshold_index, 0),\n tf_compat.size(tensor) - 1,\n )\n\n try:\n argsort = tf_compat.argsort\n except Exception:\n try:\n argsort = tf_compat.contrib.framework.argsort\n except Exception:\n raise RuntimeError(\n \"cannot find argsort function in tensorflow_v1, \"\n \"currently unsupported\"\n )\n\n # produce tensor where each element is the index in sorted order of abs_var\n abs_var_flat = tf_compat.reshape(abs_var, [-1])\n element_ranks_flat = tf_compat.scatter_nd(\n tf_compat.expand_dims(argsort(abs_var_flat), 1),\n tf_compat.range(abs_var_flat.get_shape()[0].value),\n abs_var_flat.get_shape(),\n )\n element_ranks = tf_compat.reshape(element_ranks_flat, abs_var.get_shape())\n return tf_compat.cast(\n tf_compat.greater(element_ranks, sparse_threshold_index),\n tf_compat.float32,\n )\n\n def __str__(self):\n return \"unstructured\"\n\n def __repr__(self):\n return str(self)\n\n\nclass GroupedPruningMaskCreator(UnstructuredPruningMaskCreator):\n \"\"\"\n Abstract class for a sparsity mask creator that structures masks according to\n grouping functions. Subclasses should implement group_tensor and\n _map_mask_to_tensor\n \"\"\"\n\n _GROUPING_OPS = {\n \"mean\": tf_compat.reduce_mean,\n \"max\": tf_compat.reduce_max,\n \"min\": tf_compat.reduce_min,\n }\n\n @staticmethod\n def get_grouping_op(grouping_op_name: str) -> tf_compat.Operation:\n \"\"\"\n :param grouping_op_name: name of grouping operation to get tf operation for\n :return: tf operation for grouping_op_name if available, raises error otherwise\n \"\"\"\n if grouping_op_name not in GroupedPruningMaskCreator._GROUPING_OPS:\n raise ValueError(\"Invalid grouping op {}, valid grouping ops: {}\".format(\n grouping_op_name, GroupedPruningMaskCreator._GROUPING_OPS\n ))\n return GroupedPruningMaskCreator._GROUPING_OPS[grouping_op_name]\n\n @abstractmethod\n def group_tensor(self, tensor: tf_compat.Tensor) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: The tensor to reduce in groups\n :return: The grouped tensor\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def _map_mask_to_tensor(\n self,\n grouped_mask: tf_compat.Tensor,\n original_tensor_shape: tf_compat.TensorShape,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param grouped_mask: A binary mask the size of a tensor from group_tensor\n :param original_tensor_shape: Shape of the original tensor grouped_mask\n derives from\n :return: The values from grouped_mask mapped to a tensor of size\n original_tensor_shape\n \"\"\"\n raise NotImplementedError()\n\n def get_mask_initializer(\n self,\n tensor: tf_compat.Tensor,\n ) -> Callable[[], tf_compat.Tensor]:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :return: Tensor initializer function for this sparsity mask\n \"\"\"\n\n def grouped_non_zero_mask_initializer(\n shape: tf_compat.TensorShape,\n dtype: tf_compat.DType = tf_compat.float32,\n partition_info: Any = None, # unused variable for compatibility\n ) -> tf_compat.Tensor:\n dtype = tf_compat.as_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == tf_compat.string:\n raise ValueError(\"Expected numeric or boolean dtype, got %s.\" % dtype)\n 
grouped_tensor = self.group_tensor(tensor)\n grouped_mask = tf_compat.not_equal(grouped_tensor, 0.0)\n mask = self._map_mask_to_tensor(grouped_mask, tensor.shape)\n return tf_compat.cast(mask, dtype=dtype)\n\n return grouped_non_zero_mask_initializer\n\n def create_sparsity_mask(\n self,\n tensor: tf_compat.Tensor,\n sparsity: tf_compat.Tensor,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: A tensor of a model layer's weights\n :param sparsity: the target sparsity to use for assigning the masks\n :return: A sparsity mask close to the set sparsity based on the values of\n the input tensor\n \"\"\"\n grouped_tensor = self.group_tensor(tensor)\n grouped_mask = super().create_sparsity_mask(grouped_tensor, sparsity)\n return self._map_mask_to_tensor(grouped_mask, tensor.shape)\n\n\nclass DimensionPruningMaskCreator(GroupedPruningMaskCreator):\n \"\"\"\n Structured sparsity mask creator that groups sparsity blocks by the given\n dimension(s)\n\n :param dim: The index or list of indices of dimensions to group the mask by or\n the type of dims to prune (['channel', 'filter'])\n \"\"\"\n\n _VALID_DIM_NAMES = [\"channel\", \"filter\"]\n\n def __init__(\n self,\n dim: Union[str, int, List[int]],\n grouping_op_name: str = \"mean\",\n ):\n if isinstance(dim, int):\n dim = [dim]\n self._dim = dim # List[int]\n self._grouping_op = GroupedPruningMaskCreator.get_grouping_op(grouping_op_name)\n self._dim_name = None\n if isinstance(dim, str):\n if dim in DimensionPruningMaskCreator._VALID_DIM_NAMES:\n self._dim_name = dim\n else:\n raise ValueError(\n \"Invalid Dimension name: {}, valid names: {}\".format(\n dim, DimensionPruningMaskCreator._VALID_DIM_NAMES\n )\n )\n\n def _set_dim_by_name_for_tensor(self, tensor: tf_compat.Tensor):\n n_dims = len(tensor.shape)\n if n_dims <= 2:\n if self._dim_name == \"channel\":\n self._dim = [0]\n else:\n raise ValueError(\n f\"filter pruning unsupported for tensors with fewer than \"\n f\"3 dimensions. 
Received Tensor with shape {tensor.shape}\"\n )\n elif self._dim_name == \"channel\":\n # in channel should be the second to last dimension\n self._dim = [n_dims - 2]\n elif self._dim_name == \"filter\":\n # Non-kernel dimensions should be the last two in a conv (in / out channels)\n self._dim = [n_dims - 2, n_dims - 1]\n else:\n raise ValueError(\n \"Invalid dimension prune type: {}, valid types: {}\".format(\n self._dim_name, DimensionPruningMaskCreator._VALID_DIM_NAMES\n )\n )\n\n def group_tensor(self, tensor: tf_compat.Tensor) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: The tensor to transform\n :return: The absolute mean values of the tensor grouped by the\n dimension(s) in self._dim\n \"\"\"\n if self._dim_name is not None:\n self._set_dim_by_name_for_tensor(tensor)\n n_dims = len(tensor.shape)\n reduced_axis = [idx for idx in range(n_dims) if idx not in self._dim]\n return self._grouping_op(\n tf_compat.abs(tensor),\n axis=reduced_axis,\n keepdims=True,\n )\n\n def _map_mask_to_tensor(\n self,\n grouped_mask: tf_compat.Tensor,\n original_tensor_shape: tf_compat.TensorShape,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param grouped_mask: A binary mask the size of a tensor from group_tensor\n :param original_tensor_shape: Shape of the original tensor grouped_mask\n derives from\n :return: The values from grouped_mask mapped to a tensor of size\n original_tensor_shape\n \"\"\"\n # using tile instead of broadcast_to for compatibility with older tf versions\n # equivalent to: tf_compat.broadcast_to(grouped_mask, original_tensor_shape)\n tile_vals = [\n dim if idx not in self._dim else 1\n for (idx, dim) in enumerate(original_tensor_shape)\n ]\n return tf_compat.tile(grouped_mask, tile_vals)\n\n def __str__(self):\n if self._dim_name is not None:\n return self._dim_name\n return \"{}:{}\".format(self.__class__.__name__, self._dim)\n\n def __repr__(self):\n return str(self)\n\n\nclass BlockPruningMaskCreator(GroupedPruningMaskCreator):\n \"\"\"\n Structured sparsity mask creator that groups the input tensor into blocks of\n shape block_shape.\n block_shape must divide the shape of any input tensor evenly and must have exactly\n 2 elements for the shape of in and out channels in the blocks.\n\n :param block_shape: The shape of blocks to strucure blocks of in and out channels\n in the mask by. 
-1 represents blocking along the entire dimension.\n \"\"\"\n\n def __init__(\n self,\n block_shape: List[int],\n grouping_op_name: str = \"mean\",\n ):\n if len(block_shape) != 2:\n raise ValueError(\n (\n \"Invalid block_shape: {}\"\n \" , block_shape must have length == 2 for in and out channels\"\n ).format(block_shape)\n )\n self._block_shape = block_shape\n self._grouping_op = GroupedPruningMaskCreator.get_grouping_op(grouping_op_name)\n\n def group_tensor(self, tensor: tf_compat.Tensor) -> tf_compat.Tensor:\n \"\"\"\n :param tensor: The tensor to transform\n :return: The absolute mean values of the tensor grouped by blocks of\n shape self._block_shape\n \"\"\"\n blocked_tens_shape, _ = self._get_blocked_tens_shape_and_validate(tensor.shape)\n # reorder so that in and out channel dimensions come before kernel\n n_dims = len(tensor.shape)\n if n_dims >= 3:\n tens_trans_dims = [n_dims - 2, n_dims - 1, *range(n_dims - 2)]\n tensor = tf_compat.transpose(tensor, tens_trans_dims)\n blocked_tens = tf_compat.reshape(tensor, blocked_tens_shape)\n reduced_blocks = self._grouping_op(\n tf_compat.abs(blocked_tens), 1, keepdims=True\n )\n return reduced_blocks\n\n def _map_mask_to_tensor(\n self,\n grouped_mask: tf_compat.Tensor,\n original_tensor_shape: tf_compat.TensorShape,\n ) -> tf_compat.Tensor:\n \"\"\"\n :param grouped_mask: A binary mask the size of a tensor from group_tensor\n :param original_tensor_shape: Shape of the original tensor grouped_mask\n derives from\n :return: The values from grouped_mask mapped to a tensor of size\n original_tensor_shape\n \"\"\"\n (\n blocked_tens_shape,\n original_tensor_shape,\n ) = self._get_blocked_tens_shape_and_validate(original_tensor_shape)\n block_values_shape = [blocked_tens_shape[0], blocked_tens_shape[2]]\n # expand so every element has a corresponding value in the original tensor\n block_mask = tf_compat.reshape(grouped_mask, block_values_shape)\n block_mask = tf_compat.expand_dims(block_mask, 1)\n\n # Recover reduced dimension of block_mask, using tile instead of broadcast_to\n # for compatibility with older versions of tf\n block_mask_shape = [dim.value for dim in block_mask.shape]\n tile_shape = [\n int(block_dim / mask_dim)\n for (block_dim, mask_dim) in zip(blocked_tens_shape, block_mask_shape)\n ]\n # equivalent to: tf_compat.broadcast_to(block_mask, blocked_tens_shape)\n tensor_mask_blocked = tf_compat.tile(block_mask, tile_shape)\n\n mask = tf_compat.reshape(tensor_mask_blocked, original_tensor_shape)\n # Undo channel / kernel transpose if applicable\n n_dims = len(original_tensor_shape)\n if n_dims >= 3:\n tens_trans_dims = [*range(2, n_dims), 0, 1]\n mask = tf_compat.transpose(mask, tens_trans_dims)\n return mask\n\n def _get_blocked_tens_shape_and_validate(\n self,\n tens_shape: tf_compat.TensorShape,\n ) -> Tuple[List[int], tf_compat.TensorShape]:\n \"\"\"\n :param tens_shape: The shape of the tensor to group in blocks\n :return: shape of tens when blocked by block_shape and the original\n tensor shape with any transposes applied to it\n :raise: ValueError if we are unable to block tens by shape block_shape\n \"\"\"\n block_shape = self._block_shape\n n_dims = len(tens_shape)\n if len(tens_shape) >= 3: # conv should have block shape like [1, ..., 1, X, Y]\n block_shape = [*[1] * (n_dims - 2), *block_shape]\n tens_shape = [dim.value for dim in tens_shape]\n for idx, shape in enumerate(block_shape):\n if shape == -1:\n block_shape[idx] = int(tens_shape[idx])\n # Validate\n if n_dims < 2:\n raise ValueError(\n \"Invalid tensor shape 
{}.\"\n \" BlockSparsityMaskCreator can only create masks from tensors with 2 or\"\n \" more dimensions, tensor has {}.\".format(tens_shape, n_dims)\n )\n for tens_dim, block_dim in zip(tens_shape, block_shape):\n if tens_dim % block_dim != 0:\n raise ValueError(\n f\"Invalid block_shape {block_shape} for parameter shape \"\n f\"{tens_shape}. Elements of block_shape must divide parameter \"\n f\"shape evenly\"\n )\n # If this is a series of conv filters, reorder so in and out channels are first\n if n_dims >= 3:\n transpose_idx = [n_dims - 2, n_dims - 1, *range(n_dims - 2)]\n block_shape = [block_shape[idx] for idx in transpose_idx]\n tens_shape = [tens_shape[idx] for idx in transpose_idx]\n # Compute blocked tensor shape\n if len(block_shape) > 1 and block_shape[1] > 1:\n blocked_tens_shape = [\n tens_shape[0] * tens_shape[1] // (block_shape[0] * block_shape[1]),\n block_shape[0] * block_shape[1],\n -1,\n ]\n else:\n blocked_tens_shape = [tens_shape[0] // block_shape[0], block_shape[0], -1]\n tens_size = numpy.prod(tens_shape)\n num_block_elements = blocked_tens_shape[0] * blocked_tens_shape[1]\n blocked_tens_shape[2] = tens_size // num_block_elements\n return blocked_tens_shape, tens_shape\n\n def __str__(self):\n return str(self._block_shape)\n\n def __repr__(self):\n return str(self)\n\n\nmask_creator_name_to_constructor_lambda = {\n \"unstructured\": lambda: UnstructuredPruningMaskCreator(),\n \"channel\": lambda: DimensionPruningMaskCreator(\"channel\"),\n \"filter\": lambda: DimensionPruningMaskCreator(\"filter\"),\n}\n\n\ndef load_mask_creator(obj: Union[str, Iterable[int]]) -> PruningMaskCreator:\n \"\"\"\n :param obj: Formatted string or iterable of block_shape specifying\n SparsityMaskCreator object to return\n :return: SparsityMaskCreator object created from obj\n \"\"\"\n if isinstance(obj, str) and obj in mask_creator_name_to_constructor_lambda:\n constructor_lambda = mask_creator_name_to_constructor_lambda[obj]\n return constructor_lambda()\n # Checking for a BlockSparsityMaskCreator string\n if (\"[\" in obj and \"]\" in obj) or (\"(\" in obj and \")\" in obj):\n stripped_str = obj.strip(\"[|]|(|)\")\n block_shape = [int(s) for s in stripped_str.split(\",\")]\n return BlockPruningMaskCreator(block_shape)\n if isinstance(obj, list) or isinstance(obj, tuple):\n return BlockPruningMaskCreator(obj)\n raise ValueError(\n \"Invalid mask type string: {}, could not map to an object\".format(obj)\n )\n" ]
[ [ "torch.randn" ], [ "tensorflow.contrib.slim.arg_scope", "tensorflow.contrib.layers.l2_regularizer" ], [ "numpy.savez", "numpy.random.randn", "numpy.dtype" ], [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Totoketchup/Adaptive-MultiSpeaker-Separation
[ "8e7e869b8050643a777e315d1ddac577a8dc85ff" ]
[ "models/SC_V2.py" ]
[ "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom utils.ops import BLSTM, Conv1D, Reshape, Normalize, f_props, scope, log10\nfrom models.network import Separator\n\nclass L41ModelV2(Separator):\n\n\tdef __init__(self, graph=None, **kwargs):\n\t\tkwargs['mask_a'] = 1.0\n\t\tkwargs['mask_b'] = -1.0\n\n\t\tsuper(L41ModelV2, self).__init__(graph, **kwargs)\n\n\t\twith self.graph.as_default():\n\t\t\t# Define the speaker vectors to use during training\n\t\t\tself.speaker_vectors =tf.Variable(tf.truncated_normal(\n\t\t\t\t\t\t\t\t [self.num_speakers, self.embedding_size],\n\t\t\t\t\t\t\t\t stddev=tf.sqrt(2/float(self.embedding_size))), name='speaker_centroids')\n\t\tself.init_separator()\n\n\t@scope\n\tdef prediction(self):\n\t\t# L41 network\n\t\tshape = tf.shape(self.X)\n\n\t\tself.true_masks = 1.0 + self.y\n\n\t\tX_in = tf.identity(self.X)\n\t\t\n\n\t\tlayers = [BLSTM(self.layer_size, name='BLSTM_'+str(i), drop_val=self.rdropout) for i in range(self.nb_layers)]\n\n\t\tlayers_sp = [\n\t\t\tConv1D([1, self.layer_size, self.embedding_size*self.F]),\n\t\t\tReshape([self.B, shape[1], self.F, self.embedding_size]),\n\t\t]\n\n\t\tlayers += layers_sp\n\n\t\ty = f_props(layers, X_in)\n\t\t\n\t\treturn y\n\n\t@scope\n\tdef cost(self):\n\t\t\"\"\"\n\t\tConstruct the cost function op for the negative sampling cost\n\t\t\"\"\"\n\n\t\tif self.loss_with_silence:\n\t\t\tmax_ = tf.reduce_max(tf.abs(self.X), [1, 2], keep_dims=True)\n\t\t\tlog_compare = log10(tf.divide(max_, tf.abs(self.X)))\n\t\t\tmask = tf.cast(log_compare < self.threshold_silence_loss, tf.float32)\n\t\t\ttf.summary.image('separator/silence_mask', tf.expand_dims(mask,3), max_outputs=1)\n\t\t\ty_a_b = self.y * tf.expand_dims(mask, 3)\n\t\t\ty_0_1 = (self.y + 1.0)/2.0 * tf.expand_dims(mask, 3)\n\t\telse:\n\t\t\ty_a_b = self.y\n\t\t\ty_0_1 = (self.y + 1.0)/2.0 \n\n\n\t\ttf.summary.image('mask/true/1', tf.abs(tf.expand_dims(y_0_1[:,:,:,0],3)))\n\t\ttf.summary.image('mask/true/2', tf.abs(tf.expand_dims(y_0_1[:,:,:,1],3)))\n\n\n\t\t# Get the embedded T-F vectors from the network\n\t\tembedding = self.prediction # [B, T, F, E]\n\n\t\tembedding_broad = tf.expand_dims(embedding, 4) # [B, T, F, E, 1]\n\t\ty_broad = tf.expand_dims(y_0_1, 3) # [B, T, F, 1, S] \n\t\tv_mean = tf.reduce_sum(embedding_broad * y_broad, [1,2]) / ( 1e-12 + tf.expand_dims(tf.reduce_sum(y_0_1, [1,2]), 1))# [B, E, S]\n\t\t\n\t\t#\n\t\t# Reconstruction loss\n\t\t#\n\n\t\twith tf.name_scope('reconstruction_loss'):\n\n\t\t\tv_mean_broad = tf.expand_dims(v_mean, 1) # [B, 1, E, S]\n\t\t\tv_mean_broad = tf.expand_dims(v_mean_broad, 1) # [B, 1, 1, E, S]\n\n\t\t\tassignments = tf.reduce_sum(v_mean_broad * embedding_broad, 3) # [B, T, F, S]\n\n\t\t\tassignments = tf.nn.sigmoid(assignments) # [B, T, F, S]\n\n\t\t\tmasked_input = tf.expand_dims(self.X_input, 3) * assignments\n\n\t\t\t# X_non_mix [B, T, F, S]\t\t\t\n\t\t\tcost_recons = tf.reduce_mean(tf.square(self.X_non_mix - masked_input), axis=[1, 2])\n\t\t\tcost_recons = tf.reduce_mean(cost_recons, axis=-1) # Mean among all speakers [B, S]\n\t\t\tcost_recons = tf.reduce_mean(cost_recons)\n\t\t\ttf.summary.scalar('value', cost_recons)\n\n\t\t#\n\t\t# Constrast loss\n\t\t#\n\t\twith tf.name_scope('source_contrastive_loss'):\n\n\t\t\tspeaker_vectors = tf.nn.l2_normalize(self.speaker_vectors, 1)\n\t\t\tembedding = tf.nn.l2_normalize(embedding, -1)\n\n\t\t\tI = tf.expand_dims(self.I, axis=2) # [B, S, 1]\n\t\t\t# Gathering the speaker_vectors [|S|, E]\n\t\t\tVspeakers = tf.gather_nd(speaker_vectors, I) # [B, S, E]\n\t\t\t\n\t\t\t# 
Expand the dimensions in preparation for broadcasting\n\t\t\tVspeakers_broad = tf.expand_dims(Vspeakers, 1)\n\t\t\tVspeakers_broad = tf.expand_dims(Vspeakers_broad, 1) # [B, 1, 1, S, E]\n\t\t\tembedding_broad = tf.expand_dims(embedding, 3)\n\n\t\t\t# Compute the dot product between the embedding vectors and speaker\n\t\t\t# vectors\n\t\t\tdot = tf.reduce_sum(Vspeakers_broad * embedding_broad, 4)\n\n\t\t\t# Compute the cost for every element\n\n\t\t\tsc_cost = -tf.log(tf.nn.sigmoid(y_a_b * dot))\n\n\t\t\tsc_cost = tf.reduce_mean(sc_cost, 3) # Average the cost over all speakers in the input\n\t\t\tsc_cost = tf.reduce_mean(sc_cost, 0)\t# Average the cost over all batches\n\t\t\tsc_cost = tf.reduce_mean(sc_cost) \n\t\t\ttf.summary.scalar('value', sc_cost)\n\n\t\tcost = sc_cost + cost_recons\n\t\ttf.summary.scalar('total', cost)\n\n\t\treturn cost" ]
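A minimal NumPy sketch of the source-contrastive term computed in the `cost` method above, reduced to scalars (illustrative only; y_ab stands for the +1/-1 assignment mask and dot for the embedding/speaker-vector dot product):

import numpy as np

def sc_cost(y_ab, dot):
    # -log(sigmoid(y_ab * dot)): small when the dot product agrees in sign
    # with the +1/-1 assignment, large when it disagrees.
    return -np.log(1.0 / (1.0 + np.exp(-y_ab * dot)))

print(sc_cost(1.0, 3.0))   # assigned bin, strong agreement  -> ~0.049
print(sc_cost(-1.0, 3.0))  # unassigned bin, high similarity -> ~3.049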
[ [ "tensorflow.nn.l2_normalize", "tensorflow.nn.sigmoid", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.identity", "tensorflow.expand_dims", "tensorflow.name_scope", "tensorflow.square", "tensorflow.summary.scalar", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Cury30/Anomaly_detection
[ "fad172f6d9cc8dad73a79bc89290c578d67e2b35" ]
[ "create_bg.py" ]
[ "import cv2\nimport os\nimport numpy as np\nimport argparse\nimport uuid\nimport sys\nimport scipy.spatial\nimport matplotlib.pyplot as plt\n\n\nmodel_path = str(sys.argv[1])\nROADMASKDIR = model_path + \"/RoadMask/\"\nMINUTEMASKDIR = model_path + \"/MinuteMask/\"\n#INPUTVIDEOPATH = os.environ['AICITYVIDEOPATH'] + \"/test-data/\"\nINPUTVIDEOPATH = model_path + \"/Dataset/\"\ndarktexfile=open(\"dark.txt\",\"w\")\ndarkthreshold=290000\nvideo_amount = len(next(os.walk(INPUTVIDEOPATH))[2]) + 1\n\n\n\nprint(\"Using Input Video Path : \"+INPUTVIDEOPATH)\n\ndef unsharp_mask(image, kernel_size=(7, 7), sigma=1.0, amount=1.0, threshold=0):\n blurred = cv2.GaussianBlur(image, kernel_size, sigma)\n sharpened = float(amount + 1) * image - float(amount) * blurred\n sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))\n sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))\n sharpened = sharpened.round().astype(np.uint8)\n if threshold > 0:\n low_contrast_mask = np.absolute(image - blurred) < threshold\n np.copyto(sharpened, image, where=low_contrast_mask)\n return sharpened\n\ndef apply_filter(frame):\n frame = cv2.GaussianBlur(frame, (3, 3), 0)\n ret, frame = cv2.threshold(frame, 220, 255, cv2.THRESH_BINARY)\n return frame\n\ndef mkdir_ifndef(dirname):\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n\ndef create_bg(vidnum): \n mkdir_ifndef(ROADMASKDIR)\n mkdir_ifndef(MINUTEMASKDIR)\n print(INPUTVIDEOPATH+str(vidnum)+\".mp4\") #modificacion\n cap = cv2.VideoCapture(INPUTVIDEOPATH+str(vidnum)+\".mp4\")\n vh = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n vw = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(\"VH: {}, VW: {}\".format(vh,vw)) #modificacion\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"Length: {}\".format(length)) #modificacion\n weight=255.0/length\n vroi = 255 * np.ones((vw, vh), dtype=np.uint8)\n vroi2 = 255 * np.ones((vw, vh), dtype=np.uint8)\n bs = cv2.createBackgroundSubtractorMOG2(detectShadows=False)\n bs.setBackgroundRatio(0.6)\n bs.setHistory(256)\n bs.setNMixtures(4)\n bs.setVarInit(15)\n bs.setVarThreshold(25)\n cmpx_reduction_frames = 256\n learn_rate=0.007\n cmpx_reduction_factor = 1 - np.exp(256 * np.log(0.995))\n masksum = np.zeros((vw, vh), np.float32) \n (rAvg, gAvg, bAvg) = (None, None, None)\n maskcount=0\n total=0\n while True:\n ret, frame = cap.read()\n frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)\n if not ret:\n break\n if frame_num == bs.getHistory():\n learn_rate = 0.005\n bs.setComplexityReductionThreshold(cmpx_reduction_factor)\n frame = cv2.bitwise_and(frame, frame, mask=vroi)\n fg_img = bs.apply(frame, learningRate=learn_rate)\n bg_img = bs.getBackgroundImage()\n ret, fg_img = cv2.threshold(fg_img, 192, 255, cv2.THRESH_BINARY)\n fg_mask = apply_filter(fg_img)\n fg_mask2 = fg_mask.copy()\n fg_mask = cv2.bitwise_and(fg_mask, fg_mask, mask=vroi2)\n sharpened_image = unsharp_mask(bg_img)\n kernel = np.ones((5,5), np.uint8) \n img_erosion = cv2.erode(fg_mask, kernel, iterations=3) \n img_dilation = cv2.dilate(img_erosion, kernel, iterations=3)\n opening = cv2.morphologyEx(img_dilation, cv2.MORPH_OPEN, kernel)\n masksum=masksum+(opening*weight)\n (B, G, R) = cv2.split(sharpened_image.astype(\"float\"))\n if rAvg is None:\n rAvg = R\n bAvg = B\n gAvg = G\n else:\n rAvg = ((total * rAvg) + (1 * R)) / (total + 1.0)\n gAvg = ((total * gAvg) + (1 * G)) / (total + 1.0)\n bAvg = ((total * bAvg) + (1 * B)) / (total + 1.0)\n total+=1\n if(frame_num%(30*60)==0):\n maskcount+=1\n mkdir_ifndef(MINUTEMASKDIR+str(vidnum))\n total=0\n avg = 
cv2.merge([bAvg, gAvg, rAvg]).astype(\"uint8\")\n cv2.imwrite(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\",avg)\n (rAvg, gAvg, bAvg) = (None, None, None)\n if(maskcount==1):\n img=plt.imread(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\")\n intensity = img.sum(axis=2)\n pixelsum=0\n for row in intensity:\n pixelsum+=sum(row)\n if(pixelsum < darkthreshold):\n darktexfile.write(str(vidnum)+\"\\n\")\n else:\n if(frame_num%(length/4)==0): #This part is just because of the limit on google collab. Shuldnt be here\n maskcount+=1\n mkdir_ifndef(MINUTEMASKDIR+str(vidnum))\n total=0\n avg = cv2.merge([bAvg, gAvg, rAvg]).astype(\"uint8\")\n cv2.imwrite(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\",avg)\n (rAvg, gAvg, bAvg) = (None, None, None)\n if(maskcount==1):\n img=plt.imread(MINUTEMASKDIR+str(vidnum)+\"/\"+str(maskcount)+\".png\")\n intensity = img.sum(axis=2)\n pixelsum=0\n for row in intensity:\n pixelsum+=sum(row)\n if(pixelsum < darkthreshold):\n darktexfile.write(str(vidnum)+\"\\n\")\n\n masksum=apply_filter(masksum) \n cv2.imwrite(ROADMASKDIR+str(vidnum)+\".png\",masksum)\n cap.release()\n\ndef find_freeze():\n out = open(\"freeze.txt\",'w')\n for i in range(1,video_amount):\n count = 1\n videoPath = INPUTVIDEOPATH + \"%d.mp4\"%(i)\n cap = cv2.VideoCapture(videoPath)\n ret, frame2 = cap.read()\n start = -1\n consec = 0\n while(cap.isOpened()):\n frame1 = frame2\n ret, frame2 = cap.read()\n if not ret:\n break\n count +=1\n difference = cv2.subtract(frame1, frame2)\n b, g, r = cv2.split(difference)\n if cv2.countNonZero(b) <= 3000 and cv2.countNonZero(g) <= 3000 and cv2.countNonZero(r) <= 3000:\n if(start == -1):\n start = count - 1\n consec = 0\n elif(start != -1):\n consec += 1\n if(consec > 10):\n if(count - start - consec > 120):\n out.write(\"%d %d %d\\n\"%(i, start, count-1-consec))\n start = -1\n consec = 0\n if(start != - 1 and start != count -1):\n start = - 1\n out.close()\n\nif __name__ == \"__main__\":\n for i in range(1,video_amount):\n create_bg(i)\n find_freeze()\n" ]
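The background model in `create_bg` above is an incremental per-channel mean of the sharpened MOG2 background frames (`rAvg`/`gAvg`/`bAvg`). The update rule in isolation, on synthetic frames rather than the script's videos (a sketch, not the script itself):

```python
import numpy as np

avg, total = None, 0
for _ in range(30):
    # Stand-in for one sharpened background frame from the video
    frame = np.random.randint(0, 256, (4, 4, 3)).astype("float")
    if avg is None:
        avg = frame
    else:
        # Running mean in O(1) memory: new_avg = (n * avg + frame) / (n + 1)
        avg = (total * avg + frame) / (total + 1.0)
    total += 1
print(avg.shape, avg.min(), avg.max())
```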
[ [ "numpy.log", "numpy.absolute", "numpy.ones", "numpy.copyto", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spencerking/qiskit-experiments
[ "11a254b010afe35933aaabac70de12b5b5a244bf", "11a254b010afe35933aaabac70de12b5b5a244bf" ]
[ "qiskit_experiments/library/quantum_volume/qv_analysis.py", "qiskit_experiments/library/characterization/t2ramsey_analysis.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nQuantum Volume analysis class.\n\"\"\"\n\nimport math\n\nimport warnings\nfrom typing import Optional\nimport numpy as np\n\nfrom qiskit_experiments.framework import BaseAnalysis, AnalysisResultData, FitVal\nfrom qiskit_experiments.curve_analysis import plot_scatter, plot_errorbar\n\n\nclass QuantumVolumeAnalysis(BaseAnalysis):\n r\"\"\"A class to analyze quantum volume experiments.\n\n # section: overview\n Calculate the quantum volume of the analysed system.\n The quantum volume is determined by the largest successful circuit depth.\n A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence\n level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran.\n we assume the error (standard deviation) of the heavy output probability is due to a\n binomial distribution. The standard deviation for binomial distribution is\n :math:`\\sqrt{(np(1-p))}`, where :math:`n` is the number of trials and :math:`p`\n is the success probability.\n \"\"\"\n\n # pylint: disable = arguments-differ\n def _run_analysis(\n self,\n experiment_data,\n plot: bool = True,\n ax: Optional[\"matplotlib.pyplot.AxesSubplot\"] = None,\n ):\n \"\"\"Run analysis on circuit data.\n\n Args:\n experiment_data (ExperimentData): the experiment data to analyze.\n plot (bool): If True generate a plot of fitted data.\n ax (AxesSubplot): Optional, matplotlib axis to add plot to.\n\n Returns:\n tuple: A pair ``(result_data figures)`` where\n ``result_data`` is a list of\n :class:`AnalysisResultData` objects, and ``figures`` may be\n None, a single figure, or a list of figures.\n \"\"\"\n depth = experiment_data.experiment.num_qubits\n data = experiment_data.data()\n num_trials = len(data)\n heavy_output_prob_exp = []\n\n for data_trial in data:\n heavy_output = self._calc_ideal_heavy_output(\n data_trial[\"metadata\"][\"ideal_probabilities\"], data_trial[\"metadata\"][\"depth\"]\n )\n heavy_output_prob_exp.append(\n self._calc_exp_heavy_output_probability(data_trial, heavy_output)\n )\n\n hop_result, qv_result = self._calc_quantum_volume(heavy_output_prob_exp, depth, num_trials)\n\n if plot:\n ax = self._format_plot(hop_result, ax=ax)\n figures = [ax.get_figure()]\n else:\n figures = None\n return [hop_result, qv_result], figures\n\n @staticmethod\n def _calc_ideal_heavy_output(probabilities_vector, depth):\n \"\"\"\n Calculate the bit strings of the heavy output for the ideal simulation\n\n Args:\n ideal_data (dict): the simulation result of the ideal circuit\n\n Returns:\n list: the bit strings of the heavy output\n \"\"\"\n\n format_spec = \"{0:0%db}\" % depth\n # Keys are bit strings and values are probabilities of observing those strings\n all_output_prob_ideal = {\n format_spec.format(b): float(np.real(probabilities_vector[b]))\n for b in range(2 ** depth)\n }\n\n median_probabilities = float(np.real(np.median(probabilities_vector)))\n heavy_strings = list(\n filter(\n lambda x: all_output_prob_ideal[x] > median_probabilities,\n list(all_output_prob_ideal.keys()),\n )\n )\n return 
heavy_strings\n\n @staticmethod\n def _calc_exp_heavy_output_probability(data, heavy_outputs):\n \"\"\"\n Calculate the probability of measuring heavy output string in the data\n\n Args:\n data (dict): the result of the circuit exectution\n heavy_outputs (list): the bit strings of the heavy output from the ideal simulation\n\n Returns:\n int: heavy output probability\n \"\"\"\n circ_shots = sum(data[\"counts\"].values())\n\n # Calculate the number of heavy output counts in the experiment\n heavy_output_counts = sum([data[\"counts\"].get(value, 0) for value in heavy_outputs])\n\n # Calculate the experimental heavy output probability\n return heavy_output_counts / circ_shots\n\n @staticmethod\n def _calc_z_value(mean, sigma):\n \"\"\"Calculate z value using mean and sigma.\n\n Args:\n mean (float): mean\n sigma (float): standard deviation\n\n Returns:\n float: z_value in standard normal distibution.\n \"\"\"\n\n if sigma == 0:\n # Assign a small value for sigma if sigma = 0\n sigma = 1e-10\n warnings.warn(\"Standard deviation sigma should not be zero.\")\n\n z_value = (mean - 2 / 3) / sigma\n\n return z_value\n\n @staticmethod\n def _calc_confidence_level(z_value):\n \"\"\"Calculate confidence level using z value.\n\n Accumulative probability for standard normal distribution\n in [-z, +infinity] is 1/2 (1 + erf(z/sqrt(2))),\n where z = (X - mu)/sigma = (hmean - 2/3)/sigma\n\n Args:\n z_value (float): z value in in standard normal distibution.\n\n Returns:\n float: confidence level in decimal (not percentage).\n \"\"\"\n\n confidence_level = 0.5 * (1 + math.erf(z_value / 2 ** 0.5))\n\n return confidence_level\n\n def _calc_quantum_volume(self, heavy_output_prob_exp, depth, trials):\n \"\"\"\n Calc the quantum volume of the analysed system.\n quantum volume is determined by the largest successful depth.\n A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence\n level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran.\n we assume the error (standard deviation) of the heavy output probability is due to a\n binomial distribution. 
standard deviation for binomial distribution is sqrt(np(1-p)),\n where n is the number of trials and p is the success probability.\n\n Returns:\n dict: quantum volume calculations -\n the quantum volume,\n whether the results passed the threshold,\n the confidence of the result,\n the heavy output probability for each trial,\n the mean heavy output probability,\n the error of the heavy output probability,\n the depth of the circuit,\n the number of trials ran\n \"\"\"\n quantum_volume = 1\n success = False\n\n mean_hop = np.mean(heavy_output_prob_exp)\n sigma_hop = (mean_hop * ((1.0 - mean_hop) / trials)) ** 0.5\n z = 2\n threshold = 2 / 3 + z * sigma_hop\n z_value = self._calc_z_value(mean_hop, sigma_hop)\n confidence_level = self._calc_confidence_level(z_value)\n if confidence_level > 0.977:\n quality = \"good\"\n else:\n quality = \"bad\"\n\n # Must have at least 100 trials\n if trials < 100:\n warnings.warn(\"Must use at least 100 trials to consider Quantum Volume as successful.\")\n\n if mean_hop > threshold and trials >= 100:\n quantum_volume = 2 ** depth\n success = True\n\n hop_result = AnalysisResultData(\n \"mean_HOP\",\n value=FitVal(mean_hop, sigma_hop),\n quality=quality,\n extra={\n \"HOPs\": heavy_output_prob_exp,\n \"two_sigma\": 2 * sigma_hop,\n \"depth\": depth,\n \"trials\": trials,\n },\n )\n\n qv_result = AnalysisResultData(\n \"quantum_volume\",\n value=quantum_volume,\n quality=quality,\n extra={\n \"success\": success,\n \"confidence\": confidence_level,\n \"depth\": depth,\n \"trials\": trials,\n },\n )\n return hop_result, qv_result\n\n @staticmethod\n def _format_plot(\n hop_result: AnalysisResultData, ax: Optional[\"matplotlib.pyplot.AxesSubplot\"] = None\n ):\n \"\"\"Format the QV plot\n\n Args:\n hop_result: the heavy output probability analysis result.\n ax: matplotlib axis to add plot to.\n\n Returns:\n AxesSubPlot: the matplotlib axes containing the plot.\n \"\"\"\n trials = hop_result.extra[\"trials\"]\n heavy_probs = hop_result.extra[\"HOPs\"]\n trial_list = np.arange(1, trials + 1) # x data\n\n hop_accumulative = np.cumsum(heavy_probs) / trial_list\n two_sigma = 2 * (hop_accumulative * (1 - hop_accumulative) / trial_list) ** 0.5\n\n # Plot inidivual HOP as scatter\n ax = plot_scatter(\n trial_list,\n heavy_probs,\n ax=ax,\n s=3,\n zorder=3,\n label=\"Individual HOP\",\n )\n # Plot accumulative HOP\n ax.plot(trial_list, hop_accumulative, color=\"r\", label=\"Cumulative HOP\")\n\n # Plot two-sigma shaded area\n ax = plot_errorbar(\n trial_list,\n hop_accumulative,\n two_sigma,\n ax=ax,\n fmt=\"none\",\n ecolor=\"lightgray\",\n elinewidth=20,\n capsize=0,\n alpha=0.5,\n label=\"2$\\\\sigma$\",\n )\n # Plot 2/3 success threshold\n ax.axhline(2 / 3, color=\"k\", linestyle=\"dashed\", linewidth=1, label=\"Threshold\")\n\n ax.set_ylim(\n max(hop_accumulative[-1] - 4 * two_sigma[-1], 0),\n min(hop_accumulative[-1] + 4 * two_sigma[-1], 1),\n )\n\n ax.set_xlabel(\"Number of Trials\", fontsize=14)\n ax.set_ylabel(\"Heavy Output Probability\", fontsize=14)\n\n ax.set_title(\n \"Quantum Volume experiment for depth \"\n + str(hop_result.extra[\"depth\"])\n + \" - accumulative hop\",\n fontsize=14,\n )\n\n # Re-arrange legend order\n handles, labels = ax.get_legend_handles_labels()\n handles = [handles[1], handles[2], handles[0], handles[3]]\n labels = [labels[1], labels[2], labels[0], labels[3]]\n ax.legend(handles, labels)\n return ax\n", "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nT2Ramsey Experiment class.\n\"\"\"\n\nfrom typing import List, Optional, Tuple, Dict\nimport dataclasses\nimport numpy as np\n\nfrom qiskit.utils import apply_prefix\nfrom qiskit_experiments.framework import (\n BaseAnalysis,\n Options,\n ExperimentData,\n AnalysisResultData,\n FitVal,\n)\nfrom qiskit_experiments.curve_analysis import curve_fit, plot_curve_fit, plot_errorbar, plot_scatter\nfrom qiskit_experiments.curve_analysis.curve_fit import process_curve_data\nfrom qiskit_experiments.curve_analysis.data_processing import level2_probability\n\n\n# pylint: disable = invalid-name\nclass T2RamseyAnalysis(BaseAnalysis):\n r\"\"\"\n T2 Ramsey result analysis class.\n\n # section: fit_model\n This class is used to analyze the results of a T2 Ramsey experiment.\n The probability of measuring :math:`|+\\rangle` state is assumed to be of the form\n\n .. math::\n\n f(t) = a\\mathrm{e}^{-t / T_2^*}\\cos(2\\pi f t + \\phi) + b\n\n # section: fit_parameters\n\n defpar a:\n desc: Amplitude. Height of the decay curve.\n init_guess: 0.5\n bounds: [-0.5, 1.5]\n\n defpar b:\n desc: Offset. Base line of the decay curve.\n init_guess: 0.5\n bounds: [-0.5, 1.5]\n\n defpar \\phi:\n desc: Shift. Relative shift of the graph from the origin.\n init_guess: 0.0\n bounds: [-np.pi, np.pi]\n\n defpar T_2^*:\n desc: Represents the rate of decay.\n init_guess: the mean of the input delays.\n bounds: [0, np.inf]\n\n defpar f:\n desc: Frequency. 
Represents the difference in frequency between\n the user guess and the actual frequency of the qubit.\n init_guess: input osc_freq.\n bounds: [0.1 * f, 10 * f]\n\n \"\"\"\n\n @classmethod\n def _default_options(cls) -> Options:\n r\"\"\"Default analysis options.\n\n Analysis Options:\n user_p0 (List[Float]): user guesses for the fit parameters\n :math:`(a, b, f, \\phi, T_2^*)`.\n user_bounds (Tuple[List[float], List[float]]): Lower and upper bounds\n for the fit parameters.\n plot (bool): Create a graph if and only if True.\n \"\"\"\n options = super()._default_options()\n\n options.user_p0 = None\n options.user_bounds = None\n\n return options\n\n # pylint: disable=arguments-differ, unused-argument\n def _run_analysis(\n self,\n experiment_data: ExperimentData,\n user_p0: Optional[Dict[str, float]] = None,\n user_bounds: Optional[Tuple[List[float], List[float]]] = None,\n plot: bool = False,\n ax: Optional[\"AxesSubplot\"] = None,\n **kwargs,\n ) -> Tuple[List[AnalysisResultData], List[\"matplotlib.figure.Figure\"]]:\n r\"\"\"Calculate T2Ramsey experiment.\n\n Args:\n experiment_data (ExperimentData): the experiment data to analyze\n user_p0: contains initial values given by the user, for the\n fit parameters :math:`(a, t2ramsey, f, \\phi, b)`\n user_bounds: lower and upper bounds on the parameters in p0,\n given by the user.\n The first tuple is the lower bounds,\n The second tuple is the upper bounds.\n For both params, the order is :math:`a, t2ramsey, f, \\phi, b`.\n plot: if True, create the plot, otherwise, do not create the plot.\n ax: the plot object\n **kwargs: additional parameters for curve fit.\n\n Returns:\n The analysis result with the estimated :math:`t2ramsey` and 'f' (frequency)\n The graph of the function.\n \"\"\"\n\n def osc_fit_fun(x, a, t2ramsey, f, phi, c):\n \"\"\"Decay cosine fit function\"\"\"\n return a * np.exp(-x / t2ramsey) * np.cos(2 * np.pi * f * x + phi) + c\n\n def _format_plot(ax, unit, fit_result, conversion_factor):\n \"\"\"Format curve fit plot\"\"\"\n # Formatting\n ax.tick_params(labelsize=14)\n ax.set_xlabel(\"Delay (s)\", fontsize=12)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0, 0))\n ax.set_ylabel(\"Probability of measuring 0\", fontsize=12)\n t2ramsey = fit_result[\"popt\"][1] / conversion_factor\n t2_err = fit_result[\"popt_err\"][1] / conversion_factor\n box_text = \"$T_2Ramsey$ = {:.2f} \\u00B1 {:.2f} {}\".format(t2ramsey, t2_err, unit)\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"white\", ec=\"black\", lw=1)\n ax.text(\n 0.6,\n 0.9,\n box_text,\n ha=\"center\",\n va=\"center\",\n size=12,\n bbox=bbox_props,\n transform=ax.transAxes,\n )\n return ax\n\n # implementation of _run_analysis\n\n data = experiment_data.data()\n circ_metadata = data[0][\"metadata\"]\n unit = circ_metadata[\"unit\"]\n conversion_factor = circ_metadata.get(\"dt_factor\", None)\n osc_freq = circ_metadata.get(\"osc_freq\", None)\n if conversion_factor is None:\n conversion_factor = 1 if unit in (\"s\", \"dt\") else apply_prefix(1, unit)\n\n xdata, ydata, sigma = process_curve_data(data, lambda datum: level2_probability(datum, \"0\"))\n\n t2ramsey_estimate = np.mean(xdata)\n p0, bounds = self._t2ramsey_default_params(\n conversion_factor, user_p0, user_bounds, t2ramsey_estimate, osc_freq\n )\n xdata *= conversion_factor\n fit_result = curve_fit(\n osc_fit_fun, xdata, ydata, p0=list(p0.values()), sigma=sigma, bounds=bounds\n )\n fit_result = dataclasses.asdict(fit_result)\n fit_result[\"circuit_unit\"] = unit\n if osc_freq is not None:\n 
fit_result[\"osc_freq\"] = osc_freq\n if unit == \"dt\":\n fit_result[\"dt\"] = conversion_factor\n quality = self._fit_quality(\n fit_result[\"popt\"], fit_result[\"popt_err\"], fit_result[\"reduced_chisq\"]\n )\n chisq = fit_result[\"reduced_chisq\"]\n\n if plot:\n ax = plot_curve_fit(osc_fit_fun, fit_result, ax=ax)\n ax = plot_scatter(xdata, ydata, ax=ax)\n ax = plot_errorbar(xdata, ydata, sigma, ax=ax)\n _format_plot(ax, unit, fit_result, conversion_factor)\n figures = [ax.get_figure()]\n else:\n figures = None\n\n # Output unit is 'sec', regardless of the unit used in the input\n result_t2star = AnalysisResultData(\n \"T2star\",\n value=FitVal(fit_result[\"popt\"][1], fit_result[\"popt_err\"][1], \"s\"),\n quality=quality,\n chisq=chisq,\n extra=fit_result,\n )\n result_freq = AnalysisResultData(\n \"Frequency\",\n value=FitVal(fit_result[\"popt\"][2], fit_result[\"popt_err\"][2], \"Hz\"),\n quality=quality,\n chisq=chisq,\n extra=fit_result,\n )\n\n return [result_t2star, result_freq], figures\n\n def _t2ramsey_default_params(\n self,\n conversion_factor,\n user_p0=None,\n user_bounds=None,\n t2ramsey_input=None,\n freq_input=None,\n ) -> Tuple[List[float], Tuple[List[float]]]:\n \"\"\"Default fit parameters for oscillation data.\n\n Note that :math:`T_2^*` unit is converted to 'sec' and 'f' unit is\n converted to Hz, so the output will be given in 'sec' and 'Hz'.\n \"\"\"\n if user_p0 is None:\n a = 0.5\n t2ramsey = t2ramsey_input * conversion_factor\n f = freq_input\n phi = 0.0\n b = 0.5\n else:\n a = user_p0[\"A\"]\n t2ramsey = user_p0[\"T2star\"] * conversion_factor\n f = user_p0[\"f\"]\n phi = user_p0[\"phi\"]\n b = user_p0[\"B\"]\n p0 = {\"a_guess\": a, \"T2star\": t2ramsey, \"f_guess\": f, \"phi_guess\": phi, \"b_guess\": b}\n\n if user_bounds is None:\n a_bounds = [-0.5, 1.5]\n t2ramsey_bounds = [0, np.inf]\n f_bounds = [0.1 * f, 10 * f]\n phi_bounds = [-np.pi, np.pi]\n b_bounds = [-0.5, 1.5]\n bounds = [\n [a_bounds[i], t2ramsey_bounds[i], f_bounds[i], phi_bounds[i], b_bounds[i]]\n for i in range(2)\n ]\n else:\n bounds = user_bounds\n return p0, bounds\n\n @staticmethod\n def _fit_quality(fit_out, fit_err, reduced_chisq):\n # pylint: disable = too-many-boolean-expressions\n if (\n (reduced_chisq < 3)\n and (fit_err[0] is None or fit_err[0] < 0.1 * fit_out[0])\n and (fit_err[1] is None or fit_err[1] < 0.1 * fit_out[1])\n and (fit_err[2] is None or fit_err[2] < 0.1 * fit_out[2])\n ):\n return \"good\"\n else:\n return \"bad\"\n" ]
[ [ "numpy.arange", "numpy.median", "numpy.cumsum", "numpy.real", "numpy.mean" ], [ "numpy.exp", "numpy.mean", "numpy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
houcharlie/federated-legacy
[ "cb10a9cdcea33288f8113e7445782d21c8c65f81", "cb10a9cdcea33288f8113e7445782d21c8c65f81", "cb10a9cdcea33288f8113e7445782d21c8c65f81", "cb10a9cdcea33288f8113e7445782d21c8c65f81" ]
[ "tensorflow_federated/python/core/utils/encoding_utils_test.py", "tensorflow_federated/python/core/impl/value_impl.py", "tensorflow_federated/python/research/optimization/stackoverflow_lr/run_federated.py", "tensorflow_federated/python/core/impl/executors/executor_test_utils.py" ]
[ "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import test\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.backends.native import execution_contexts\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.templates.measured_process import MeasuredProcess\nfrom tensorflow_federated.python.core.utils import encoding_utils\nfrom tensorflow_federated.python.core.utils.computation_utils import StatefulAggregateFn\nfrom tensorflow_federated.python.core.utils.computation_utils import StatefulBroadcastFn\nfrom tensorflow_model_optimization.python.core.internal import tensor_encoding as te\n\n_bad_encoder_named_parameters = [('float', 1.0), ('string', 'str'),\n ('object', object),\n ('encoder', te.encoders.identity())]\n\n\nclass EncodedBroadcastTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_broadcast method.\"\"\"\n\n def test_build_encoded_broadcast_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_broadcast()'):\n encoding_utils.build_encoded_broadcast(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_broadcast(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n broadcast_fn = encoding_utils.build_encoded_broadcast(value, encoder)\n state_type = broadcast_fn._initialize_fn.type_signature.result\n broadcast_signature = computations.federated_computation(\n broadcast_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type,\n placements.SERVER)).type_signature\n\n self.assertIsInstance(broadcast_fn, StatefulBroadcastFn)\n self.assertEqual(state_type, broadcast_signature.result[0].member)\n self.assertEqual(placements.SERVER, 
broadcast_signature.result[0].placement)\n self.assertEqual(value_type, broadcast_signature.result[1].member)\n self.assertEqual(placements.CLIENTS,\n broadcast_signature.result[1].placement)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_broadcast_raises_bad_encoder(self, bad_encoder):\n value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast(value, bad_encoder)\n\n def test_build_encoded_broadcast_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = te.encoders.as_simple_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast(value, incompatible_encoder)\n\n def test_build_encoded_broadcast_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_broadcast(value, encoder)\n\n\nclass EncodedBroadcastProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_broadcast_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_broadcast_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n broadcast_process = encoding_utils.build_encoded_broadcast_process(\n value_type, encoder)\n state_type = broadcast_process._initialize_fn.type_signature.result\n broadcast_signature = broadcast_process._next_fn.type_signature\n\n self.assertIsInstance(broadcast_process, MeasuredProcess)\n self.assertEqual(state_type, broadcast_signature.result[0])\n self.assertEqual(placements.SERVER, broadcast_signature.result[0].placement)\n self.assertEqual(value_type, broadcast_signature.result[1].member)\n self.assertEqual(placements.CLIENTS,\n broadcast_signature.result[1].placement)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_broadcast_process_raises_bad_encoder(\n self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast_process(value_type, bad_encoder)\n\n def test_build_encoded_broadcast_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_simple_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_broadcast_process(value_type,\n incompatible_encoder)\n\n def test_build_encoded_broadcast_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = te.encoders.as_simple_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with 
self.assertRaises(ValueError):\n encoding_utils.build_encoded_broadcast_process(value_type, encoder)\n\n\nclass EncodedSumTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_sum method.\"\"\"\n\n def test_build_encoded_sum_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_sum()'):\n encoding_utils.build_encoded_sum(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_sum(self, value_constructor, encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_fn = encoding_utils.build_encoded_sum(value, encoder)\n state_type = gather_fn._initialize_fn.type_signature.result\n gather_signature = computations.federated_computation(\n gather_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32),\n placements.CLIENTS)).type_signature\n\n self.assertIsInstance(gather_fn, StatefulAggregateFn)\n self.assertEqual(state_type, gather_signature.result[0].member)\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_sum(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n gather_fn = encoding_utils.build_encoded_sum(value, encoder)\n initial_state = gather_fn.initialize()\n\n @computations.federated_computation(\n computation_types.FederatedType(\n gather_fn._initialize_fn.type_signature.result, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS))\n def call_gather(state, value):\n return gather_fn(state, value)\n\n _, value_sum = call_gather(initial_state, [value, value])\n self.assertAllClose(2 * value, value_sum)\n\n _, value_sum = call_gather(initial_state, [value, -value])\n self.assertAllClose(0 * value, value_sum)\n\n _, value_sum = call_gather(initial_state, [value, 2 * value])\n self.assertAllClose(3 * value, value_sum)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_sum_raises_bad_encoder(self, bad_encoder):\n value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum(value, bad_encoder)\n\n def test_build_encoded_sum_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = 
te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum(value, incompatible_encoder)\n\n def test_build_encoded_sum_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_sum(value, encoder)\n\n\nclass EncodedSumProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_sum_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_sum_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_process = encoding_utils.build_encoded_sum_process(\n value_type, encoder)\n state_type = gather_process._initialize_fn.type_signature.result\n gather_signature = gather_process._next_fn.type_signature\n\n self.assertIsInstance(gather_process, MeasuredProcess)\n self.assertEqual(state_type, gather_signature.result[0])\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_sum_process(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n value_type = type_conversions.type_from_tensors(value)\n gather_process = encoding_utils.build_encoded_sum_process(\n value_type, encoder)\n initial_state = gather_process.initialize()\n call_gather = gather_process._next_fn\n\n output = call_gather(initial_state, [value, value])\n self.assertAllClose(2 * value, output['result'])\n\n output = call_gather(initial_state, [value, -value])\n self.assertAllClose(0 * value, output['result'])\n\n output = call_gather(initial_state, [value, 2 * value])\n self.assertAllClose(3 * value, output['result'])\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_sum_process_raises_bad_encoder(self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum_process(value_type, bad_encoder)\n\n def test_build_encoded_sum_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_sum_process(value_type, incompatible_encoder)\n\n def test_build_encoded_sum_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = 
te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_sum_process(value_type, encoder)\n\n\nclass EncodedMeanTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_mean method.\"\"\"\n\n def test_build_encoded_mean_raise_warning(self):\n value = tf.constant(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('error', DeprecationWarning)\n with self.assertRaisesRegex(DeprecationWarning,\n 'tff.utils.build_encoded_mean()'):\n encoding_utils.build_encoded_mean(value, encoder)\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_mean(self, value_constructor, encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_fn = encoding_utils.build_encoded_mean(value, encoder)\n state_type = gather_fn._initialize_fn.type_signature.result\n gather_signature = computations.federated_computation(\n gather_fn._next_fn,\n computation_types.FederatedType(state_type, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32),\n placements.CLIENTS)).type_signature\n\n self.assertIsInstance(gather_fn, StatefulAggregateFn)\n self.assertEqual(state_type, gather_signature.result[0].member)\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_mean(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n gather_fn = encoding_utils.build_encoded_mean(value, encoder)\n initial_state = gather_fn.initialize()\n\n @computations.federated_computation(\n computation_types.FederatedType(\n gather_fn._initialize_fn.type_signature.result, placements.SERVER),\n computation_types.FederatedType(value_type, placements.CLIENTS),\n computation_types.FederatedType(\n computation_types.to_type(tf.float32), placements.CLIENTS))\n def call_gather(state, value, weight):\n return gather_fn(state, value, weight)\n\n _, value_mean = call_gather(initial_state, [value, value], [1.0, 1.0])\n self.assertAllClose(1 * value, value_mean)\n\n _, value_mean = call_gather(initial_state, [value, value], [0.3, 0.7])\n self.assertAllClose(1 * value, value_mean)\n\n _, value_mean = call_gather(initial_state, [value, 2 * value], [1.0, 2.0])\n self.assertAllClose(5 / 3 * value, value_mean)\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_mean_raises_bad_encoder(self, bad_encoder):\n 
value = tf.constant([0.0, 1.0])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean(value, bad_encoder)\n\n def test_build_encoded_mean_raises_incompatible_encoder(self):\n value = tf.constant([0.0, 1.0])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean(value, incompatible_encoder)\n\n def test_build_encoded_mean_raises_bad_structure(self):\n value = [tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0])]\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_mean(value, encoder)\n\n\nclass EncodedMeanProcessTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for build_encoded_mean_process method.\"\"\"\n\n @parameterized.named_parameters(\n ('tf_constant_identity', tf.constant, te.encoders.identity),\n ('tf_constant_uniform_quantization', tf.constant,\n lambda: te.encoders.uniform_quantization(8)),\n ('numpy_identity', lambda x: x, te.encoders.identity),\n ('numpy_uniform_quantization', lambda x: x,\n lambda: te.encoders.uniform_quantization(8)),\n )\n def test_build_encoded_mean_process(self, value_constructor,\n encoder_constructor):\n value = value_constructor(np.random.rand(20))\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n value_type = computation_types.to_type(value_spec)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n gather_process = encoding_utils.build_encoded_mean_process(\n value_type, encoder)\n state_type = gather_process._initialize_fn.type_signature.result\n gather_signature = gather_process._next_fn.type_signature\n\n self.assertIsInstance(gather_process, MeasuredProcess)\n self.assertEqual(state_type, gather_signature.result[0])\n self.assertEqual(placements.SERVER, gather_signature.result[0].placement)\n self.assertEqual(value_type, gather_signature.result[1].member)\n self.assertEqual(placements.SERVER, gather_signature.result[1].placement)\n\n def test_run_encoded_mean_process(self):\n value = np.array([0.0, 1.0, 2.0, -1.0])\n value_spec = tf.TensorSpec(value.shape, tf.dtypes.as_dtype(value.dtype))\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(), value_spec)\n value_type = type_conversions.type_from_tensors(value)\n gather_process = encoding_utils.build_encoded_mean_process(\n value_type, encoder)\n initial_state = gather_process.initialize()\n call_gather = gather_process._next_fn\n\n output = call_gather(initial_state, [value, value], [1.0, 1.0])\n self.assertAllClose(1 * value, output['result'])\n\n output = call_gather(initial_state, [value, value], [0.3, 0.7])\n self.assertAllClose(1 * value, output['result'])\n\n output = call_gather(initial_state, [value, 2 * value], [1.0, 2.0])\n self.assertAllClose(5 / 3 * value, output['result'])\n\n @parameterized.named_parameters(*_bad_encoder_named_parameters)\n def test_build_encoded_mean_process_raises_bad_encoder(self, bad_encoder):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n with self.assertRaises(TypeError):\n encoding_utils.build_encoded_mean_process(value_type, bad_encoder)\n\n def test_build_encoded_mean_process_raises_incompatible_encoder(self):\n value_type = computation_types.TensorType(tf.float32, shape=[2])\n incompatible_encoder = te.encoders.as_gather_encoder(\n te.encoders.identity(), tf.TensorSpec((3,)))\n with self.assertRaises(TypeError):\n 
encoding_utils.build_encoded_mean_process(value_type,\n incompatible_encoder)\n\n def test_build_encoded_mean_process_raises_bad_structure(self):\n value_type = computation_types.StructType([\n computation_types.TensorType(tf.float32, shape=[2]),\n computation_types.TensorType(tf.float32, shape=[2])\n ])\n encoder = te.encoders.as_gather_encoder(te.encoders.identity(),\n tf.TensorSpec((2,)))\n with self.assertRaises(ValueError):\n encoding_utils.build_encoded_mean_process(value_type, encoder)\n\n\nclass EncodingUtilsTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for utilities for building StatefulFns.\"\"\"\n\n @parameterized.named_parameters(\n ('identity', te.encoders.identity),\n ('uniform', lambda: te.encoders.uniform_quantization(8)),\n ('hadamard', lambda: te.encoders.hadamard_quantization(8)),\n (\n 'one_over_n',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n te.testing.PlusOneOverNEncodingStage()).make()),\n (\n 'state_update',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n StateUpdateTensorsEncodingStage()).make()),\n )\n def test_build_encode_decode_tf_computations_for_broadcast(\n self, encoder_constructor):\n value_spec = tf.TensorSpec((20,), tf.float32)\n encoder = te.encoders.as_simple_encoder(encoder_constructor(), value_spec)\n\n _, state_type = encoding_utils._build_initial_state_tf_computation(encoder)\n value_type = computation_types.to_type(value_spec)\n encode_fn, decode_fn = (\n encoding_utils._build_encode_decode_tf_computations_for_broadcast(\n state_type, value_type, encoder))\n\n self.assertEqual(state_type, encode_fn.type_signature.parameter[0])\n self.assertEqual(state_type, encode_fn.type_signature.result[0])\n # Output of encode should be the input to decode.\n self.assertEqual(encode_fn.type_signature.result[1],\n decode_fn.type_signature.parameter)\n # Decode should return the same type as input to encode - value_type.\n self.assertEqual(value_type, encode_fn.type_signature.parameter[1])\n self.assertEqual(value_type, decode_fn.type_signature.result)\n\n @parameterized.named_parameters(\n ('identity', te.encoders.identity),\n ('uniform', lambda: te.encoders.uniform_quantization(8)),\n ('hadamard', lambda: te.encoders.hadamard_quantization(8)),\n (\n 'one_over_n',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n te.testing.PlusOneOverNEncodingStage()).make()),\n (\n 'state_update',\n lambda: te.core.EncoderComposer( # pylint: disable=g-long-lambda\n StateUpdateTensorsEncodingStage()).make()),\n )\n def test_build_tf_computations_for_sum(self, encoder_constructor):\n # Tests that the partial computations have matching relevant input-output\n # signatures.\n value_spec = tf.TensorSpec((20,), tf.float32)\n encoder = te.encoders.as_gather_encoder(encoder_constructor(), value_spec)\n\n _, state_type = encoding_utils._build_initial_state_tf_computation(encoder)\n value_type = computation_types.to_type(value_spec)\n nest_encoder = encoding_utils._build_tf_computations_for_gather(\n state_type, value_type, encoder)\n\n self.assertEqual(state_type,\n nest_encoder.get_params_fn.type_signature.parameter)\n encode_params_type = nest_encoder.get_params_fn.type_signature.result[0]\n decode_before_sum_params_type = nest_encoder.get_params_fn.type_signature.result[\n 1]\n decode_after_sum_params_type = nest_encoder.get_params_fn.type_signature.result[\n 2]\n\n self.assertEqual(value_type,\n nest_encoder.encode_fn.type_signature.parameter[0])\n self.assertEqual(encode_params_type,\n 
nest_encoder.encode_fn.type_signature.parameter[1])\n self.assertEqual(decode_before_sum_params_type,\n nest_encoder.encode_fn.type_signature.parameter[2])\n state_update_tensors_type = nest_encoder.encode_fn.type_signature.result[2]\n\n accumulator_type = nest_encoder.zero_fn.type_signature.result\n self.assertEqual(state_update_tensors_type,\n accumulator_type.state_update_tensors)\n\n self.assertEqual(accumulator_type,\n nest_encoder.accumulate_fn.type_signature.parameter[0])\n self.assertEqual(nest_encoder.encode_fn.type_signature.result,\n nest_encoder.accumulate_fn.type_signature.parameter[1])\n self.assertEqual(accumulator_type,\n nest_encoder.accumulate_fn.type_signature.result)\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.parameter[0])\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.parameter[1])\n self.assertEqual(accumulator_type,\n nest_encoder.merge_fn.type_signature.result)\n self.assertEqual(accumulator_type,\n nest_encoder.report_fn.type_signature.parameter)\n self.assertEqual(accumulator_type,\n nest_encoder.report_fn.type_signature.result)\n\n self.assertEqual(\n accumulator_type.values,\n nest_encoder.decode_after_sum_fn.type_signature.parameter[0])\n self.assertEqual(\n decode_after_sum_params_type,\n nest_encoder.decode_after_sum_fn.type_signature.parameter[1])\n self.assertEqual(value_type,\n nest_encoder.decode_after_sum_fn.type_signature.result)\n\n self.assertEqual(state_type,\n nest_encoder.update_state_fn.type_signature.parameter[0])\n self.assertEqual(state_update_tensors_type,\n nest_encoder.update_state_fn.type_signature.parameter[1])\n self.assertEqual(state_type,\n nest_encoder.update_state_fn.type_signature.result)\n\n\[email protected]_style_adaptive_encoding_stage\nclass StateUpdateTensorsEncodingStage(te.core.AdaptiveEncodingStageInterface):\n \"\"\"Test encoding stage using supported state aggregation modes.\n\n This implementation does not use `encoding_stage.StateAggregationMode.STACK`\n which is currently not supported by the implementation.\n \"\"\"\n\n ENCODED_VALUES_KEY = 'state_update_tensors_identity'\n SUM_STATE_UPDATE_KEY = 'state_update_tensors_update_sum'\n MIN_STATE_UPDATE_KEY = 'state_update_tensors_update_min'\n MAX_STATE_UPDATE_KEY = 'state_update_tensors_update_max'\n LAST_SUM_STATE_KEY = 'state_update_tensors_state_sum'\n LAST_MIN_STATE_KEY = 'state_update_tensors_state_min'\n LAST_MAX_STATE_KEY = 'state_update_tensors_state_max'\n\n @property\n def name(self):\n \"\"\"See base class.\"\"\"\n return 'state_update_tensors'\n\n @property\n def compressible_tensors_keys(self):\n \"\"\"See base class.\"\"\"\n return [self.ENCODED_VALUES_KEY]\n\n @property\n def commutes_with_sum(self):\n \"\"\"See base class.\"\"\"\n return True\n\n @property\n def decode_needs_input_shape(self):\n \"\"\"See base class.\"\"\"\n return False\n\n @property\n def state_update_aggregation_modes(self):\n \"\"\"See base class.\"\"\"\n return {\n self.SUM_STATE_UPDATE_KEY: te.core.StateAggregationMode.SUM,\n self.MIN_STATE_UPDATE_KEY: te.core.StateAggregationMode.MIN,\n self.MAX_STATE_UPDATE_KEY: te.core.StateAggregationMode.MAX,\n }\n\n def initial_state(self):\n \"\"\"See base class.\"\"\"\n return {\n self.LAST_SUM_STATE_KEY: tf.constant(0.0),\n self.LAST_MIN_STATE_KEY: tf.constant(0.0),\n self.LAST_MAX_STATE_KEY: tf.constant(0.0),\n }\n\n def update_state(self, state, state_update_tensors):\n \"\"\"See base class.\"\"\"\n del state # Unused.\n return {\n self.LAST_SUM_STATE_KEY:\n 
tf.reduce_sum(state_update_tensors[self.SUM_STATE_UPDATE_KEY]),\n self.LAST_MIN_STATE_KEY:\n tf.reduce_min(state_update_tensors[self.MIN_STATE_UPDATE_KEY]),\n self.LAST_MAX_STATE_KEY:\n tf.reduce_max(state_update_tensors[self.MAX_STATE_UPDATE_KEY])\n }\n\n def get_params(self, state):\n \"\"\"See base class.\"\"\"\n del state # Unused.\n return {}, {}\n\n def encode(self, x, encode_params):\n \"\"\"See base class.\"\"\"\n del encode_params # Unused.\n x = tf.identity(x)\n return {\n self.ENCODED_VALUES_KEY: x\n }, {\n self.SUM_STATE_UPDATE_KEY: tf.reduce_sum(x),\n self.MIN_STATE_UPDATE_KEY: tf.reduce_min(x),\n self.MAX_STATE_UPDATE_KEY: tf.reduce_max(x),\n }\n\n def decode(self,\n encoded_tensors,\n decode_params,\n num_summands=None,\n shape=None):\n \"\"\"See base class.\"\"\"\n del decode_params, num_summands, shape # Unused.\n return tf.identity(encoded_tensors[self.ENCODED_VALUES_KEY])\n\n\nif __name__ == '__main__':\n execution_contexts.set_local_execution_context()\n test.main()\n", "# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementations of the abstract interface Value in api/value_base.\"\"\"\n\nimport abc\nimport collections\nfrom typing import Any, Union\n\nimport attr\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.common_libs import structure\nfrom tensorflow_federated.python.core.api import computation_base\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import value_base\nfrom tensorflow_federated.python.core.impl import tensorflow_serialization\nfrom tensorflow_federated.python.core.impl.compiler import building_block_factory\nfrom tensorflow_federated.python.core.impl.compiler import building_blocks\nfrom tensorflow_federated.python.core.impl.compiler import intrinsic_defs\nfrom tensorflow_federated.python.core.impl.context_stack import context_base\nfrom tensorflow_federated.python.core.impl.context_stack import context_stack_base\nfrom tensorflow_federated.python.core.impl.context_stack import symbol_binding_context\nfrom tensorflow_federated.python.core.impl.types import placement_literals\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.impl.utils import function_utils\nfrom tensorflow_federated.python.core.impl.utils import tensorflow_utils\n\n\ndef _unfederated(type_signature):\n if type_signature.is_federated():\n return type_signature.member\n return type_signature\n\n\n# Note: not a `ValueImpl` method because of the `__setattr__` override\ndef _is_federated_named_tuple(vimpl: 'ValueImpl') -> bool:\n comp_ty = vimpl.type_signature\n return comp_ty.is_federated() and comp_ty.member.is_struct()\n\n\n# Note: not a `ValueImpl` method because of the `__setattr__` override\ndef _is_named_tuple(vimpl: 'ValueImpl') -> bool:\n return vimpl.type_signature.is_struct() # pylint: 
disable=protected-access\n\n\ndef _check_struct_or_federated_struct(\n vimpl: 'ValueImpl',\n attribute: str,\n):\n if not (_is_named_tuple(vimpl) or _is_federated_named_tuple(vimpl)):\n raise AttributeError(\n f'`tff.Value` of non-structural type {vimpl.type_signature} has no '\n f'attribute {attribute}')\n\n\ndef _check_symbol_binding_context(context: context_base.Context):\n if not isinstance(context, symbol_binding_context.SymbolBindingContext):\n raise context_base.ContextError('TFF values should only be materialized '\n 'inside a context which can bind '\n 'references, generally a '\n '`FederatedComputationContext`. Attempted '\n 'to materialize a TFF value in a context '\n '{c} of type {t}.'.format(\n c=context, t=type(context)))\n\n\nclass ValueImpl(value_base.Value, metaclass=abc.ABCMeta):\n \"\"\"A generic base class for values that appear in TFF computations.\n\n If the value in this class is of `StructType` or `FederatedType`\n containing a `StructType`, the inner fields can be accessed by name\n (e.g. `my_value_impl.x = ...` or `y = my_value_impl.y`).\n\n Note that setting nested fields (e.g. `my_value_impl.x.y = ...`) will not\n work properly because it translates to\n `my_value_impl.__getattr__('x').__setattr__('y')`, but the object returned\n by `__getattr__` cannot proxy writes back to the original `ValueImpl`.\n \"\"\"\n\n def __init__(\n self,\n comp: building_blocks.ComputationBuildingBlock,\n context_stack: context_stack_base.ContextStack,\n ):\n \"\"\"Constructs a value of the given type.\n\n Args:\n comp: An instance of building_blocks.ComputationBuildingBlock that\n contains the logic that computes this value.\n context_stack: The context stack to use.\n \"\"\"\n py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n _check_symbol_binding_context(context_stack.current)\n # We override `__setattr__` for `ValueImpl` and so must assign fields using\n # the `__setattr__` impl on the superclass (rather than simply using\n # e.g. `self._comp = comp`.\n super().__setattr__('_comp', comp)\n super().__setattr__('_context_stack', context_stack)\n\n @property\n def type_signature(self):\n return self._comp.type_signature\n\n @classmethod\n def get_comp(cls, value):\n py_typecheck.check_type(value, cls)\n return value._comp # pylint: disable=protected-access\n\n @classmethod\n def get_context_stack(cls, value):\n py_typecheck.check_type(value, cls)\n return value._context_stack # pylint: disable=protected-access\n\n def __repr__(self):\n return repr(self._comp)\n\n def __str__(self):\n return str(self._comp)\n\n def __dir__(self):\n attributes = ['type_signature']\n type_signature = _unfederated(self.type_signature)\n if type_signature.is_struct():\n attributes.extend(dir(type_signature))\n return attributes\n\n def __getattr__(self, name):\n py_typecheck.check_type(name, str)\n _check_struct_or_federated_struct(self, name)\n if _is_federated_named_tuple(self):\n return ValueImpl(\n building_block_factory.create_federated_getattr_call(\n self._comp, name), self._context_stack)\n if name not in dir(self.type_signature):\n raise AttributeError(\n 'There is no such attribute \\'{}\\' in this tuple. 
Valid attributes: ({})'\n .format(name, ', '.join(dir(self.type_signature))))\n if self._comp.is_struct():\n return ValueImpl(getattr(self._comp, name), self._context_stack)\n return ValueImpl(\n building_blocks.Selection(self._comp, name=name), self._context_stack)\n\n def __setattr__(self, name, value):\n py_typecheck.check_type(name, str)\n _check_struct_or_federated_struct(self, name)\n value_comp = ValueImpl.get_comp(to_value(value, None, self._context_stack))\n if _is_federated_named_tuple(self):\n new_comp = building_block_factory.create_federated_setattr_call(\n self._comp, name, value_comp)\n super().__setattr__('_comp', new_comp)\n return\n named_tuple_setattr_lambda = building_block_factory.create_named_tuple_setattr_lambda(\n self.type_signature, name, value_comp)\n new_comp = building_blocks.Call(named_tuple_setattr_lambda, self._comp)\n fc_context = self._context_stack.current\n ref = fc_context.bind_computation_to_reference(new_comp)\n super().__setattr__('_comp', ref)\n\n def __bool__(self):\n raise TypeError(\n 'Federated computation values do not support boolean operations. '\n 'If you were attempting to perform logic on tensors, consider moving '\n 'this logic into a tff.tf_computation.')\n\n def __len__(self):\n type_signature = _unfederated(self.type_signature)\n if not type_signature.is_struct():\n raise TypeError(\n 'Operator len() is only supported for (possibly federated) structure'\n 'types, but the object on which it has been invoked is of type {}.'\n .format(self.type_signature))\n return len(type_signature)\n\n def __getitem__(self, key: Union[int, str, slice]):\n py_typecheck.check_type(key, (int, str, slice))\n if isinstance(key, str):\n return getattr(self, key)\n if _is_federated_named_tuple(self):\n return ValueImpl(\n building_block_factory.create_federated_getitem_call(self._comp, key),\n self._context_stack)\n if not _is_named_tuple(self):\n raise TypeError(\n 'Operator getitem() is only supported for structure types, but the '\n 'object on which it has been invoked is of type {}.'.format(\n self.type_signature))\n elem_length = len(self.type_signature)\n if isinstance(key, int):\n if key < 0 or key >= elem_length:\n raise IndexError(\n 'The index of the selected element {} is out of range.'.format(key))\n if self._comp.is_struct():\n return ValueImpl(self._comp[key], self._context_stack)\n else:\n return ValueImpl(\n building_blocks.Selection(self._comp, index=key),\n self._context_stack)\n elif isinstance(key, slice):\n index_range = range(*key.indices(elem_length))\n if not index_range:\n raise IndexError('Attempted to slice 0 elements, which is not '\n 'currently supported.')\n return to_value([self[k] for k in index_range], None, self._context_stack)\n\n def __iter__(self):\n type_signature = _unfederated(self.type_signature)\n if not type_signature.is_struct():\n raise TypeError(\n 'Operator iter() is only supported for (possibly federated) structure '\n 'types, but the object on which it has been invoked is of type {}.'\n .format(self.type_signature))\n for index in range(len(type_signature)):\n yield self[index]\n\n def __call__(self, *args, **kwargs):\n if not self.type_signature.is_function():\n raise SyntaxError(\n 'Function-like invocation is only supported for values of functional '\n 'types, but the value being invoked is of type {} that does not '\n 'support invocation.'.format(self.type_signature))\n if args or kwargs:\n args = [to_value(x, None, self._context_stack) for x in args]\n kwargs = {\n k: to_value(v, None, self._context_stack) 
for k, v in kwargs.items()\n }\n arg = function_utils.pack_args(self.type_signature.parameter, args,\n kwargs, self._context_stack.current)\n arg = ValueImpl.get_comp(to_value(arg, None, self._context_stack))\n else:\n arg = None\n fc_context = self._context_stack.current\n call = building_blocks.Call(self._comp, arg)\n ref = fc_context.bind_computation_to_reference(call)\n return ValueImpl(ref, self._context_stack)\n\n def __add__(self, other):\n other = to_value(other, None, self._context_stack)\n if not self.type_signature.is_equivalent_to(other.type_signature):\n raise TypeError('Cannot add {} and {}.'.format(self.type_signature,\n other.type_signature))\n call = building_blocks.Call(\n building_blocks.Intrinsic(\n intrinsic_defs.GENERIC_PLUS.uri,\n computation_types.FunctionType(\n [self.type_signature, self.type_signature],\n self.type_signature)),\n ValueImpl.get_comp(to_value([self, other], None, self._context_stack)))\n fc_context = self._context_stack.current\n ref = fc_context.bind_computation_to_reference(call)\n return ValueImpl(ref, self._context_stack)\n\n\ndef _wrap_constant_as_value(const, context_stack):\n \"\"\"Wraps the given Python constant as a `tff.Value`.\n\n Args:\n const: Python constant to be converted to TFF value. Anything convertible to\n Tensor via `tf.constant` can be passed in.\n context_stack: The context stack to use.\n\n Returns:\n An instance of `value_base.Value`.\n \"\"\"\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n tf_comp, _ = tensorflow_serialization.serialize_py_fn_as_tf_computation(\n lambda: tf.constant(const), None, context_stack)\n compiled_comp = building_blocks.CompiledComputation(tf_comp)\n called_comp = building_blocks.Call(compiled_comp)\n fc_context = context_stack.current\n ref = fc_context.bind_computation_to_reference(called_comp)\n return ValueImpl(ref, context_stack)\n\n\ndef _wrap_sequence_as_value(elements, element_type, context_stack):\n \"\"\"Wraps `elements` as a TFF sequence with elements of type `element_type`.\n\n Args:\n elements: Python object to be wrapped as a TFF sequence value.\n element_type: An instance of `Type` that determines the type of elements of\n the sequence.\n context_stack: The context stack to use.\n\n Returns:\n An instance of `tff.Value`.\n\n Raises:\n TypeError: If `elements` and `element_type` are of incompatible types.\n \"\"\"\n # TODO(b/113116813): Add support for other representations of sequences.\n py_typecheck.check_type(elements, list)\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n\n # Checks that the types of all the individual elements are compatible with the\n # requested type of the sequence as a whole.\n for elem in elements:\n elem_type = type_conversions.infer_type(elem)\n if not element_type.is_assignable_from(elem_type):\n raise TypeError(\n 'Expected all sequence elements to be {}, found {}.'.format(\n element_type, elem_type))\n\n # Defines a no-arg function that builds a `tf.data.Dataset` from the elements.\n def _create_dataset_from_elements():\n return tensorflow_utils.make_data_set_from_elements(\n tf.compat.v1.get_default_graph(), elements, element_type)\n\n # Wraps the dataset as a value backed by a no-argument TensorFlow computation.\n tf_comp, _ = tensorflow_serialization.serialize_py_fn_as_tf_computation(\n _create_dataset_from_elements, None, context_stack)\n call = building_blocks.Call(building_blocks.CompiledComputation(tf_comp))\n fc_context = context_stack.current\n ref = fc_context.bind_computation_to_reference(call)\n return ValueImpl(ref, context_stack)\n\n\n
def _dictlike_items_to_value(items, context_stack, container_type) -> ValueImpl:\n value = building_blocks.Struct(\n [(k, ValueImpl.get_comp(to_value(v, None, context_stack)))\n for k, v in items], container_type)\n return ValueImpl(value, context_stack)\n\n\ndef to_value(\n arg: Any,\n type_spec,\n context_stack: context_stack_base.ContextStack,\n parameter_type_hint=None,\n) -> ValueImpl:\n \"\"\"Converts the argument into an instance of `tff.Value`.\n\n The types of non-`tff.Value` arguments that are currently convertible to\n `tff.Value` include the following:\n\n * Lists, tuples, `structure.Struct`s, named tuples, and dictionaries, all\n of which are converted into instances of `tff.Tuple`.\n * Placement literals, converted into instances of `tff.Placement`.\n * Computations.\n * Python constants of type `str`, `int`, `float`, `bool`\n * Numpy objects inheriting from `np.ndarray` or `np.generic` (the parent\n of numpy scalar types)\n\n Args:\n arg: Either an instance of `tff.Value`, or an argument convertible to\n `tff.Value`. The argument must not be `None`.\n type_spec: An optional `computation_types.Type` or value convertible to it\n by `computation_types.to_type` which specifies the desired type signature\n of the resulting value. This allows for disambiguating the target type\n (e.g., when two TFF types can be mapped to the same Python\n representations), or `None` if none available, in which case TFF tries to\n determine the type of the TFF value automatically.\n context_stack: The context stack to use.\n parameter_type_hint: An optional `computation_types.Type` or value\n convertible to it by `computation_types.to_type` which specifies an\n argument type to use in the case that `arg` is a\n `function_utils.PolymorphicFunction`.\n\n Returns:\n An instance of `tff.Value` corresponding to the given `arg`, and of TFF type\n matching the `type_spec` if specified (not `None`).\n\n Raises:\n TypeError: if `arg` is of an unsupported type, or of a type that does not\n match `type_spec`. Raises explicit error message if TensorFlow constructs\n are encountered, as TensorFlow code should be sealed away from TFF\n federated context.\n \"\"\"\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n _check_symbol_binding_context(context_stack.current)\n if type_spec is not None:\n type_spec = computation_types.to_type(type_spec)\n if isinstance(arg, ValueImpl):\n result = arg\n elif isinstance(arg, building_blocks.ComputationBuildingBlock):\n result = ValueImpl(arg, context_stack)\n elif isinstance(arg, placement_literals.PlacementLiteral):\n result = ValueImpl(building_blocks.Placement(arg), context_stack)\n elif isinstance(\n arg, (computation_base.Computation, function_utils.PolymorphicFunction)):\n if isinstance(arg, function_utils.PolymorphicFunction):\n if parameter_type_hint is None:\n raise TypeError(\n 'Polymorphic computations cannot be converted to TFF values '\n 'without a type hint. Consider explicitly specifying the '\n 'argument types of a computation before passing it to a '\n 'function that requires a TFF value (such as a TFF intrinsic '\n 'like `federated_map`). 
If you are a TFF developer and think '\n 'this should be supported, consider providing `parameter_type_hint` '\n 'as an argument to the encompassing `to_value` conversion.')\n parameter_type_hint = computation_types.to_type(parameter_type_hint)\n arg = arg.fn_for_argument_type(parameter_type_hint)\n py_typecheck.check_type(arg, computation_base.Computation)\n result = ValueImpl(arg.to_compiled_building_block(), context_stack)\n elif type_spec is not None and type_spec.is_sequence():\n result = _wrap_sequence_as_value(arg, type_spec.element, context_stack)\n elif isinstance(arg, structure.Struct):\n result = ValueImpl(\n building_blocks.Struct([\n (k, ValueImpl.get_comp(to_value(v, None, context_stack)))\n for k, v in structure.iter_elements(arg)\n ]), context_stack)\n elif py_typecheck.is_named_tuple(arg):\n items = arg._asdict().items() # pytype: disable=attribute-error\n result = _dictlike_items_to_value(items, context_stack, type(arg))\n elif py_typecheck.is_attrs(arg):\n items = attr.asdict(\n arg, dict_factory=collections.OrderedDict, recurse=False).items()\n result = _dictlike_items_to_value(items, context_stack, type(arg))\n elif isinstance(arg, dict):\n if isinstance(arg, collections.OrderedDict):\n items = arg.items()\n else:\n items = sorted(arg.items())\n result = _dictlike_items_to_value(items, context_stack, type(arg))\n elif isinstance(arg, (tuple, list)):\n result = ValueImpl(\n building_blocks.Struct(\n [ValueImpl.get_comp(to_value(x, None, context_stack)) for x in arg],\n type(arg)), context_stack)\n elif isinstance(arg, tensorflow_utils.TENSOR_REPRESENTATION_TYPES):\n result = _wrap_constant_as_value(arg, context_stack)\n elif isinstance(arg, (tf.Tensor, tf.Variable)):\n raise TypeError(\n 'TensorFlow construct {} has been encountered in a federated '\n 'context. TFF does not support mixing TF and federated orchestration '\n 'code. 
Please wrap any TensorFlow constructs with '\n '`tff.tf_computation`.'.format(arg))\n else:\n raise TypeError(\n 'Unable to interpret an argument of type {} as a TFF value.'.format(\n py_typecheck.type_string(type(arg))))\n py_typecheck.check_type(result, ValueImpl)\n if (type_spec is not None and\n not type_spec.is_assignable_from(result.type_signature)):\n raise TypeError(\n 'The supplied argument maps to TFF type {}, which is incompatible with '\n 'the requested type {}.'.format(result.type_signature, type_spec))\n return result\n", "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trains and evaluates Stackoverflow LR model using TFF.\"\"\"\n\nimport functools\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.research.optimization.shared import fed_avg_schedule\nfrom tensorflow_federated.python.research.optimization.shared import iterative_process_builder\nfrom tensorflow_federated.python.research.utils import training_loop\nfrom tensorflow_federated.python.research.utils import training_utils\nfrom tensorflow_federated.python.research.utils import utils_impl\nfrom tensorflow_federated.python.research.utils.datasets import stackoverflow_lr_dataset\nfrom tensorflow_federated.python.research.utils.models import stackoverflow_lr_models\n\nwith utils_impl.record_hparam_flags():\n # Experiment hyperparameters\n flags.DEFINE_integer('vocab_tokens_size', 10000, 'Vocab tokens size used.')\n flags.DEFINE_integer('vocab_tags_size', 500, 'Vocab tags size used.')\n flags.DEFINE_integer('client_batch_size', 100,\n 'Batch size used on the client.')\n flags.DEFINE_integer('clients_per_round', 10,\n 'How many clients to sample per round.')\n flags.DEFINE_integer(\n 'client_epochs_per_round', 1,\n 'Number of client (inner optimizer) epochs per federated round.')\n flags.DEFINE_integer(\n 'num_validation_examples', 10000, 'Number of examples '\n 'to use from test set for per-round validation.')\n flags.DEFINE_integer('max_elements_per_user', 1000, 'Max number of training '\n 'sentences to use per user.')\n flags.DEFINE_integer(\n 'client_datasets_random_seed', 1, 'The random seed '\n 'governing the client dataset selection.')\n\nFLAGS = flags.FLAGS\n\n\ndef metrics_builder():\n \"\"\"Returns a `list` of `tf.keras.metric.Metric` objects.\"\"\"\n return [\n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),\n ]\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n stackoverflow_train, stackoverflow_validation, stackoverflow_test = stackoverflow_lr_dataset.get_stackoverflow_datasets(\n vocab_tokens_size=FLAGS.vocab_tokens_size,\n vocab_tags_size=FLAGS.vocab_tags_size,\n client_batch_size=FLAGS.client_batch_size,\n client_epochs_per_round=FLAGS.client_epochs_per_round,\n max_training_elements_per_user=FLAGS.max_elements_per_user,\n 
num_validation_examples=FLAGS.num_validation_examples)\n\n input_spec = stackoverflow_train.create_tf_dataset_for_client(\n stackoverflow_train.client_ids[0]).element_spec\n\n model_builder = functools.partial(\n stackoverflow_lr_models.create_logistic_model,\n vocab_tokens_size=FLAGS.vocab_tokens_size,\n vocab_tags_size=FLAGS.vocab_tags_size)\n\n loss_builder = functools.partial(\n tf.keras.losses.BinaryCrossentropy,\n from_logits=False,\n reduction=tf.keras.losses.Reduction.SUM)\n\n training_process = iterative_process_builder.from_flags(\n input_spec=input_spec,\n model_builder=model_builder,\n loss_builder=loss_builder,\n metrics_builder=metrics_builder)\n\n client_datasets_fn = training_utils.build_client_datasets_fn(\n train_dataset=stackoverflow_train,\n train_clients_per_round=FLAGS.clients_per_round,\n random_seed=FLAGS.client_datasets_random_seed)\n\n assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model\n\n evaluate_fn = training_utils.build_evaluate_fn(\n model_builder=model_builder,\n eval_dataset=stackoverflow_validation,\n loss_builder=loss_builder,\n metrics_builder=metrics_builder,\n assign_weights_to_keras_model=assign_weights_fn)\n\n test_fn = training_utils.build_evaluate_fn(\n model_builder=model_builder,\n # Use both val and test for symmetry with other experiments, which\n # evaluate on the entire test set.\n eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),\n loss_builder=loss_builder,\n metrics_builder=metrics_builder,\n assign_weights_to_keras_model=assign_weights_fn)\n\n logging.info('Training model:')\n logging.info(model_builder().summary())\n\n training_loop.run(\n training_process, client_datasets_fn, evaluate_fn, test_fn=test_fn)\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utils for testing executors.\"\"\"\n\nimport asyncio\nimport functools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow_federated.proto.v0 import computation_pb2 as pb\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.common_libs import serialization_utils\nfrom tensorflow_federated.python.common_libs import structure\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl import reference_executor\nfrom tensorflow_federated.python.core.impl.compiler import computation_factory\nfrom tensorflow_federated.python.core.impl.compiler import intrinsic_defs\nfrom tensorflow_federated.python.core.impl.compiler import tensorflow_computation_factory\nfrom tensorflow_federated.python.core.impl.context_stack import context_base\nfrom tensorflow_federated.python.core.impl.context_stack import context_stack_impl\nfrom tensorflow_federated.python.core.impl.executors import execution_context\nfrom tensorflow_federated.python.core.impl.executors import executor_base\nfrom 
tensorflow_federated.python.core.impl.executors import executor_stacks\nfrom tensorflow_federated.python.core.impl.executors import executor_value_base\nfrom tensorflow_federated.python.core.impl.types import placement_literals\nfrom tensorflow_federated.python.core.impl.types import type_factory\nfrom tensorflow_federated.python.core.impl.types import type_serialization\nfrom tensorflow_federated.python.core.impl.utils import tensorflow_utils\n\n\ndef install_executor(executor_factory_instance):\n context = execution_context.ExecutionContext(executor_factory_instance)\n return context_stack_impl.context_stack.install(context)\n\n\ndef executors(*args):\n \"\"\"A decorator for creating tests parameterized by executors.\n\n Note: To use this decorator your test is required to inherit from\n `parameterized.TestCase`.\n\n 1. The decorator can be specified without arguments:\n\n ```\n @executors\n def foo(self):\n ...\n ```\n\n 2. The decorator can be called with arguments:\n\n ```\n @executors(\n ('label', executor),\n ...\n )\n def foo(self):\n ...\n ```\n\n If the decorator is specified without arguments or is called with no\n arguments, by default this decorator will parameterize the test by the\n following executors:\n\n * reference executor\n * local executor\n\n If the decorator is called with arguments the arguments must be in a form that\n is accepted by `parameterized.named_parameters`.\n\n Args:\n *args: Either a test function to be decorated or named executors for the\n decorated method, either a single iterable, or a list of tuples or dicts.\n\n Returns:\n A test generator to be handled by `parameterized.TestGeneratorMetaclass`.\n \"\"\"\n\n def executor_decorator(fn):\n \"\"\"Create a wrapped function with custom execution contexts.\"\"\"\n\n def wrapped_fn(self, executor):\n \"\"\"Install a particular execution context before running `fn`.\"\"\"\n # Executors inheriting from `executor_base.Executor` will need to be\n # wrapped in an execution context. The `ReferenceExecutor` is special and\n # inherits from `context_base.Context`, so we don't wrap.\n if not isinstance(executor, context_base.Context):\n context = execution_context.ExecutionContext(executor)\n else:\n context = executor\n with context_stack_impl.context_stack.install(context):\n fn(self)\n\n return wrapped_fn\n\n def decorator(fn, *named_executors):\n \"\"\"Construct a custom `parameterized.named_parameter` decorator for `fn`.\"\"\"\n wraps_decorator = functools.wraps(fn)\n if not named_executors:\n named_executors = [\n ('reference', reference_executor.ReferenceExecutor()),\n ('local', executor_stacks.local_executor_factory()),\n ]\n named_parameters_decorator = parameterized.named_parameters(\n *named_executors)\n fn = executor_decorator(fn)\n fn = named_parameters_decorator(fn)\n fn = wraps_decorator(fn)\n return fn\n\n if len(args) == 1 and callable(args[0]):\n return decorator(args[0])\n else:\n return lambda x: decorator(x, *args)\n\n\nclass AsyncTestCase(absltest.TestCase):\n \"\"\"A test case that manages a new event loop for each test.\n\n Each test will have a new event loop instead of using the current event loop.\n This ensures that tests are isolated from each other and avoid unexpected side\n effects.\n\n Attributes:\n loop: An `asyncio` event loop.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.loop = asyncio.new_event_loop()\n\n # If `setUp()` fails, then `tearDown()` is not called; however cleanup\n # functions will be called. 
Register the newly created loop `close()`\n # function here to ensure it is closed after each test.\n self.addCleanup(self.loop.close)\n\n def run_sync(self, coro):\n return self.loop.run_until_complete(coro)\n\n\nclass TracingExecutor(executor_base.Executor):\n \"\"\"Tracing executor keeps a log of all calls for use in testing.\"\"\"\n\n def __init__(self, target):\n \"\"\"Creates a new instance of a tracing executor.\n\n The tracing executor keeps the trace of all calls. Entries in the trace\n consist of the method name followed by arguments and the returned result,\n with the executor values represented as integer indexes starting from 1.\n\n Args:\n target: An instance of `executor_base.Executor`.\n \"\"\"\n py_typecheck.check_type(target, executor_base.Executor)\n self._target = target\n self._last_used_index = 0\n self._trace = []\n\n @property\n def trace(self):\n return self._trace\n\n def _get_new_value_index(self):\n val_index = self._last_used_index + 1\n self._last_used_index = val_index\n return val_index\n\n async def create_value(self, value, type_spec=None):\n target_val = await self._target.create_value(value, type_spec)\n wrapped_val = TracingExecutorValue(self, self._get_new_value_index(),\n target_val)\n if type_spec is not None:\n self._trace.append(('create_value', value, type_spec, wrapped_val.index))\n else:\n self._trace.append(('create_value', value, wrapped_val.index))\n return wrapped_val\n\n async def create_call(self, comp, arg=None):\n if arg is not None:\n target_val = await self._target.create_call(comp.value, arg.value)\n wrapped_val = TracingExecutorValue(self, self._get_new_value_index(),\n target_val)\n self._trace.append(\n ('create_call', comp.index, arg.index, wrapped_val.index))\n return wrapped_val\n else:\n target_val = await self._target.create_call(comp.value)\n wrapped_val = TracingExecutorValue(self, self._get_new_value_index(),\n target_val)\n self._trace.append(('create_call', comp.index, wrapped_val.index))\n return wrapped_val\n\n async def create_struct(self, elements):\n target_val = await self._target.create_struct(\n structure.map_structure(lambda x: x.value, elements))\n wrapped_val = TracingExecutorValue(self, self._get_new_value_index(),\n target_val)\n self._trace.append(\n ('create_struct', structure.map_structure(lambda x: x.index,\n elements), wrapped_val.index))\n return wrapped_val\n\n def close(self):\n self._target.close()\n\n async def create_selection(self, source, index=None, name=None):\n target_val = await self._target.create_selection(\n source.value, index=index, name=name)\n wrapped_val = TracingExecutorValue(self, self._get_new_value_index(),\n target_val)\n self._trace.append(\n ('create_selection', source.index, index if index is not None else name,\n wrapped_val.index))\n return wrapped_val\n\n\nclass TracingExecutorValue(executor_value_base.ExecutorValue):\n \"\"\"A value managed by `TracingExecutor`.\"\"\"\n\n def __init__(self, owner, index, value):\n \"\"\"Creates an instance of a value in the tracing executor.\n\n Args:\n owner: An instance of `TracingExecutor`.\n index: An integer identifying the value.\n value: An embedded value from the target executor.\n \"\"\"\n py_typecheck.check_type(owner, TracingExecutor)\n py_typecheck.check_type(index, int)\n py_typecheck.check_type(value, executor_value_base.ExecutorValue)\n self._owner = owner\n self._index = index\n self._value = value\n\n @property\n def index(self):\n return self._index\n\n @property\n def value(self):\n return self._value\n\n @property\n def 
type_signature(self):\n return self._value.type_signature\n\n async def compute(self):\n result = await self._value.compute()\n self._owner.trace.append(('compute', self._index, result))\n return result\n\n\ndef create_dummy_intrinsic_def_federated_aggregate():\n value = intrinsic_defs.FEDERATED_AGGREGATE\n type_signature = computation_types.FunctionType([\n type_factory.at_clients(tf.float32),\n tf.float32,\n type_factory.reduction_op(tf.float32, tf.float32),\n type_factory.binary_op(tf.float32),\n computation_types.FunctionType(tf.float32, tf.float32),\n ], type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_apply():\n value = intrinsic_defs.FEDERATED_APPLY\n type_signature = computation_types.FunctionType([\n type_factory.unary_op(tf.float32),\n type_factory.at_server(tf.float32),\n ], type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_broadcast():\n value = intrinsic_defs.FEDERATED_BROADCAST\n type_signature = computation_types.FunctionType(\n type_factory.at_server(tf.float32),\n type_factory.at_clients(tf.float32, all_equal=True))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_collect():\n value = intrinsic_defs.FEDERATED_COLLECT\n type_signature = computation_types.FunctionType(\n type_factory.at_clients(tf.float32),\n type_factory.at_server(computation_types.SequenceType(tf.float32)))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_eval_at_clients():\n value = intrinsic_defs.FEDERATED_EVAL_AT_CLIENTS\n type_signature = computation_types.FunctionType(\n computation_types.FunctionType(None, tf.float32),\n type_factory.at_clients(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_eval_at_server():\n value = intrinsic_defs.FEDERATED_EVAL_AT_SERVER\n type_signature = computation_types.FunctionType(\n computation_types.FunctionType(None, tf.float32),\n type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_map():\n value = intrinsic_defs.FEDERATED_MAP\n type_signature = computation_types.FunctionType([\n type_factory.unary_op(tf.float32),\n type_factory.at_clients(tf.float32),\n ], type_factory.at_clients(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_map_all_equal():\n value = intrinsic_defs.FEDERATED_MAP_ALL_EQUAL\n type_signature = computation_types.FunctionType([\n type_factory.unary_op(tf.float32),\n type_factory.at_clients(tf.float32, all_equal=True),\n ], type_factory.at_clients(tf.float32, all_equal=True))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_mean():\n value = intrinsic_defs.FEDERATED_MEAN\n type_signature = computation_types.FunctionType(\n type_factory.at_clients(tf.float32), type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_reduce():\n value = intrinsic_defs.FEDERATED_REDUCE\n type_signature = computation_types.FunctionType([\n type_factory.at_clients(tf.float32),\n tf.float32,\n type_factory.reduction_op(tf.float32, tf.float32),\n ], type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_secure_sum():\n value = intrinsic_defs.FEDERATED_SECURE_SUM\n type_signature = computation_types.FunctionType([\n type_factory.at_clients(tf.float32),\n tf.float32,\n ], type_factory.at_server(tf.float32))\n return 
value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_sum():\n value = intrinsic_defs.FEDERATED_SUM\n type_signature = computation_types.FunctionType(\n type_factory.at_clients(tf.float32), type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_value_at_clients():\n value = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS\n type_signature = computation_types.FunctionType(\n tf.float32, type_factory.at_clients(tf.float32, all_equal=True))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_value_at_server():\n value = intrinsic_defs.FEDERATED_VALUE_AT_SERVER\n type_signature = computation_types.FunctionType(\n tf.float32, type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_weighted_mean():\n value = intrinsic_defs.FEDERATED_WEIGHTED_MEAN\n type_signature = computation_types.FunctionType([\n type_factory.at_clients(tf.float32),\n type_factory.at_clients(tf.float32),\n ], type_factory.at_server(tf.float32))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_zip_at_clients():\n value = intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS\n type_signature = computation_types.FunctionType([\n type_factory.at_clients(tf.float32),\n type_factory.at_clients(tf.float32)\n ], type_factory.at_clients([tf.float32, tf.float32]))\n return value, type_signature\n\n\ndef create_dummy_intrinsic_def_federated_zip_at_server():\n value = intrinsic_defs.FEDERATED_ZIP_AT_SERVER\n type_signature = computation_types.FunctionType(\n [type_factory.at_server(tf.float32),\n type_factory.at_server(tf.float32)],\n type_factory.at_server([tf.float32, tf.float32]))\n return value, type_signature\n\n\ndef create_dummy_placement_literal():\n \"\"\"Returns a `placement_literals.PlacementLiteral` and type.\"\"\"\n value = placement_literals.SERVER\n type_signature = computation_types.PlacementType()\n return value, type_signature\n\n\ndef create_dummy_computation_call():\n \"\"\"Returns a call computation and type.\"\"\"\n fn, fn_type = create_dummy_computation_tensorflow_constant()\n type_signature = fn_type.result\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n call=pb.Call(function=fn))\n return value, type_signature\n\n\ndef create_dummy_computation_intrinsic():\n \"\"\"Returns a intrinsic computation and type.\"\"\"\n intrinsic_def, type_signature = create_dummy_intrinsic_def_federated_eval_at_server(\n )\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n intrinsic=pb.Intrinsic(uri=intrinsic_def.uri))\n return value, type_signature\n\n\ndef create_dummy_computation_lambda_empty():\n \"\"\"Returns a lambda computation and type `( -> <>)`.\"\"\"\n value = computation_factory.create_lambda_empty_struct()\n type_signature = computation_types.FunctionType(None, [])\n return value, type_signature\n\n\ndef create_dummy_computation_lambda_identity():\n \"\"\"Returns a lambda computation and type `(float32 -> float32)`.\"\"\"\n tensor_type = computation_types.TensorType(tf.float32)\n value = computation_factory.create_lambda_identity(tensor_type)\n type_signature = computation_types.FunctionType(tensor_type, tensor_type)\n return value, type_signature\n\n\ndef create_dummy_computation_placement():\n \"\"\"Returns a placement computation and type.\"\"\"\n placement_literal, type_signature = create_dummy_placement_literal()\n value = pb.Computation(\n 
type=type_serialization.serialize_type(type_signature),\n placement=pb.Placement(uri=placement_literal.uri))\n return value, type_signature\n\n\ndef create_dummy_computation_reference():\n \"\"\"Returns a reference computation and type.\"\"\"\n type_signature = computation_types.TensorType(tf.float32)\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n reference=pb.Reference(name='a'))\n return value, type_signature\n\n\ndef create_dummy_computation_selection():\n \"\"\"Returns a selection computation and type.\"\"\"\n source, source_type = create_dummy_computation_tuple()\n type_signature = source_type[0]\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n selection=pb.Selection(source=source, index=0))\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_add():\n \"\"\"Returns a tensorflow computation and type.\n\n `(<float32,float32> -> float32)`\n \"\"\"\n type_spec = tf.float32\n\n with tf.Graph().as_default() as graph:\n parameter_1_value, parameter_1_binding = tensorflow_utils.stamp_parameter_in_graph(\n 'x', type_spec, graph)\n parameter_2_value, parameter_2_binding = tensorflow_utils.stamp_parameter_in_graph(\n 'y', type_spec, graph)\n result_value = tf.add(parameter_1_value, parameter_2_value)\n result_type, result_binding = tensorflow_utils.capture_result_from_graph(\n result_value, graph)\n\n parameter_type = computation_types.StructType([type_spec, type_spec])\n type_signature = computation_types.FunctionType(parameter_type, result_type)\n struct_binding = pb.TensorFlow.StructBinding(\n element=[parameter_1_binding, parameter_2_binding])\n parameter_binding = pb.TensorFlow.Binding(struct=struct_binding)\n tensorflow = pb.TensorFlow(\n graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),\n parameter=parameter_binding,\n result=result_binding)\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n tensorflow=tensorflow)\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_constant():\n \"\"\"Returns a tensorflow computation and type `( -> float32)`.\"\"\"\n value = 10.0\n tensor_type = computation_types.TensorType(tf.float32)\n value = tensorflow_computation_factory.create_constant(value, tensor_type)\n type_signature = computation_types.FunctionType(None, tensor_type)\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_empty():\n \"\"\"Returns a tensorflow computation and type `( -> <>)`.\"\"\"\n value = tensorflow_computation_factory.create_empty_tuple()\n type_signature = computation_types.FunctionType(None, [])\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_identity():\n \"\"\"Returns a tensorflow computation and type `(float32 -> float32)`.\"\"\"\n tensor_type = computation_types.TensorType(tf.float32)\n value = tensorflow_computation_factory.create_identity(tensor_type)\n type_signature = computation_types.FunctionType(tensor_type, tensor_type)\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_random():\n \"\"\"Returns a tensorflow computation and type `( -> float32)`.\"\"\"\n\n with tf.Graph().as_default() as graph:\n result = tf.random.normal([])\n result_type, result_binding = tensorflow_utils.capture_result_from_graph(\n result, graph)\n\n type_signature = computation_types.FunctionType(None, result_type)\n tensorflow = pb.TensorFlow(\n graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),\n parameter=None,\n 
result=result_binding)\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n tensorflow=tensorflow)\n return value, type_signature\n\n\ndef create_dummy_computation_tensorflow_tuple():\n \"\"\"Returns a tensorflow computation and type.\n\n `( -> <('a', float32), ('b', float32), ('c', float32)>)`\n \"\"\"\n value = 10.0\n\n with tf.Graph().as_default() as graph:\n names = ['a', 'b', 'c']\n result = structure.Struct((n, tf.constant(value)) for n in names)\n result_type, result_binding = tensorflow_utils.capture_result_from_graph(\n result, graph)\n\n type_signature = computation_types.FunctionType(None, result_type)\n tensorflow = pb.TensorFlow(\n graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),\n parameter=None,\n result=result_binding)\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n tensorflow=tensorflow)\n return value, type_signature\n\n\ndef create_dummy_computation_tuple():\n \"\"\"Returns a tuple computation and type.\"\"\"\n names = ['a', 'b', 'c']\n fn, fn_type = create_dummy_computation_tensorflow_constant()\n element_value = pb.Computation(\n type=type_serialization.serialize_type(fn_type),\n call=pb.Call(function=fn))\n element_type = fn_type.result\n elements = [pb.Struct.Element(name=n, value=element_value) for n in names]\n type_signature = computation_types.StructType(\n (n, element_type) for n in names)\n value = pb.Computation(\n type=type_serialization.serialize_type(type_signature),\n struct=pb.Struct(element=elements))\n return value, type_signature\n\n\ndef create_dummy_value_at_clients(number_of_clients: int = 3):\n \"\"\"Returns a Python value and federated type at clients.\"\"\"\n value = [float(x) for x in range(10, number_of_clients + 10)]\n type_signature = type_factory.at_clients(tf.float32)\n return value, type_signature\n\n\ndef create_dummy_value_at_clients_all_equal():\n \"\"\"Returns a Python value and federated type at clients and all equal.\"\"\"\n value = 10.0\n type_signature = type_factory.at_clients(tf.float32, all_equal=True)\n return value, type_signature\n\n\ndef create_dummy_value_at_server():\n \"\"\"Returns a Python value and federated type at server.\"\"\"\n value = 10.0\n type_signature = type_factory.at_server(tf.float32)\n return value, type_signature\n\n\ndef create_dummy_value_unplaced():\n \"\"\"Returns a Python value and unplaced type.\"\"\"\n value = 10.0\n type_signature = computation_types.TensorType(tf.float32)\n return value, type_signature\n" ]
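The `executors` decorator in the test-utils file above already sketches its two call forms in its docstring. As a hedged, standalone illustration (not part of the record), a test parameterized over the default reference and local executors could look like the sketch below; it assumes the decorator is in scope, that TFF's public API is importable as `tff`, and the computation under test is hypothetical.

import tensorflow as tf
import tensorflow_federated as tff
from absl.testing import parameterized


class AddOneTest(parameterized.TestCase):

  @executors  # assumed in scope; parameterizes over ('reference', 'local')
  def test_add_one(self):

    @tff.tf_computation(tf.float32)
    def add_one(x):
      return x + 1.0

    # The wrapped test body runs once per installed execution context.
    self.assertEqual(add_one(1.0), 2.0)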
[ [ "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.identity", "tensorflow.reduce_min", "numpy.random.rand", "numpy.array", "tensorflow.dtypes.as_dtype", "tensorflow.TensorSpec" ], [ "tensorflow.compat.v1.get_default_graph", "tensorflow.constant" ], [ "tensorflow.keras.metrics.Recall", "tensorflow.keras.metrics.Precision" ], [ "tensorflow.random.normal", "tensorflow.Graph", "tensorflow.add", "tensorflow.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
stefantaubert/waveglow
[ "5169ec751343a3e3008209a1a2f055e71a65908a", "5169ec751343a3e3008209a1a2f055e71a65908a" ]
[ "src/waveglow/core/inference.py", "src/waveglow/app/inference.py" ]
[ "import datetime\nfrom dataclasses import dataclass\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport imageio\nimport numpy as np\nimport torch\nfrom audio_utils import get_duration_s, normalize_wav\nfrom audio_utils.audio import concatenate_audios\nfrom audio_utils.mel import TacotronSTFT, plot_melspec_np\nfrom general_utils.generic_list import GenericList\nfrom image_utils import (calculate_structual_similarity_np,\n make_same_width_by_filling_white)\nfrom mcd import get_mcd_between_mel_spectograms\nfrom pandas import DataFrame\nfrom tqdm import tqdm\nfrom waveglow.core.model_checkpoint import CheckpointWaveglow\nfrom waveglow.core.synthesizer import InferenceResult, Synthesizer\nfrom waveglow.globals import MCD_NO_OF_COEFFS_PER_FRAME\nfrom waveglow.utils import cosine_dist_mels\n\n\n@dataclass\nclass InferMelEntry():\n identifier: str\n mel: np.ndarray\n mel_path: Path\n sr: int\n\n\n@dataclass\nclass InferenceEntry():\n entry: InferMelEntry = None\n inference_result: InferenceResult = None\n seed: int = None\n inferred_duration_s: float = None\n iteration: int = None\n mel_original_frames: int = None\n mel_inferred_frames: int = None\n mcd_dtw: float = None\n mcd_dtw_penalty: int = None\n mcd_dtw_frames: int = None\n mcd: float = None\n mcd_penalty: int = None\n mcd_frames: int = None\n structural_similarity: float = None\n cosine_similarity: float = None\n denoiser_strength: float = None\n sigma: float = None\n train_name: str = None\n mel_path: Path = None\n\n\nclass InferenceEntries(GenericList[InferenceEntry]):\n pass\n\n\ndef get_df(entries: InferenceEntries) -> DataFrame:\n if len(entries) == 0:\n return DataFrame()\n\n data = [\n {\n \"Id\": entry.entry.identifier,\n \"Timepoint\": f\"{entry.inference_result.timepoint:%Y/%m/%d %H:%M:%S}\",\n \"Iteration\": entry.iteration,\n \"Seed\": entry.seed,\n \"Sigma\": entry.sigma,\n \"Denoiser strength\": entry.denoiser_strength,\n \"Inference duration (s)\": entry.inference_result.inference_duration_s,\n \"Denoising duration (s)\": entry.inference_result.denoising_duration_s,\n \"Overamplified?\": entry.inference_result.was_overamplified,\n \"Inferred wav duration (s)\": entry.inferred_duration_s,\n \"# Frames original mel\": entry.mel_original_frames,\n \"# Frames inferred mel\": entry.mel_inferred_frames,\n \"# Difference frames\": entry.mel_inferred_frames - entry.mel_original_frames,\n \"Sampling rate (Hz)\": entry.inference_result.sampling_rate,\n \"Train name\": entry.train_name,\n \"Mel path\": str(entry.entry.mel_path),\n \"Mel sampling rate\": str(entry.entry.sr),\n }\n for entry in entries.items()\n ]\n\n df = DataFrame(\n data=[x.values() for x in data],\n columns=data[0].keys(),\n )\n\n return df\n\n\n@dataclass\nclass InferenceEntryOutput():\n identifier: int = None\n mel_orig: np.ndarray = None\n mel_orig_img: np.ndarray = None\n orig_sr: int = None\n inferred_sr: int = None\n mel_inferred_denoised: np.ndarray = None\n mel_inferred_denoised_img: np.ndarray = None\n wav_inferred_denoised: np.ndarray = None\n mel_denoised_diff_img: np.ndarray = None\n wav_inferred: np.ndarray = None\n\n\ndef mel_to_torch(mel: np.ndarray) -> np.ndarray:\n res = torch.FloatTensor(mel)\n res = res.cuda()\n return res\n\n\ndef infer(mel_entries: List[InferMelEntry], checkpoint: CheckpointWaveglow, custom_hparams: Optional[Dict[str, str]], denoiser_strength: float, sigma: float, sentence_pause_s: float, save_callback: Callable[[InferenceEntryOutput], None], concatenate: 
bool, seed: int, train_name: str, logger: Logger) -> Tuple[InferenceEntries, Tuple[Optional[np.ndarray], int]]:\n inference_entries = InferenceEntries()\n\n if len(mel_entries) == 0:\n logger.info(\"Nothing to synthesize!\")\n return inference_entries\n\n synth = Synthesizer(\n checkpoint=checkpoint,\n custom_hparams=custom_hparams,\n logger=logger\n )\n\n # Check mels have the same sampling rate as trained waveglow model\n for mel_entry in mel_entries:\n assert mel_entry.sr == synth.hparams.sampling_rate\n\n taco_stft = TacotronSTFT(synth.hparams, logger=logger)\n mels_torch = []\n mels_torch_prepared = []\n for mel_entry in mel_entries:\n mel_torch = mel_to_torch(mel_entry.mel)\n mels_torch.append(mel_torch)\n mel_var = torch.autograd.Variable(mel_torch)\n mel_var = mel_var.cuda()\n mel_var = mel_var.unsqueeze(0)\n mels_torch_prepared.append(mel_var)\n\n inference_results = synth.infer_all(\n mels_torch_prepared, sigma, denoiser_strength, seed=seed)\n\n complete_wav_denoised: Optional[np.ndarray] = None\n if concatenate:\n if len(inference_results) >= 1:\n logger.info(\"Concatening audios...\")\n complete_wav_denoised = concatenate_audios(\n [x.wav_denoised for x in inference_results], sentence_pause_s, synth.hparams.sampling_rate)\n complete_wav_denoised = normalize_wav(complete_wav_denoised)\n if len(inference_results) >= 1:\n logger.info(\"Done.\")\n\n inference_result: InferenceResult\n mel_entry: InferMelEntry\n for mel_entry, inference_result in tqdm(zip(mel_entries, inference_results)):\n wav_inferred_denoised_normalized = normalize_wav(inference_result.wav_denoised)\n\n val_entry = InferenceEntry(\n entry=mel_entry,\n inference_result=inference_result,\n iteration=checkpoint.iteration,\n inferred_duration_s=get_duration_s(\n inference_result.wav_denoised, inference_result.sampling_rate),\n denoiser_strength=denoiser_strength,\n sigma=sigma,\n seed=seed,\n train_name=train_name,\n )\n\n mel_orig = mel_entry.mel\n\n wav_inferred_denoised_normalized_tensor = torch.FloatTensor(wav_inferred_denoised_normalized)\n mel_inferred_denoised = taco_stft.get_mel_tensor(wav_inferred_denoised_normalized_tensor)\n mel_inferred_denoised = mel_inferred_denoised.numpy()\n\n validation_entry_output = InferenceEntryOutput(\n identifier=mel_entry.identifier,\n mel_orig=mel_orig,\n inferred_sr=inference_result.sampling_rate,\n mel_inferred_denoised=mel_inferred_denoised,\n wav_inferred_denoised=wav_inferred_denoised_normalized,\n orig_sr=mel_entry.sr,\n wav_inferred=normalize_wav(inference_result.wav),\n mel_denoised_diff_img=None,\n mel_inferred_denoised_img=None,\n mel_orig_img=None,\n )\n\n mcd_dtw, penalty_dtw, final_frame_number_dtw = get_mcd_between_mel_spectograms(\n mel_1=mel_orig,\n mel_2=mel_inferred_denoised,\n n_mfcc=MCD_NO_OF_COEFFS_PER_FRAME,\n take_log=False,\n use_dtw=True,\n )\n\n val_entry.mel_original_frames = mel_orig.shape[1]\n val_entry.mel_inferred_frames = mel_inferred_denoised.shape[1]\n val_entry.mcd_dtw = mcd_dtw\n val_entry.mcd_dtw_penalty = penalty_dtw\n val_entry.mcd_dtw_frames = final_frame_number_dtw\n\n mcd, penalty, final_frame_number = get_mcd_between_mel_spectograms(\n mel_1=mel_orig,\n mel_2=mel_inferred_denoised,\n n_mfcc=MCD_NO_OF_COEFFS_PER_FRAME,\n take_log=False,\n use_dtw=False,\n )\n\n val_entry.mcd = mcd\n val_entry.mcd_penalty = penalty\n val_entry.mcd_frames = final_frame_number\n\n cosine_similarity = cosine_dist_mels(mel_orig, mel_inferred_denoised)\n val_entry.cosine_similarity = cosine_similarity\n\n mel_original_img_raw, mel_original_img = 
plot_melspec_np(mel_orig)\n mel_inferred_denoised_img_raw, mel_inferred_denoised_img = plot_melspec_np(\n mel_inferred_denoised)\n\n validation_entry_output.mel_orig_img = mel_original_img\n validation_entry_output.mel_inferred_denoised_img = mel_inferred_denoised_img\n\n mel_original_img_raw_same_dim, mel_inferred_denoised_img_raw_same_dim = make_same_width_by_filling_white(\n img_a=mel_original_img_raw,\n img_b=mel_inferred_denoised_img_raw,\n )\n\n mel_original_img_same_dim, mel_inferred_denoised_img_same_dim = make_same_width_by_filling_white(\n img_a=mel_original_img,\n img_b=mel_inferred_denoised_img,\n )\n\n structural_similarity_raw, mel_difference_denoised_img_raw = calculate_structual_similarity_np(\n img_a=mel_original_img_raw_same_dim,\n img_b=mel_inferred_denoised_img_raw_same_dim,\n )\n val_entry.structural_similarity = structural_similarity_raw\n\n structural_similarity, mel_denoised_diff_img = calculate_structual_similarity_np(\n img_a=mel_original_img_same_dim,\n img_b=mel_inferred_denoised_img_same_dim,\n )\n validation_entry_output.mel_denoised_diff_img = mel_denoised_diff_img\n\n imageio.imsave(\"/tmp/mel_original_img_raw.png\", mel_original_img_raw)\n imageio.imsave(\"/tmp/mel_inferred_img_raw.png\", mel_inferred_denoised_img_raw)\n imageio.imsave(\"/tmp/mel_difference_denoised_img_raw.png\", mel_difference_denoised_img_raw)\n\n # logger.info(val_entry)\n logger.info(f\"Current: {val_entry.entry.identifier}\")\n logger.info(f\"MCD DTW: {val_entry.mcd_dtw}\")\n logger.info(f\"MCD DTW penalty: {val_entry.mcd_dtw_penalty}\")\n logger.info(f\"MCD DTW frames: {val_entry.mcd_dtw_frames}\")\n\n logger.info(f\"MCD: {val_entry.mcd}\")\n logger.info(f\"MCD penalty: {val_entry.mcd_penalty}\")\n logger.info(f\"MCD frames: {val_entry.mcd_frames}\")\n\n # logger.info(f\"MCD DTW V2: {val_entry.mcd_dtw_v2}\")\n logger.info(f\"Structural Similarity: {val_entry.structural_similarity}\")\n logger.info(f\"Cosine Similarity: {val_entry.cosine_similarity}\")\n save_callback(validation_entry_output)\n inference_entries.append(val_entry)\n\n return inference_entries, (complete_wav_denoised, synth.hparams.sampling_rate)\n", "import datetime\nfrom functools import partial\nfrom logging import getLogger\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom typing import Any, Dict, List, Optional\n\nimport imageio\nimport numpy as np\nimport regex as re\nfrom audio_utils import float_to_wav\nfrom general_utils import parse_json, pass_lines_list, save_json\nfrom image_utils import stack_images_vertically\nfrom image_utils.main import stack_images_horizontally\nfrom waveglow.app.defaults import (DEFAULT_DENOISER_STRENGTH,\n DEFAULT_READ_MEL_INFO_PATH,\n DEFAULT_SAVE_WAV_INFO_COPY_PATH,\n DEFAULT_SEED, DEFAULT_SENTENCE_PAUSE_S,\n DEFAULT_SIGMA)\nfrom waveglow.app.io import (get_checkpoints_dir, get_inference_root_dir,\n get_train_dir, get_wav_info_dict,\n get_wav_out_dict)\nfrom waveglow.core import (CheckpointWaveglow, InferenceEntries,\n InferenceEntryOutput)\nfrom waveglow.core import infer as infer_core\nfrom waveglow.core.inference import InferMelEntry, get_df\nfrom waveglow.utils import get_custom_or_last_checkpoint, prepare_logger\n\nOUTPUT_INFO_FILE_NAME = \"wav_out.json\"\n\n\ndef get_infer_dir(train_dir: Path, run_name: str) -> Path:\n #input_name = get_basename(wav_path)\n return get_inference_root_dir(train_dir) / run_name\n\n\ndef get_inferred_mel_dir(infer_dir: int, nr: int) -> Path:\n dest_dir = infer_dir / f\"{nr}\"\n return dest_dir\n\n\ndef save_results(output: 
InferenceEntryOutput, infer_dir: Path, denoised_audio_wav_paths: List[Dict[str, Any]]) -> None:\n dest_dir = get_inferred_mel_dir(infer_dir, output.identifier)\n dest_dir.mkdir(parents=True, exist_ok=True)\n\n imageio.imsave(dest_dir / \"original.png\", output.mel_orig_img)\n imageio.imsave(dest_dir / \"inferred_denoised.png\", output.mel_inferred_denoised_img)\n imageio.imsave(dest_dir / \"diff.png\", output.mel_denoised_diff_img)\n np.save(dest_dir / \"original.mel.npy\", output.mel_orig)\n np.save(dest_dir / \"inferred_denoised.mel.npy\", output.mel_inferred_denoised)\n\n inferred_denoised_path = dest_dir / \"inferred_denoised.wav\"\n float_to_wav(output.wav_inferred_denoised, inferred_denoised_path, sample_rate=output.inferred_sr)\n\n pat = re.compile(\"id=([0-9]*)_\")\n entry_id = re.findall(pat, output.identifier)\n if len(entry_id) == 1:\n inferred_denoised_path_copy = infer_dir / f\"{entry_id[0]}.wav\"\n copyfile(inferred_denoised_path, inferred_denoised_path_copy)\n\n float_to_wav(output.wav_inferred, dest_dir / \"inferred.wav\", sample_rate=output.inferred_sr)\n\n stack_images_vertically(\n list_im=[\n dest_dir / \"original.png\",\n dest_dir / \"inferred_denoised.png\",\n dest_dir / \"diff.png\",\n ],\n out_path=dest_dir / \"comparison.png\"\n )\n\n wav_info = get_wav_info_dict(\n identifier=output.identifier,\n path=inferred_denoised_path,\n sr=output.inferred_sr,\n )\n\n denoised_audio_wav_paths.append(wav_info)\n\n\ndef save_stats(infer_dir: Path, entries: InferenceEntries) -> None:\n path = infer_dir / \"total.csv\"\n df = get_df(entries)\n df.to_csv(path, sep=\"\\t\", header=True, index=False)\n\n\ndef mel_inferred_denoised_v_plot(infer_dir: Path, sentences: InferenceEntries) -> None:\n paths = [get_inferred_mel_dir(infer_dir, x.entry.identifier) / \"inferred_denoised.png\"\n for x in sentences.items()]\n path = infer_dir / \"inferred_denoised_v.png\"\n stack_images_vertically(paths, path)\n\n\ndef mel_inferred_denoised_h_plot(infer_dir: Path, sentences: InferenceEntries) -> None:\n paths = [get_inferred_mel_dir(infer_dir, x.entry.identifier) / \"inferred_denoised.png\"\n for x in sentences.items()]\n path = infer_dir / \"inferred_denoised_h.png\"\n stack_images_horizontally(paths, path)\n\n\ndef infer_parse_json(base_dir: Path, train_name: str, json_path: Path = DEFAULT_READ_MEL_INFO_PATH, custom_checkpoint: Optional[int] = None, sigma: float = DEFAULT_SIGMA, denoiser_strength: float = DEFAULT_DENOISER_STRENGTH, sentence_pause_s: Optional[float] = DEFAULT_SENTENCE_PAUSE_S, custom_hparams: Optional[Dict[str, str]] = None, no_concatenation: bool = False, seed: int = DEFAULT_SEED, copy_wav_info_to: Optional[str] = DEFAULT_SAVE_WAV_INFO_COPY_PATH) -> None:\n logger = getLogger(__name__)\n if not json_path.is_file():\n logger.info(\"Json file not found.\")\n return\n\n json_content = parse_json(json_path)\n if len(json_content) == 0:\n logger.info(\"No mels found in this file.\")\n return\n\n logger.info(\"Inferring these mels:\")\n pass_lines_list(logger.info, [x[\"path\"] for x in json_content[\"mels\"]])\n\n name = json_content[\"name\"]\n\n train_dir = get_train_dir(base_dir, train_name)\n assert train_dir.is_dir()\n\n checkpoint_path, iteration = get_custom_or_last_checkpoint(\n get_checkpoints_dir(train_dir), custom_checkpoint)\n\n mel_entries = []\n for mel_data in json_content[\"mels\"]:\n mel_entry = InferMelEntry(\n identifier=mel_data[\"id\"],\n mel=np.load(mel_data[\"path\"]),\n sr=mel_data[\"sr\"],\n mel_path=Path(mel_data[\"path\"]),\n )\n 
mel_entries.append(mel_entry)\n\n run_name = f\"{datetime.datetime.now():%Y-%m-%d,%H-%M-%S}__mels={len(mel_entries)}__it={iteration}____{name}\"\n\n infer_dir = get_infer_dir(train_dir, run_name)\n infer_dir.mkdir(parents=True, exist_ok=True)\n\n _infer(\n infer_dir=infer_dir,\n run_name=run_name,\n mel_entries=mel_entries,\n checkpoint_path=checkpoint_path,\n custom_hparams=custom_hparams,\n denoiser_strength=denoiser_strength,\n no_concatenation=no_concatenation,\n sentence_pause_s=sentence_pause_s,\n sigma=sigma,\n seed=seed,\n copy_wav_info_to=copy_wav_info_to,\n train_name=train_name,\n )\n\n\ndef infer(base_dir: Path, train_name: str, mel_paths: List[Path], sampling_rate: int, custom_checkpoint: Optional[int] = None, sigma: float = DEFAULT_SIGMA, denoiser_strength: float = DEFAULT_DENOISER_STRENGTH, sentence_pause_s: Optional[float] = DEFAULT_SENTENCE_PAUSE_S, custom_hparams: Optional[Dict[str, str]] = None, no_concatenation: bool = False, seed: int = DEFAULT_SEED, copy_wav_info_to: Optional[str] = DEFAULT_SAVE_WAV_INFO_COPY_PATH) -> None:\n train_dir = get_train_dir(base_dir, train_name)\n assert train_dir.is_dir()\n\n checkpoint_path, iteration = get_custom_or_last_checkpoint(\n get_checkpoints_dir(train_dir), custom_checkpoint)\n\n run_name = f\"{datetime.datetime.now():%Y-%m-%d,%H-%M-%S}__mels={len(mel_paths)}__it={iteration}\"\n\n infer_dir = get_infer_dir(train_dir, run_name)\n infer_dir.mkdir(parents=True, exist_ok=True)\n\n mel_entries = []\n for identifier, mel_path in zip(range(1, len(mel_paths) + 1), mel_paths):\n mel_entry = InferMelEntry(\n identifier=identifier,\n mel=np.load(mel_path),\n sr=sampling_rate,\n mel_path=mel_path,\n )\n mel_entries.append(mel_entry)\n\n _infer(\n infer_dir=infer_dir,\n run_name=run_name,\n mel_entries=mel_entries,\n checkpoint_path=checkpoint_path,\n custom_hparams=custom_hparams,\n denoiser_strength=denoiser_strength,\n no_concatenation=no_concatenation,\n sentence_pause_s=sentence_pause_s,\n sigma=sigma,\n seed=seed,\n copy_wav_info_to=copy_wav_info_to,\n train_name=train_name,\n )\n\n\ndef _infer(infer_dir: Path, run_name: str, checkpoint_path: Path, mel_entries: List[InferMelEntry], sigma: float, denoiser_strength: float, sentence_pause_s: Optional[float], custom_hparams: Optional[Dict[str, str]], no_concatenation: bool, seed: int, copy_wav_info_to: Optional[Path], train_name: str) -> None:\n logger = prepare_logger(infer_dir / \"log.txt\")\n\n checkpoint = CheckpointWaveglow.load(checkpoint_path, logger)\n concatenate = not no_concatenation\n\n denoised_audio_wav_paths: List[Dict[str, Any]] = []\n save_callback = partial(\n save_results,\n infer_dir=infer_dir,\n denoised_audio_wav_paths=denoised_audio_wav_paths\n )\n\n inference_results, complete = infer_core(\n mel_entries=mel_entries,\n checkpoint=checkpoint,\n custom_hparams=custom_hparams,\n denoiser_strength=denoiser_strength,\n sigma=sigma,\n sentence_pause_s=sentence_pause_s,\n logger=logger,\n save_callback=save_callback,\n concatenate=concatenate,\n seed=seed,\n train_name=train_name,\n )\n\n if concatenate:\n complete_wav_denoised, complete_wav_denoised_sr = complete\n assert complete_wav_denoised is not None\n assert complete_wav_denoised_sr is not None\n float_to_wav(complete_wav_denoised, infer_dir / \"complete_denoised.wav\",\n sample_rate=complete_wav_denoised_sr)\n\n logger.info(\"Creating mel_inferred_denoised_v.png\")\n mel_inferred_denoised_v_plot(infer_dir, inference_results)\n\n logger.info(\"Creating mel_inferred_denoised_h.png\")\n 
mel_inferred_denoised_h_plot(infer_dir, inference_results)\n\n logger.info(\"Creating total.csv\")\n save_stats(infer_dir, inference_results)\n\n wav_paths_json = save_denoised_audio_wav_paths(\n infer_dir=infer_dir,\n name=run_name,\n denoised_audio_wav_paths=denoised_audio_wav_paths,\n )\n\n logger.info(\"Wrote all inferred mel paths including sampling rate into these file(s):\")\n logger.info(wav_paths_json)\n\n if copy_wav_info_to is not None:\n copy_wav_info_to.parent.mkdir(parents=True, exist_ok=True)\n copyfile(wav_paths_json, copy_wav_info_to)\n logger.info(copy_wav_info_to)\n\n logger.info(f\"Saved output to: {infer_dir}\")\n\n # save_infer_wav(infer_dir, wav_sr, wav)\n # save_infer_plot(infer_dir, wav_mel)\n # save_infer_orig_wav(infer_dir, wav_path)\n # save_infer_orig_plot(infer_dir, orig_mel)\n # score = save_diff_plot(infer_dir)\n # save_v(infer_dir)\n\n # logger.info(f\"Imagescore: {score*100}%\")\n # logger.info(f\"Saved output to: {infer_dir}\")\n\n\ndef save_denoised_audio_wav_paths(infer_dir: Path, name: str, denoised_audio_wav_paths: List[Dict[str, Any]]) -> str:\n info_json = get_wav_out_dict(\n name=name,\n root_dir=infer_dir,\n wav_info_dict=denoised_audio_wav_paths,\n )\n\n path = infer_dir / OUTPUT_INFO_FILE_NAME\n save_json(path, info_json)\n #text = '\\n'.join(mel_postnet_npy_paths)\n #save_txt(path, text)\n return path\n" ]
[ [ "torch.FloatTensor", "pandas.DataFrame", "torch.autograd.Variable" ], [ "numpy.load", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Noczio/VoorSpelling
[ "51e30ab3f3b2e346c6eb56578818020e142a3adb", "51e30ab3f3b2e346c6eb56578818020e142a3adb" ]
[ "AppVoor/resources/backend_scripts/parameter_search.py", "AppVoor/tests/split_data_test.py" ]
[ "from abc import ABC, abstractmethod\nfrom typing import Any\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GridSearchCV\nfrom skopt import BayesSearchCV\nfrom skopt.space import Real, Integer, Categorical\n\nfrom resources.backend_scripts.switcher import Switch\n\nNpArray = np.ndarray\nDataFrame = pd.DataFrame\n\n\nclass ParameterSearch(ABC):\n\n @abstractmethod\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n pass\n\n\nclass BayesianSearch(ParameterSearch):\n\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n clf = BayesSearchCV(estimator=model, search_spaces=parameters, cv=n_folds_validation,\n verbose=10, scoring=score_type)\n clf.fit(x, y)\n best_params = clf.best_params_\n best_score = clf.best_score_\n return best_params, best_score\n\n\nclass GridSearch(ParameterSearch):\n\n def search_parameters(self, x: DataFrame, y: NpArray, parameters: dict,\n n_folds_validation: int, model: Any, score_type: str) -> tuple:\n clf = GridSearchCV(estimator=model, param_grid=parameters, cv=n_folds_validation,\n verbose=10, scoring=score_type)\n clf.fit(x, y)\n best_params = clf.best_params_\n best_score = clf.best_score_\n return best_params, best_score\n\n\nclass ParameterSearchPossibilities(Switch):\n\n @staticmethod\n def BS() -> BayesianSearch:\n return BayesianSearch()\n\n @staticmethod\n def GS() -> GridSearch:\n return GridSearch()\n\n @staticmethod\n def BayesianSearch() -> BayesianSearch:\n return BayesianSearch()\n\n @staticmethod\n def GridSearch() -> GridSearch:\n return GridSearch()\n\n\nclass BayesianSearchParametersPossibilities(Switch):\n\n @staticmethod\n def LinearSVC() -> dict:\n return {'C': Real(1, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'dual': Categorical([False]),\n 'penalty': Categorical(['l1', 'l2']),\n 'intercept_scaling': Real(1, 50, prior='log-uniform')}\n\n @staticmethod\n def SVC() -> dict:\n return {'C': Real(1, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'gamma': Categorical(['scale', 'auto']),\n 'kernel': Categorical(['rbf', 'sigmoid'])}\n\n @staticmethod\n def KNeighborsClassifier() -> dict:\n return {'n_neighbors': Integer(1, 40),\n 'weights': Categorical(['uniform', 'distance']),\n 'leaf_size': Integer(30, 100),\n 'p': Integer(1, 30),\n 'algorithm': Categorical(['auto', 'ball_tree', 'kd_tree', 'brute'])}\n\n @staticmethod\n def GaussianNB() -> dict:\n return {'var_smoothing': Real(0.000000001, 100, prior='log-uniform')}\n\n @staticmethod\n def LinearSVR() -> dict:\n return {'epsilon': Real(0, 30, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'C': Real(1, 30, prior='log-uniform'),\n 'loss': Categorical(['epsilon_insensitive', 'squared_epsilon_insensitive']),\n 'dual': Categorical([False])}\n\n @staticmethod\n def SVR() -> dict:\n return {'gamma': Categorical(['scale', 'auto']),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'C': Real(1, 30, prior='log-uniform'),\n 'epsilon': Real(0.1, 30, prior='log-uniform'),\n 'kernel': Categorical(['rbf', 'sigmoid'])}\n\n @staticmethod\n def Lasso() -> dict:\n return {'alpha': Real(1, 40, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'selection': Categorical(['cyclic', 'random']),\n 'positive': Categorical([True, False])}\n\n @staticmethod\n def SGDClassifier() -> dict:\n return {'penalty': 
Categorical(['l2', 'l1', 'elasticnet']),\n 'alpha': Real(0.0001, 40, prior='log-uniform'),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def AffinityPropagation() -> dict:\n return {'damping': Real(0.5, 1, prior='log-uniform'),\n 'convergence_iter': Integer(15, 100),\n 'affinity': Categorical(['euclidean', 'precomputed']),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def KMeans() -> dict:\n return {'n_clusters': Integer(1, 50),\n 'tol': Real(0.0001, 1, prior='log-uniform'),\n 'random_state': Integer(0, 1000),\n 'algorithm': Categorical(['auto', 'full', 'elkan'])}\n\n @staticmethod\n def MiniBatchKMeans() -> dict:\n return {'n_clusters': Integer(1, 50),\n 'tol': Real(0, 1, prior='log-uniform'),\n 'batch_size': Integer(100, 512),\n 'reassignment_ratio': Real(0.01, 5, prior='log-uniform'),\n 'random_state': Integer(0, 1000)}\n\n @staticmethod\n def MeanShift() -> dict:\n return {'bin_seeding': Categorical([True, False]),\n 'cluster_all': Categorical([True, False]),\n 'min_bin_freq': Integer(1, 30)}\n\n\nclass GridSearchParametersPossibilities(Switch):\n\n @staticmethod\n def LinearSVC() -> dict:\n return {'C': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'dual': (False,),\n 'penalty': ('l1', 'l2'),\n 'intercept_scaling': np.arange(1, 22, 5)}\n\n @staticmethod\n def SVC() -> dict:\n return {'C': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'gamma': ('scale', 'auto'),\n 'kernel': ('rbf', 'sigmoid')}\n\n @staticmethod\n def KNeighborsClassifier() -> dict:\n return {'n_neighbors': np.arange(1, 32, 5),\n 'weights': ('uniform', 'distance'),\n 'leaf_size': (30, 50, 70, 100),\n 'p': (1, 2, 3, 5, 10, 15),\n 'algorithm': ('auto', 'ball_tree', 'kd_tree', 'brute')}\n\n @staticmethod\n def GaussianNB() -> dict:\n return {'var_smoothing': [0.000000001, 0.00000001, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] +\n list(np.arange(1, 101, 1))}\n\n @staticmethod\n def LinearSVR() -> dict:\n return {'epsilon': np.arange(0, 22, 3),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'C': np.arange(1, 32, 5),\n 'loss': ('epsilon_insensitive', 'squared_epsilon_insensitive'),\n 'dual': (False,)}\n\n @staticmethod\n def SVR() -> dict:\n return {'gamma': ('scale', 'auto'),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'C': np.arange(1, 32, 5),\n 'epsilon': (0.1, 1, 2, 3, 4, 5),\n 'kernel': ('rbf', 'sigmoid')}\n\n @staticmethod\n def Lasso() -> dict:\n return {'alpha': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'selection': ('cyclic', 'random'),\n 'positive': (True, False)}\n\n @staticmethod\n def SGDClassifier() -> dict:\n return {'penalty': ('l2', 'l1', 'elasticnet'),\n 'alpha': (0.0001, 0.01, 1, 2, 3, 4, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 'random_state': np.arange(0, 2500, 500)}\n\n @staticmethod\n def AffinityPropagation() -> dict:\n return {'damping': np.arange(0.5, 1.1, 0.1),\n 'convergence_iter': (15, 30, 40, 50),\n 'affinity': ('euclidean', 'precomputed'),\n 'random_state': np.arange(0, 2500, 500)}\n\n @staticmethod\n def KMeans() -> dict:\n return {'n_clusters': np.arange(1, 32, 5),\n 'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0] + list(np.arange(1, 5.5, 0.5)),\n 
'random_state': np.arange(0, 2500, 500),\n                'algorithm': ('auto', 'full', 'elkan')}\n\n    @staticmethod\n    def MiniBatchKMeans() -> dict:\n        return {'n_clusters': np.arange(1, 32, 5),\n                'tol': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n                'batch_size': np.arange(100, 600, 100),\n                'reassignment_ratio': (0.01, 0.1, 1, 3, 5),\n                'random_state': np.arange(0, 2500, 500)}\n\n    @staticmethod\n    def MeanShift() -> dict:\n        return {'bin_seeding': (True, False),\n                'cluster_all': (True, False),\n                'min_bin_freq': np.arange(1, 32, 1)}\n\n\nclass ParameterSearchCreator:\n\n    @staticmethod\n    def create_parameter_selector(selection_type: str) -> ParameterSearch:\n        try:\n            parameter_search_name = selection_type.replace(\" \", \"\")\n            parameter_search_method = ParameterSearchPossibilities.case(parameter_search_name)\n            return parameter_search_method\n        except AttributeError:\n            # lookup failed: surface the valid options in the error message\n            available_types = ParameterSearchCreator.get_available_types()\n            types_as_string = \", \".join(available_types)\n            raise AttributeError(f\"Parameter value is wrong. \"\n                                 f\"It should be any of the following: {types_as_string}\")\n\n    @staticmethod\n    def get_available_types() -> tuple:\n        available_types = [func for func in dir(ParameterSearchPossibilities)\n                           if callable(getattr(ParameterSearchPossibilities, func)) and not\n                           (func.startswith(\"__\") or func == \"case\")]\n        return tuple(available_types)\n", "import unittest\n\nimport pandas as pd\nimport numpy as np\n\nfrom resources.backend_scripts.is_data import DataEnsurer\nfrom resources.backend_scripts.load_data import LoaderCreator\nfrom resources.backend_scripts.split_data import SplitterReturner\n\n\nclass MyTestCase(unittest.TestCase):\n    _loader_creator = LoaderCreator()\n\n    def test_single_split_columns_match(self):\n        # load diabetes.csv from disk\n        folder_name = \"datasets\"\n        file_name = \"diabetes.csv\"\n        test_full_path = \".\\\\..\\\\\" + folder_name + \"\\\\\" + file_name\n        csv_type = self._loader_creator.create_loader(test_full_path, \"CSV\")\n        df = csv_type.get_file_transformed()\n        expected_y_len, expected_x_len = df.shape # true prediction and data len with shape method\n        # shape returns original column value. 
x doesn't have prediction column, so it must be original value - 1\n expected_x_len -= 1\n # use of splitterReturner with a NormalSplitter implementation\n splitter = SplitterReturner()\n x, y = splitter.split_x_y_from_df(df)\n # do the values match in both x and y dataframes\n self.assertEqual(len(x.columns), expected_x_len)\n self.assertEqual(len(y), expected_y_len)\n\n def test_single_split_returns_a_tuple(self):\n # load diabetes.csv from disk\n folder_name = \"datasets\"\n file_name = \"diabetes.csv\"\n test_full_path = \".\\\\..\\\\\" + folder_name + \"\\\\\" + file_name\n csv_type = self._loader_creator.create_loader(test_full_path, \"CSV\")\n df = csv_type.get_file_transformed()\n # use of splitterReturner with a NormalSplitter implementation\n splitter = SplitterReturner()\n # split dataframe into x and y\n data = splitter.split_x_y_from_df(df)\n result = DataEnsurer.validate_py_data(data, tuple)\n self.assertTrue(result)\n\n def test_single_split_x_and_y_is_a_dataframe_and_numpy_array(self):\n # load diabetes.csv from disk\n folder_name = \"datasets\"\n file_name = \"diabetes.csv\"\n test_full_path = \".\\\\..\\\\\" + folder_name + \"\\\\\" + file_name\n csv_type = self._loader_creator.create_loader(test_full_path, \"CSV\")\n df = csv_type.get_file_transformed()\n # use of splitterReturner with a NormalSplitter implementation\n splitter = SplitterReturner()\n # split dataframe into x and y\n data = splitter.split_x_y_from_df(df)\n results = [isinstance(data[0], pd.DataFrame), isinstance(data[-1], np.ndarray)]\n # are all outputs True?\n for r in results:\n self.assertTrue(r)\n\n def test_train_test_split_size_zero_is_wrong(self):\n # load diabetes.csv from disk\n folder_name = \"datasets\"\n file_name = \"diabetes.csv\"\n test_full_path = \".\\\\..\\\\\" + folder_name + \"\\\\\" + file_name\n csv_type = self._loader_creator.create_loader(test_full_path, \"CSV\")\n df = csv_type.get_file_transformed()\n # use of splitterReturner with a NormalSplitter implementation\n with self.assertRaises(ValueError):\n splitter = SplitterReturner()\n # split dataframe into x and y, then use train_and_test_split\n x, y = splitter.split_x_y_from_df(df)\n _ = splitter.train_and_test_split(x, y, 0.0) # 80 percent of data should be training and the other 20 is\n\n def test_train_test_split_size_less_than_zero_is_wrong(self):\n # load diabetes.csv from disk\n folder_name = \"datasets\"\n file_name = \"diabetes.csv\"\n test_full_path = \".\\\\..\\\\\" + folder_name + \"\\\\\" + file_name\n csv_type = self._loader_creator.create_loader(test_full_path, \"CSV\")\n df = csv_type.get_file_transformed()\n # this should raise a ValueError because size = -0.5 is not a valid number\n with self.assertRaises(ValueError):\n # use of splitterReturner with a NormalSplitter implementation\n splitter = SplitterReturner()\n # split dataframe into x and y, then use train_and_test_split\n x, y = splitter.split_x_y_from_df(df)\n _ = splitter.train_and_test_split(x, y, -0.5) # -0.5 is not a valid value\n\n def test_split_into_x_and_y_is_not_a_valid_dataframe(self):\n # dummy dictionary\n temp_dict = {'x': [i for i in range(200)]}\n # transform dictionary to dataframe\n df = pd.DataFrame.from_dict(temp_dict)\n # this should raise a TypeError because dataframe doesnt meet column requirements\n with self.assertRaises(TypeError):\n splitter = SplitterReturner()\n _, _ = splitter.split_x_y_from_df(df)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.arange", "sklearn.model_selection.GridSearchCV" ], [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
svenaoki/zenml
[ "b94dff83f0e7c8ab29e99d6b42a0c906a3512b63", "b94dff83f0e7c8ab29e99d6b42a0c906a3512b63" ]
[ "examples/not_so_quickstart/steps/torch_steps.py", "examples/caching/run.py" ]
[ "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom zenml.steps import step\n\nfrom .params import TrainerConfig\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Net(nn.Module):\n \"\"\"Straightforward NN for classification.\"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.flat_network = nn.Sequential(\n nn.Flatten(),\n nn.Linear(784, 10),\n )\n # fully connected layer, output 10 classes\n self.out = nn.Linear(10, 10)\n\n def forward(self, x):\n x = self.flat_network(x)\n x = self.out(x)\n output = self.out(x)\n return output\n\n\ndef get_data_loader_from_np(X: np.ndarray, y: np.ndarray) -> DataLoader:\n \"\"\"Returns a torch Dataloader from two np arrays.\"\"\"\n tensor_x = torch.Tensor(X) # transform to torch tensor\n tensor_y = torch.Tensor(y).type(torch.LongTensor)\n\n torch_dataset = TensorDataset(tensor_x, tensor_y) # create your dataset\n torch_dataloader = DataLoader(torch_dataset) # create your dataloader\n return torch_dataloader\n\n\n@step\ndef torch_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> nn.Module:\n \"\"\"Train a neural net from scratch to recognize MNIST digits return our\n model or the learner\"\"\"\n train_loader = get_data_loader_from_np(X_train, y_train)\n\n model = Net().to(DEVICE)\n optimizer = optim.Adadelta(model.parameters(), lr=config.lr)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=config.gamma)\n for epoch in range(1, config.epochs + 1):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(DEVICE), target.to(DEVICE)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n return model\n\n\n@step\ndef torch_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: nn.Module,\n) -> float:\n \"\"\"Calculate the loss for the model for each epoch in a graph\"\"\"\n model.eval()\n test_loader = get_data_loader_from_np(X_test, y_test)\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(DEVICE), target.to(DEVICE)\n output = model(data)\n test_loss += F.nll_loss(\n output, target, reduction=\"sum\"\n ).item() # sum up batch loss\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print(\n \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n return correct / len(test_loader.dataset)\n", "# Copyright (c) ZenML GmbH 
2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom zenml.pipelines import pipeline\nfrom zenml.steps import BaseStepConfig, Output, step\n\n\nclass TrainerConfig(BaseStepConfig):\n \"\"\"Trainer params\"\"\"\n\n epochs: int = 1\n gamma: float = 0.7\n lr: float = 0.001\n\n\n@step\ndef importer_mnist() -> Output(\n X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray\n):\n \"\"\"Download the MNIST data store it as an artifact\"\"\"\n (X_train, y_train), (\n X_test,\n y_test,\n ) = tf.keras.datasets.mnist.load_data()\n return X_train, y_train, X_test, y_test\n\n\n@step\ndef normalizer(\n X_train: np.ndarray, X_test: np.ndarray\n) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):\n \"\"\"Normalize the values for all the images so they are between 0 and 1\"\"\"\n X_train_normed = X_train / 255.0\n X_test_normed = X_test / 255.0\n return X_train_normed, X_test_normed\n\n\n@step\ndef tf_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> tf.keras.Model:\n \"\"\"Train a neural net from scratch to recognize MNIST digits return our\n model or the learner\"\"\"\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n )\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(0.001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n\n model.fit(\n X_train,\n y_train,\n epochs=config.epochs,\n )\n\n # write model\n return model\n\n\n@step\ndef tf_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: tf.keras.Model,\n) -> float:\n \"\"\"Calculate the loss for the model for each epoch in a graph\"\"\"\n\n _, test_acc = model.evaluate(X_test, y_test, verbose=2)\n return test_acc\n\n\n# Define the pipeline\n@pipeline\ndef mnist_pipeline(\n importer,\n normalizer,\n trainer,\n evaluator,\n):\n # Link all the steps artifacts together\n X_train, y_train, X_test, y_test = importer()\n X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)\n model = trainer(X_train=X_trained_normed, y_train=y_train)\n evaluator(X_test=X_test_normed, y_test=y_test, model=model)\n\n\n# Initialize a pipeline run\nrun_1 = mnist_pipeline(\n importer=importer_mnist(),\n normalizer=normalizer(),\n trainer=tf_trainer(config=TrainerConfig(epochs=1)),\n evaluator=tf_evaluator(),\n)\n\n# Run the pipeline\nrun_1.run()\n\n# Initialize a pipeline run again\nrun_2 = mnist_pipeline(\n importer=importer_mnist(),\n normalizer=normalizer(),\n trainer=tf_trainer(config=TrainerConfig(epochs=2)),\n evaluator=tf_evaluator(),\n)\n\n# Run the pipeline again\nrun_2.run()\n" ]
[ [ "torch.Tensor", "torch.nn.functional.nll_loss", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.nn.Flatten", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.optim.lr_scheduler.StepLR" ], [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
gotgenes/CpGHMMExample
[ "d55e02ee930da040808e278bd1216ddc25116d0f" ]
[ "CpGHMMExample/gencpgdata.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Copyright (c) 2012 Christopher D. Lasher\n#\n# This software is released under the MIT License. Please see\n# LICENSE.txt for details.\n\n\n\"\"\"Generates a random sequence with CpG islands.\n\nThis script produces three outfiles:\n\n* a FASTA format sequence file\n\n* a file containing the start and end positions of CpG islands\n\n* a file containing the parameters of the transitions\n\"\"\"\n\nimport argparse\nimport bisect\nimport random\nimport textwrap\n\nimport numpy as np\n\nimport logging\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\nSTREAM_HANDLER = logging.StreamHandler()\nSTREAM_HANDLER.setLevel(logging.INFO)\nLOGGER.addHandler(STREAM_HANDLER)\nFORMATTER = logging.Formatter('%(message)s')\nSTREAM_HANDLER.setFormatter(FORMATTER)\n\nALPHABET = 'ACGT'\n\n# Rows are ACGT, columns are ACGT\n_CPG_CPG_PROBABILITIES = np.array([\n [\n 0.1,\n 0.4,\n 0.4,\n 0.1\n ],\n [\n 0.05,\n 0.45,\n 0.45,\n 0.05\n ],\n [\n 0.05,\n 0.45,\n 0.45,\n 0.05\n ],\n [\n 0.1,\n 0.4,\n 0.4,\n 0.1\n ],\n])\n\n# Rows are ACGT, columns are ACGT\n_NORMAL_NORMAL_PROBABILITIES = np.array([\n [\n 0.25,\n 0.25,\n 0.25,\n 0.25\n ],\n [\n 0.15,\n 0.35,\n 0.35,\n 0.15\n ],\n [\n 0.15,\n 0.35,\n 0.35,\n 0.15\n ],\n [\n 0.25,\n 0.25,\n 0.25,\n 0.25\n ],\n])\n\n_CPG_TO_NORMAL_TRANSITION_PROB = 0.005\n_NORMAL_TO_CPG_TRANSITION_PROB = 0.0025\n\n_CPG_PROBABILITIES = np.concatenate(\n (\n (1 - _CPG_TO_NORMAL_TRANSITION_PROB) * _CPG_CPG_PROBABILITIES,\n _CPG_TO_NORMAL_TRANSITION_PROB * _NORMAL_NORMAL_PROBABILITIES\n ),\n 1\n)\n_NORMAL_PROBABILITIES = np.concatenate(\n (\n _NORMAL_TO_CPG_TRANSITION_PROB * _CPG_CPG_PROBABILITIES,\n (1 - _NORMAL_TO_CPG_TRANSITION_PROB) * _NORMAL_NORMAL_PROBABILITIES\n ),\n 1\n)\n\nTRANSITION_PROBABILITIES = np.concatenate(\n (_CPG_PROBABILITIES, _NORMAL_PROBABILITIES))\n\nTRANSITION_CUMSUMS = TRANSITION_PROBABILITIES.cumsum(1).tolist()\nfor row in TRANSITION_CUMSUMS:\n row[-1] = 1.0\n\n\ndef generate_sequence(length):\n \"\"\"Generates the random sequence, including CpG islands.\n\n :param length: length of the sequence to generate\n :returns: a randomly generated sequence, and a list of start and end\n positions of CpG sites within the sequence\n\n \"\"\"\n sequence = []\n cpg_sites = []\n cpg_start = None\n in_cpg = False\n start = random.randrange(len(TRANSITION_CUMSUMS))\n sequence.append(ALPHABET[start % 4])\n if start < 4:\n in_cpg = True\n cpg_start = start\n prev_index = start\n for x in range(1, length):\n random_value = random.random()\n transition_index = bisect.bisect_left(\n TRANSITION_CUMSUMS[prev_index], random_value)\n sequence.append(ALPHABET[transition_index % 4])\n if transition_index < 4:\n if not in_cpg:\n cpg_start = x\n in_cpg = True\n else:\n if in_cpg:\n cpg_sites.append((cpg_start, x - 1))\n in_cpg = False\n prev_index = transition_index\n\n if in_cpg:\n cpg_sites.append((cpg_start, length - 1))\n\n return ''.join(sequence), cpg_sites\n\n\ndef wrap_sequence(sequence, width=50):\n return '\\n'.join(sequence[i:i+width] for i in\n xrange(0, len(sequence), width))\n\n\ndef output_sequence(outfileh, sequence):\n \"\"\"Writes the sequence to the outfile in FASTA format.\n\n :param outfileh: @todo\n :param sequence: @todo\n :returns: @todo\n\n \"\"\"\n outfileh.write('>testcpg\\n')\n formatted_sequence = wrap_sequence(sequence, 50)\n outfileh.write(formatted_sequence)\n\n\ndef output_sites(outfileh, sites):\n \"\"\"Writes the CpG start and end positions to a CSV-format file.\n\n :param outfileh: @todo\n 
:param sites: @todo\n :returns: @todo\n\n \"\"\"\n outlines = (\"{},{}\\n\".format(start, end) for (start, end) in sites)\n outfileh.writelines(outlines)\n\n\ndef make_cli_parser():\n \"\"\"Creates the command-line interface.\n\n :returns: an :py:class:`argparse.ArgumentParser` instance\n\n \"\"\"\n cli_parser = argparse.ArgumentParser(description=__doc__)\n cli_parser.add_argument(\n 'length', type=int, help=\"length of sequence to generate\")\n return cli_parser\n\n\ndef main(argv=None):\n cli_parser = make_cli_parser()\n args = cli_parser.parse_args(argv)\n LOGGER.info(\"Generating random CpG sequence.\")\n sequence, cpg_sites = generate_sequence(args.length)\n LOGGER.info(\"Writing sequence to test_cpg_sequence.fasta\")\n with open('test_cpg_sequence.fasta', 'w') as fasta_outfile:\n output_sequence(fasta_outfile, sequence)\n LOGGER.info(\"Writing CpG site positions to test_cpg_sites.csv\")\n with open('test_cpg_sites.csv', 'w') as positions_outfile:\n output_sites(positions_outfile, cpg_sites)\n LOGGER.info(\"Writing transition probabilities to \"\n \"test_cpg_transitions.csv\")\n np.savetxt('test_cpg_transitions.csv', TRANSITION_PROBABILITIES,\n delimiter=',')\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.savetxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wangyusu/pymatgen
[ "a90af2fe71eff15134ca33c6e58f07caba425ae9", "a90af2fe71eff15134ca33c6e58f07caba425ae9", "a90af2fe71eff15134ca33c6e58f07caba425ae9", "a90af2fe71eff15134ca33c6e58f07caba425ae9" ]
[ "pymatgen/symmetry/tests/test_analyzer.py", "pymatgen/analysis/tests/test_piezo_sensitivity.py", "pymatgen/electronic_structure/tests/test_plotter.py", "pymatgen/analysis/defects/corrections.py" ]
[ "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport unittest\nfrom pathlib import Path\nimport os\nimport numpy as np\n\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.core.sites import PeriodicSite\nfrom pymatgen.core.structure import Molecule, Structure\nfrom pymatgen.io.cif import CifParser\nfrom pymatgen.io.vasp.inputs import Poscar\nfrom pymatgen.io.vasp.outputs import Vasprun\nfrom pymatgen.symmetry.analyzer import (\n PointGroupAnalyzer,\n SpacegroupAnalyzer,\n cluster_sites,\n iterative_symmetrize,\n)\nfrom pymatgen.util.testing import PymatgenTest\n\n\ntest_dir_mol = os.path.join(PymatgenTest.TEST_FILES_DIR, \"molecules\")\n\n\nclass SpacegroupAnalyzerTest(PymatgenTest):\n def setUp(self):\n p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR\"))\n self.structure = p.structure\n self.sg = SpacegroupAnalyzer(self.structure, 0.001)\n self.disordered_structure = self.get_structure(\"Li10GeP2S12\")\n self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001)\n s = p.structure.copy()\n site = s[0]\n del s[0]\n s.append(site.species, site.frac_coords)\n self.sg3 = SpacegroupAnalyzer(s, 0.001)\n graphite = self.get_structure(\"Graphite\")\n graphite.add_site_property(\"magmom\", [0.1] * len(graphite))\n self.sg4 = SpacegroupAnalyzer(graphite, 0.001)\n self.structure4 = graphite\n\n def test_primitive(self):\n s = Structure.from_spacegroup(\"Fm-3m\", np.eye(3) * 3, [\"Cu\"], [[0, 0, 0]])\n a = SpacegroupAnalyzer(s)\n self.assertEqual(len(s), 4)\n self.assertEqual(len(a.find_primitive()), 1)\n\n def test_is_laue(self):\n s = Structure.from_spacegroup(\"Fm-3m\", np.eye(3) * 3, [\"Cu\"], [[0, 0, 0]])\n a = SpacegroupAnalyzer(s)\n self.assertTrue(a.is_laue())\n\n def test_magnetic(self):\n lfp = PymatgenTest.get_structure(\"LiFePO4\")\n sg = SpacegroupAnalyzer(lfp, 0.1)\n self.assertEqual(sg.get_space_group_symbol(), \"Pnma\")\n magmoms = [0] * len(lfp)\n magmoms[4] = 1\n magmoms[5] = -1\n magmoms[6] = 1\n magmoms[7] = -1\n lfp.add_site_property(\"magmom\", magmoms)\n sg = SpacegroupAnalyzer(lfp, 0.1)\n self.assertEqual(sg.get_space_group_symbol(), \"Pnma\")\n\n def test_get_space_symbol(self):\n self.assertEqual(self.sg.get_space_group_symbol(), \"Pnma\")\n self.assertEqual(self.disordered_sg.get_space_group_symbol(), \"P4_2/nmc\")\n self.assertEqual(self.sg3.get_space_group_symbol(), \"Pnma\")\n self.assertEqual(self.sg4.get_space_group_symbol(), \"P6_3/mmc\")\n\n def test_get_space_number(self):\n self.assertEqual(self.sg.get_space_group_number(), 62)\n self.assertEqual(self.disordered_sg.get_space_group_number(), 137)\n self.assertEqual(self.sg4.get_space_group_number(), 194)\n\n def test_get_hall(self):\n self.assertEqual(self.sg.get_hall(), \"-P 2ac 2n\")\n self.assertEqual(self.disordered_sg.get_hall(), \"P 4n 2n -1n\")\n\n def test_get_pointgroup(self):\n self.assertEqual(self.sg.get_point_group_symbol(), \"mmm\")\n self.assertEqual(self.disordered_sg.get_point_group_symbol(), \"4/mmm\")\n\n def test_get_symmetry_operations(self):\n\n for sg, structure in [(self.sg, self.structure), (self.sg4, self.structure4)]:\n\n pgops = sg.get_point_group_operations()\n fracsymmops = sg.get_symmetry_operations()\n symmops = sg.get_symmetry_operations(True)\n latt = structure.lattice\n for fop, op, pgop in zip(fracsymmops, symmops, pgops):\n # translation vector values should all be 0 or 0.5\n t = fop.translation_vector * 2\n self.assertArrayAlmostEqual(t - np.round(t), 0)\n\n 
self.assertArrayAlmostEqual(fop.rotation_matrix, pgop.rotation_matrix)\n for site in structure:\n newfrac = fop.operate(site.frac_coords)\n newcart = op.operate(site.coords)\n self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))\n found = False\n newsite = PeriodicSite(site.species, newcart, latt, coords_are_cartesian=True)\n for testsite in structure:\n if newsite.is_periodic_image(testsite, 1e-3):\n found = True\n break\n self.assertTrue(found)\n\n # Make sure this works for any position, not just the atomic\n # ones.\n random_fcoord = np.random.uniform(size=(3))\n random_ccoord = latt.get_cartesian_coords(random_fcoord)\n newfrac = fop.operate(random_fcoord)\n newcart = op.operate(random_ccoord)\n self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))\n\n def test_get_symmetry_dataset(self):\n ds = self.sg.get_symmetry_dataset()\n self.assertEqual(ds[\"international\"], \"Pnma\")\n\n def test_get_crystal_system(self):\n crystal_system = self.sg.get_crystal_system()\n self.assertEqual(\"orthorhombic\", crystal_system)\n self.assertEqual(\"tetragonal\", self.disordered_sg.get_crystal_system())\n\n def test_get_refined_structure(self):\n for a in self.sg.get_refined_structure().lattice.angles:\n self.assertEqual(a, 90)\n refined = self.disordered_sg.get_refined_structure()\n for a in refined.lattice.angles:\n self.assertEqual(a, 90)\n self.assertEqual(refined.lattice.a, refined.lattice.b)\n s = self.get_structure(\"Li2O\")\n sg = SpacegroupAnalyzer(s, 0.01)\n self.assertEqual(sg.get_refined_structure().num_sites, 4 * s.num_sites)\n\n def test_get_symmetrized_structure(self):\n symm_struct = self.sg.get_symmetrized_structure()\n for a in symm_struct.lattice.angles:\n self.assertEqual(a, 90)\n self.assertEqual(len(symm_struct.equivalent_sites), 5)\n\n symm_struct = self.disordered_sg.get_symmetrized_structure()\n self.assertEqual(len(symm_struct.equivalent_sites), 8)\n self.assertEqual([len(i) for i in symm_struct.equivalent_sites], [16, 4, 8, 4, 2, 8, 8, 8])\n s1 = symm_struct.equivalent_sites[1][1]\n s2 = symm_struct[symm_struct.equivalent_indices[1][1]]\n self.assertEqual(s1, s2)\n self.assertEqual(self.sg4.get_symmetrized_structure()[0].magmom, 0.1)\n self.assertEqual(symm_struct.wyckoff_symbols[0], \"16h\")\n # self.assertEqual(symm_struct[0].wyckoff, \"16h\")\n\n # Check copying\n self.assertEqual(symm_struct.copy(), symm_struct)\n d = symm_struct.as_dict()\n from pymatgen.symmetry.structure import SymmetrizedStructure\n\n ss = SymmetrizedStructure.from_dict(d)\n self.assertEqual(ss.wyckoff_symbols[0], \"16h\")\n self.assertIn(\"SymmetrizedStructure\", ss.__str__())\n\n def test_find_primitive(self):\n \"\"\"\n F m -3 m Li2O testing of converting to primitive cell\n \"\"\"\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"Li2O.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure)\n primitive_structure = s.find_primitive()\n self.assertEqual(primitive_structure.formula, \"Li2 O1\")\n # This isn't what is expected. 
All the angles should be 60\n self.assertAlmostEqual(primitive_structure.lattice.alpha, 60)\n self.assertAlmostEqual(primitive_structure.lattice.beta, 60)\n self.assertAlmostEqual(primitive_structure.lattice.gamma, 60)\n self.assertAlmostEqual(primitive_structure.lattice.volume, structure.lattice.volume / 4.0)\n\n def test_get_ir_reciprocal_mesh(self):\n grid = self.sg.get_ir_reciprocal_mesh()\n self.assertEqual(len(grid), 216)\n self.assertAlmostEqual(grid[1][0][0], 0.1)\n self.assertAlmostEqual(grid[1][0][1], 0.0)\n self.assertAlmostEqual(grid[1][0][2], 0.0)\n self.assertAlmostEqual(grid[1][1], 2)\n\n def test_get_conventional_standard_structure(self):\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"bcc_1927.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 9.1980270633769461)\n self.assertAlmostEqual(conv.lattice.b, 9.1980270633769461)\n self.assertAlmostEqual(conv.lattice.c, 9.1980270633769461)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"btet_1915.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 5.0615106678044235)\n self.assertAlmostEqual(conv.lattice.b, 5.0615106678044235)\n self.assertAlmostEqual(conv.lattice.c, 4.2327080177761687)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orci_1010.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 2.9542233922299999)\n self.assertAlmostEqual(conv.lattice.b, 4.6330325651443296)\n self.assertAlmostEqual(conv.lattice.c, 5.373703587040775)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orcc_1003.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 4.1430033493799998)\n self.assertAlmostEqual(conv.lattice.b, 31.437979757624728)\n self.assertAlmostEqual(conv.lattice.c, 3.99648651)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orac_632475.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 3.1790663399999999)\n self.assertAlmostEqual(conv.lattice.b, 9.9032878699999998)\n self.assertAlmostEqual(conv.lattice.c, 3.5372412099999999)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"monoc_1028.cif\"))\n structure = 
parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 117.53832420192903)\n self.assertAlmostEqual(conv.lattice.gamma, 90)\n self.assertAlmostEqual(conv.lattice.a, 14.033435583000625)\n self.assertAlmostEqual(conv.lattice.b, 3.96052850731)\n self.assertAlmostEqual(conv.lattice.c, 6.8743926325200002)\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"hex_1170.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 90)\n self.assertAlmostEqual(conv.lattice.beta, 90)\n self.assertAlmostEqual(conv.lattice.gamma, 120)\n self.assertAlmostEqual(conv.lattice.a, 3.699919902005897)\n self.assertAlmostEqual(conv.lattice.b, 3.699919902005897)\n self.assertAlmostEqual(conv.lattice.c, 6.9779585500000003)\n\n structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"tric_684654.json\"))\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n conv = s.get_conventional_standard_structure()\n self.assertAlmostEqual(conv.lattice.alpha, 74.09581916308757)\n self.assertAlmostEqual(conv.lattice.beta, 75.72817279281173)\n self.assertAlmostEqual(conv.lattice.gamma, 63.63234318667333)\n self.assertAlmostEqual(conv.lattice.a, 3.741372924048738)\n self.assertAlmostEqual(conv.lattice.b, 3.9883228679270686)\n self.assertAlmostEqual(conv.lattice.c, 7.288495840048958)\n\n def test_get_primitive_standard_structure(self):\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"bcc_1927.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.beta, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.gamma, 109.47122063400001)\n self.assertAlmostEqual(prim.lattice.a, 7.9657251015812145)\n self.assertAlmostEqual(prim.lattice.b, 7.9657251015812145)\n self.assertAlmostEqual(prim.lattice.c, 7.9657251015812145)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"btet_1915.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 105.015053349)\n self.assertAlmostEqual(prim.lattice.beta, 105.015053349)\n self.assertAlmostEqual(prim.lattice.gamma, 118.80658411899999)\n self.assertAlmostEqual(prim.lattice.a, 4.1579321075608791)\n self.assertAlmostEqual(prim.lattice.b, 4.1579321075608791)\n self.assertAlmostEqual(prim.lattice.c, 4.1579321075608791)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orci_1010.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 134.78923546600001)\n self.assertAlmostEqual(prim.lattice.beta, 105.856239333)\n self.assertAlmostEqual(prim.lattice.gamma, 91.276341676000001)\n self.assertAlmostEqual(prim.lattice.a, 3.8428217771014852)\n self.assertAlmostEqual(prim.lattice.b, 3.8428217771014852)\n self.assertAlmostEqual(prim.lattice.c, 3.8428217771014852)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orcc_1003.cif\"))\n structure = 
parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 164.985257335)\n self.assertAlmostEqual(prim.lattice.a, 15.854897098324196)\n self.assertAlmostEqual(prim.lattice.b, 15.854897098324196)\n self.assertAlmostEqual(prim.lattice.c, 3.99648651)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"orac_632475.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 144.40557588533386)\n self.assertAlmostEqual(prim.lattice.a, 5.2005185662155391)\n self.assertAlmostEqual(prim.lattice.b, 5.2005185662155391)\n self.assertAlmostEqual(prim.lattice.c, 3.5372412099999999)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"monoc_1028.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 63.579155761999999)\n self.assertAlmostEqual(prim.lattice.beta, 116.42084423747779)\n self.assertAlmostEqual(prim.lattice.gamma, 148.47965136208569)\n self.assertAlmostEqual(prim.lattice.a, 7.2908007159612325)\n self.assertAlmostEqual(prim.lattice.b, 7.2908007159612325)\n self.assertAlmostEqual(prim.lattice.c, 6.8743926325200002)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"hex_1170.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 90)\n self.assertAlmostEqual(prim.lattice.beta, 90)\n self.assertAlmostEqual(prim.lattice.gamma, 120)\n self.assertAlmostEqual(prim.lattice.a, 3.699919902005897)\n self.assertAlmostEqual(prim.lattice.b, 3.699919902005897)\n self.assertAlmostEqual(prim.lattice.c, 6.9779585500000003)\n\n parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, \"rhomb_3478_conv.cif\"))\n structure = parser.get_structures(False)[0]\n s = SpacegroupAnalyzer(structure, symprec=1e-2)\n prim = s.get_primitive_standard_structure()\n self.assertAlmostEqual(prim.lattice.alpha, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.beta, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.gamma, 28.049186140546812)\n self.assertAlmostEqual(prim.lattice.a, 5.9352627428399982)\n self.assertAlmostEqual(prim.lattice.b, 5.9352627428399982)\n self.assertAlmostEqual(prim.lattice.c, 5.9352627428399982)\n\n def test_tricky_structure(self):\n # for some reason this structure kills spglib1.9\n # 1.7 can't find symmetry either, but at least doesn't kill python\n s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR.tricky_symmetry\"))\n sa = SpacegroupAnalyzer(s, 0.1)\n sa.get_space_group_symbol()\n sa.get_space_group_number()\n sa.get_point_group_symbol()\n sa.get_crystal_system()\n sa.get_hall()\n\n\nclass SpacegroupTest(unittest.TestCase):\n def setUp(self):\n p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"POSCAR\"))\n self.structure = p.structure\n self.sg1 = SpacegroupAnalyzer(self.structure, 0.001).get_space_group_operations()\n\n def test_are_symmetrically_equivalent(self):\n sites1 = 
[self.structure[i] for i in [0, 1]]\n sites2 = [self.structure[i] for i in [2, 3]]\n self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))\n\n sites1 = [self.structure[i] for i in [0, 1]]\n sites2 = [self.structure[i] for i in [0, 2]]\n self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))\n\n\nH2O2 = Molecule(\n [\"O\", \"O\", \"H\", \"H\"],\n [\n [0, 0.727403, -0.050147],\n [0, -0.727403, -0.050147],\n [0.83459, 0.897642, 0.401175],\n [-0.83459, -0.897642, 0.401175],\n ],\n)\n\nC2H2F2Br2 = Molecule(\n [\"C\", \"C\", \"F\", \"Br\", \"H\", \"F\", \"H\", \"Br\"],\n [\n [-0.752000, 0.001000, -0.141000],\n [0.752000, -0.001000, 0.141000],\n [-1.158000, 0.991000, 0.070000],\n [-1.240000, -0.737000, 0.496000],\n [-0.924000, -0.249000, -1.188000],\n [1.158000, -0.991000, -0.070000],\n [0.924000, 0.249000, 1.188000],\n [1.240000, 0.737000, -0.496000],\n ],\n)\n\nH2O = Molecule(\n [\"H\", \"O\", \"H\"],\n [[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]],\n)\n\nC2H4 = Molecule(\n [\"C\", \"C\", \"H\", \"H\", \"H\", \"H\"],\n [\n [0.0000, 0.0000, 0.6695],\n [0.0000, 0.0000, -0.6695],\n [0.0000, 0.9289, 1.2321],\n [0.0000, -0.9289, 1.2321],\n [0.0000, 0.9289, -1.2321],\n [0.0000, -0.9289, -1.2321],\n ],\n)\n\nNH3 = Molecule(\n [\"N\", \"H\", \"H\", \"H\"],\n [\n [0.0000, 0.0000, 0.0000],\n [0.0000, -0.9377, -0.3816],\n [0.8121, 0.4689, -0.3816],\n [-0.8121, 0.4689, -0.3816],\n ],\n)\n\nBF3 = Molecule(\n [\"B\", \"F\", \"F\", \"F\"],\n [\n [0.0000, 0.0000, 0.0000],\n [0.0000, -0.9377, 0.00],\n [0.8121, 0.4689, 0],\n [-0.8121, 0.4689, 0],\n ],\n)\n\nCH4 = Molecule(\n [\"C\", \"H\", \"H\", \"H\", \"H\"],\n [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000],\n ],\n)\n\nPF6 = Molecule(\n [\"P\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\"],\n [[0, 0, 0], [0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], [-1, 0, 0]],\n)\n\n\nclass PointGroupAnalyzerTest(PymatgenTest):\n def test_spherical(self):\n a = PointGroupAnalyzer(CH4)\n self.assertEqual(a.sch_symbol, \"Td\")\n self.assertEqual(len(a.get_pointgroup()), 24)\n a = PointGroupAnalyzer(PF6)\n self.assertEqual(a.sch_symbol, \"Oh\")\n self.assertEqual(len(a.get_pointgroup()), 48)\n m = Molecule.from_file(os.path.join(test_dir_mol, \"c60.xyz\"))\n a = PointGroupAnalyzer(m)\n self.assertEqual(a.sch_symbol, \"Ih\")\n\n cube_species = [\"C\", \"C\", \"C\", \"C\", \"C\", \"C\", \"C\", \"C\"]\n cube_coords = [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [1, 1, 0],\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1],\n ]\n\n m = Molecule(cube_species, cube_coords)\n a = PointGroupAnalyzer(m, 0.1)\n self.assertEqual(a.sch_symbol, \"Oh\")\n\n def test_tricky(self):\n m = Molecule.from_file(os.path.join(test_dir_mol, \"dh.xyz\"))\n a = PointGroupAnalyzer(m, 0.1)\n self.assertEqual(a.sch_symbol, \"D*h\")\n\n def test_linear(self):\n coords = [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [0, 0.000000, -1.08],\n ]\n mol = Molecule([\"C\", \"H\", \"H\"], coords)\n a = PointGroupAnalyzer(mol)\n self.assertEqual(a.sch_symbol, \"D*h\")\n mol = Molecule([\"C\", \"H\", \"N\"], coords)\n a = PointGroupAnalyzer(mol)\n self.assertEqual(a.sch_symbol, \"C*v\")\n\n def test_asym_top(self):\n coords = [\n [0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, 
-0.363000],\n ]\n mol = Molecule([\"C\", \"H\", \"F\", \"Br\", \"Cl\"], coords)\n a = PointGroupAnalyzer(mol)\n\n self.assertEqual(a.sch_symbol, \"C1\")\n self.assertEqual(len(a.get_pointgroup()), 1)\n coords = [\n [0.000000, 0.000000, 1.08],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000],\n ]\n cs_mol = Molecule([\"H\", \"F\", \"Cl\", \"Cl\"], coords)\n a = PointGroupAnalyzer(cs_mol)\n self.assertEqual(a.sch_symbol, \"Cs\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n a = PointGroupAnalyzer(C2H2F2Br2)\n self.assertEqual(a.sch_symbol, \"Ci\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n\n def test_cyclic(self):\n a = PointGroupAnalyzer(H2O2)\n self.assertEqual(a.sch_symbol, \"C2\")\n self.assertEqual(len(a.get_pointgroup()), 2)\n a = PointGroupAnalyzer(H2O)\n self.assertEqual(a.sch_symbol, \"C2v\")\n self.assertEqual(len(a.get_pointgroup()), 4)\n a = PointGroupAnalyzer(NH3)\n self.assertEqual(a.sch_symbol, \"C3v\")\n self.assertEqual(len(a.get_pointgroup()), 6)\n cs2 = Molecule.from_file(os.path.join(test_dir_mol, \"Carbon_Disulfide.xyz\"))\n a = PointGroupAnalyzer(cs2, eigen_tolerance=0.001)\n self.assertEqual(a.sch_symbol, \"C2v\")\n\n def test_dihedral(self):\n a = PointGroupAnalyzer(C2H4)\n self.assertEqual(a.sch_symbol, \"D2h\")\n self.assertEqual(len(a.get_pointgroup()), 8)\n a = PointGroupAnalyzer(BF3)\n self.assertEqual(a.sch_symbol, \"D3h\")\n self.assertEqual(len(a.get_pointgroup()), 12)\n m = Molecule.from_file(os.path.join(test_dir_mol, \"b12h12.xyz\"))\n a = PointGroupAnalyzer(m)\n self.assertEqual(a.sch_symbol, \"Ih\")\n\n def test_symmetrize_molecule1(self):\n np.random.seed(77)\n distortion = np.random.randn(len(C2H4), 3) / 10\n dist_mol = Molecule(C2H4.species, C2H4.cart_coords + distortion)\n\n eq = iterative_symmetrize(dist_mol, max_n=100, epsilon=1e-7)\n sym_mol, eq_sets, ops = eq[\"sym_mol\"], eq[\"eq_sets\"], eq[\"sym_ops\"]\n\n self.assertTrue({0, 1} in eq_sets.values())\n self.assertTrue({2, 3, 4, 5} in eq_sets.values())\n\n coords = sym_mol.cart_coords\n for i, eq_set in eq_sets.items():\n for j in eq_set:\n rotated = np.dot(ops[i][j], coords[i])\n self.assertTrue(np.allclose(np.dot(ops[i][j], coords[i]), coords[j]))\n\n def test_symmetrize_molecule2(self):\n np.random.seed(77)\n distortion = np.random.randn(len(C2H2F2Br2), 3) / 20\n dist_mol = Molecule(C2H2F2Br2.species, C2H2F2Br2.cart_coords + distortion)\n PA1 = PointGroupAnalyzer(C2H2F2Br2, tolerance=0.1)\n self.assertTrue(PA1.get_pointgroup().sch_symbol == \"Ci\")\n PA2 = PointGroupAnalyzer(dist_mol, tolerance=0.1)\n self.assertTrue(PA2.get_pointgroup().sch_symbol == \"C1\")\n eq = iterative_symmetrize(dist_mol, tolerance=0.3)\n PA3 = PointGroupAnalyzer(eq[\"sym_mol\"], tolerance=0.1)\n self.assertTrue(PA3.get_pointgroup().sch_symbol == \"Ci\")\n\n def test_get_kpoint_weights(self):\n for name in [\"SrTiO3\", \"LiFePO4\", \"Graphite\"]:\n s = PymatgenTest.get_structure(name)\n a = SpacegroupAnalyzer(s)\n ir_mesh = a.get_ir_reciprocal_mesh((4, 4, 4))\n weights = [i[1] for i in ir_mesh]\n weights = np.array(weights) / sum(weights)\n for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])):\n self.assertAlmostEqual(i, w)\n\n for name in [\"SrTiO3\", \"LiFePO4\", \"Graphite\"]:\n s = PymatgenTest.get_structure(name)\n a = SpacegroupAnalyzer(s)\n ir_mesh = a.get_ir_reciprocal_mesh((1, 2, 3))\n weights = [i[1] for i in ir_mesh]\n weights = np.array(weights) / sum(weights)\n for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in 
ir_mesh])):\n self.assertAlmostEqual(i, w)\n\n v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, \"vasprun.xml\"))\n a = SpacegroupAnalyzer(v.final_structure)\n wts = a.get_kpoint_weights(v.actual_kpoints)\n\n for w1, w2 in zip(v.actual_kpoints_weights, wts):\n self.assertAlmostEqual(w1, w2)\n\n kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]]\n self.assertRaises(ValueError, a.get_kpoint_weights, kpts)\n\n\nclass FuncTest(unittest.TestCase):\n def test_cluster_sites(self):\n o, c = cluster_sites(CH4, 0.1)\n self.assertEqual(o.specie.symbol, \"C\")\n self.assertEqual(len(c), 1)\n o, c = cluster_sites(C2H2F2Br2.get_centered_molecule(), 0.1)\n self.assertIsNone(o)\n self.assertEqual(len(c), 4)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\n\"\"\"\nTest for the piezo tensor class\n\"\"\"\n\n__author__ = \"Handong Ling\"\n__version__ = \"0.1\"\n__maintainer__ = \"Handong Ling\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"4/23/19\"\n\nimport os\nimport unittest\n\nimport numpy as np\n\nimport pymatgen\nfrom pymatgen.analysis.piezo import PiezoTensor\nfrom pymatgen.analysis.piezo_sensitivity import *\nfrom pymatgen.symmetry import site_symmetries as ss\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer as sga\nfrom pymatgen.util.testing import PymatgenTest\n\ntry:\n from phonopy import Phonopy\nexcept ImportError:\n Phonopy = None\n\ntest_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, \"piezo_sensitivity\")\n\n\nclass PiezoSensitivityTest(PymatgenTest):\n def setUp(self):\n self.piezo_struc = self.get_structure(\"Pb2TiZrO6\")\n self.IST = np.load(os.path.join(test_dir, \"pztist.npy\"), allow_pickle=True)\n self.BEC = np.load(os.path.join(test_dir, \"pztborn.npy\"), allow_pickle=True)\n self.FCM = np.load(os.path.join(test_dir, \"pztfcm.npy\"), allow_pickle=True)\n self.pointops = np.load(os.path.join(test_dir, \"pointops.npy\"), allow_pickle=True)\n self.sharedops = np.load(os.path.join(test_dir, \"sharedops.npy\"), allow_pickle=True)\n self.BEC_operations = np.load(os.path.join(test_dir, \"becops.npy\"), allow_pickle=True)\n self.IST_operations = np.load(os.path.join(test_dir, \"istops.npy\"), allow_pickle=True)\n self.FCM_operations = np.load(os.path.join(test_dir, \"fcmops.npy\"), allow_pickle=True)\n self.piezo = np.array(\n [\n [\n [5.32649351e-03, -1.33404642e-14, -6.86958142e02],\n [-1.33404642e-14, 4.95526253e-03, -5.60353712e-13],\n [-6.86958142e02, -5.60353712e-13, 1.33209787e-02],\n ],\n [\n [4.86622567e-03, 3.14840965e-13, -7.41608150e-13],\n [3.14840965e-13, 5.23745666e-03, -6.68536818e02],\n [-7.41608150e-13, -6.68536818e02, 1.35025755e-02],\n ],\n [\n [-1.01086427e02, 3.20177004e-14, -3.68487214e-14],\n [3.20177004e-14, -1.01086427e02, 1.22012318e-14],\n [-3.68487214e-14, 1.22012318e-14, -5.32241086e02],\n ],\n ]\n )\n\n def test_BornEffectiveChargeTensor(self):\n bec = BornEffectiveCharge(self.piezo_struc, self.BEC, self.pointops)\n self.assertArrayAlmostEqual(self.BEC, bec.bec)\n\n def test_InternalStrainTensor(self):\n ist = InternalStrainTensor(self.piezo_struc, self.IST, self.pointops)\n self.assertArrayAlmostEqual(ist.ist, self.IST)\n\n def test_ForceConstantMatrix(self):\n fcmt = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n self.assertArrayAlmostEqual(fcmt.fcm, self.FCM)\n\n def test_get_BEC_operations(self):\n bec = BornEffectiveCharge(self.piezo_struc, 
self.BEC, self.pointops)\n bec.get_BEC_operations()\n self.assertTrue(np.all(self.BEC_operations == bec.BEC_operations))\n\n def test_get_rand_BEC(self):\n bec = BornEffectiveCharge(self.piezo_struc, self.BEC, self.pointops)\n bec.get_BEC_operations()\n rand_BEC = bec.get_rand_BEC()\n for i in range(len(self.BEC_operations)):\n for j in range(len(self.BEC_operations[i][2])):\n self.assertTrue(\n np.allclose(\n rand_BEC[self.BEC_operations[i][0]],\n self.BEC_operations[i][2][j].transform_tensor(rand_BEC[self.BEC_operations[i][1]]),\n atol=1e-03,\n )\n )\n\n def test_get_rand_IST(self):\n ist = InternalStrainTensor(self.piezo_struc, self.IST, self.pointops)\n ist.get_IST_operations()\n rand_IST = ist.get_rand_IST()\n for i in range(len(self.IST_operations)):\n for j in range(len(self.IST_operations[i])):\n self.assertTrue(\n np.allclose(\n rand_IST[i],\n self.IST_operations[i][j][1].transform_tensor(rand_IST[self.IST_operations[i][j][0]]),\n atol=1e-03,\n )\n )\n\n def test_get_FCM_operations(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n self.assertTrue(np.all(fcm.FCM_operations == self.FCM_operations))\n\n def test_get_unstable_FCM(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n rand_FCM = fcm.get_unstable_FCM()\n rand_FCM = np.reshape(rand_FCM, (10, 3, 10, 3)).swapaxes(1, 2)\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n rand_FCM[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n rand_FCM[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n def test_get_FCM_symmetry(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n\n fcm = fcm.get_symmetrized_FCM(np.random.rand(30, 30))\n fcm = np.reshape(fcm, (10, 3, 10, 3)).swapaxes(1, 2)\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n fcm[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n fcm[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n def test_get_asum_FCM(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n rand_FCM = fcm.get_unstable_FCM()\n rand_FCM = fcm.get_asum_FCM(rand_FCM)\n rand_FCM = np.reshape(rand_FCM, (10, 3, 10, 3)).swapaxes(1, 2)\n\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n rand_FCM[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n rand_FCM[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n for i in range(len(rand_FCM)):\n asum1 = np.zeros([3, 3])\n asum2 = np.zeros([3, 3])\n for j in range(len(rand_FCM[i])):\n asum1 += rand_FCM[i][j]\n asum2 += rand_FCM[j][i]\n self.assertTrue(np.allclose(asum1, np.zeros([3, 3]), atol=1e-05))\n self.assertTrue(np.allclose(asum2, np.zeros([3, 3]), atol=1e-05))\n\n def test_get_stable_FCM(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n rand_FCM = fcm.get_unstable_FCM()\n rand_FCM1 = fcm.get_stable_FCM(rand_FCM)\n\n eigs, vecs = 
np.linalg.eig(rand_FCM1)\n eigsort = np.argsort(np.abs(eigs))\n for i in range(3, len(eigs)):\n self.assertTrue(eigs[eigsort[i]] < 1e-06)\n\n rand_FCM1 = np.reshape(rand_FCM1, (10, 3, 10, 3)).swapaxes(1, 2)\n\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n rand_FCM1[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n rand_FCM1[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n for i in range(len(rand_FCM1)):\n asum1 = np.zeros([3, 3])\n asum2 = np.zeros([3, 3])\n for j in range(len(rand_FCM1[i])):\n asum1 += rand_FCM1[i][j]\n asum2 += rand_FCM1[j][i]\n self.assertTrue(np.allclose(asum1, np.zeros([3, 3]), atol=1e-05))\n self.assertTrue(np.allclose(asum2, np.zeros([3, 3]), atol=1e-05))\n\n @unittest.skipIf(Phonopy is None, \"Phonopy not present\")\n def test_rand_FCM(self):\n fcm = ForceConstantMatrix(self.piezo_struc, self.FCM, self.pointops, self.sharedops)\n fcm.get_FCM_operations()\n rand_FCM = fcm.get_rand_FCM()\n structure = pymatgen.io.phonopy.get_phonopy_structure(self.piezo_struc)\n pnstruc = Phonopy(structure, np.eye(3), np.eye(3))\n\n pnstruc.set_force_constants(rand_FCM)\n dyn = pnstruc.get_dynamical_matrix_at_q([0, 0, 0])\n dyn = np.reshape(dyn, (10, 3, 10, 3)).swapaxes(1, 2)\n dyn = np.real(dyn)\n numsites = len(self.piezo_struc)\n masses = []\n for j in range(numsites):\n masses.append(self.piezo_struc.sites[j].specie.atomic_mass)\n dynmass = np.zeros([numsites, numsites, 3, 3])\n for m in range(numsites):\n for n in range(numsites):\n dynmass[m][n] = dyn[m][n] / np.sqrt(masses[m]) / np.sqrt(masses[n])\n\n dynmass = np.reshape(np.swapaxes(dynmass, 1, 2), (10 * 3, 10 * 3))\n eigs, vecs = np.linalg.eig(dynmass)\n eigsort = np.argsort(np.abs(eigs))\n for i in range(3, len(eigs)):\n self.assertTrue(eigs[eigsort[i]] < 1e-06)\n # rand_FCM1 = np.reshape(rand_FCM1, (10,3,10,3)).swapaxes(1,2)\n\n dynmass = np.reshape(dynmass, (10, 3, 10, 3)).swapaxes(1, 2)\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n dynmass[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n dynmass[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n for i in range(len(dynmass)):\n asum1 = np.zeros([3, 3])\n asum2 = np.zeros([3, 3])\n for j in range(len(dynmass[i])):\n asum1 += dynmass[i][j]\n asum2 += dynmass[j][i]\n self.assertTrue(np.allclose(asum1, np.zeros([3, 3]), atol=1e-05))\n self.assertTrue(np.allclose(asum2, np.zeros([3, 3]), atol=1e-05))\n\n def test_get_piezo(self):\n piezo = get_piezo(self.BEC, self.IST, self.FCM)\n self.assertTrue(np.allclose(piezo, self.piezo, atol=1e-05))\n\n @unittest.skipIf(Phonopy is None, \"Phonopy not present\")\n def test_rand_piezo(self):\n rand_BEC, rand_IST, rand_FCM, piezo = rand_piezo(\n self.piezo_struc,\n self.pointops,\n self.sharedops,\n self.BEC,\n self.IST,\n self.FCM,\n )\n\n for i in range(len(self.BEC_operations)):\n for j in range(len(self.BEC_operations[i][2])):\n self.assertTrue(\n np.allclose(\n rand_BEC[self.BEC_operations[i][0]],\n self.BEC_operations[i][2][j].transform_tensor(rand_BEC[self.BEC_operations[i][1]]),\n atol=1e-03,\n )\n )\n\n for i in range(len(self.IST_operations)):\n for j in range(len(self.IST_operations[i])):\n self.assertTrue(\n np.allclose(\n rand_IST[i],\n 
self.IST_operations[i][j][1].transform_tensor(rand_IST[self.IST_operations[i][j][0]]),\n atol=1e-03,\n )\n )\n\n structure = pymatgen.io.phonopy.get_phonopy_structure(self.piezo_struc)\n pnstruc = Phonopy(structure, np.eye(3), np.eye(3))\n\n pnstruc.set_force_constants(rand_FCM)\n dyn = pnstruc.get_dynamical_matrix_at_q([0, 0, 0])\n dyn = np.reshape(dyn, (10, 3, 10, 3)).swapaxes(1, 2)\n dyn = np.real(dyn)\n numsites = len(self.piezo_struc)\n masses = []\n for j in range(numsites):\n masses.append(self.piezo_struc.sites[j].specie.atomic_mass)\n dynmass = np.zeros([numsites, numsites, 3, 3])\n for m in range(numsites):\n for n in range(numsites):\n dynmass[m][n] = dyn[m][n] / np.sqrt(masses[m]) / np.sqrt(masses[n])\n\n dynmass = np.reshape(np.swapaxes(dynmass, 1, 2), (10 * 3, 10 * 3))\n eigs, vecs = np.linalg.eig(dynmass)\n eigsort = np.argsort(np.abs(eigs))\n for i in range(3, len(eigs)):\n self.assertTrue(eigs[eigsort[i]] < 1e-06)\n # rand_FCM1 = np.reshape(rand_FCM1, (10,3,10,3)).swapaxes(1,2)\n\n dynmass = np.reshape(dynmass, (10, 3, 10, 3)).swapaxes(1, 2)\n for i in range(len(self.FCM_operations)):\n for j in range(len(self.FCM_operations[i][4])):\n self.assertTrue(\n np.allclose(\n self.FCM_operations[i][4][j].transform_tensor(\n dynmass[self.FCM_operations[i][2]][self.FCM_operations[i][3]]\n ),\n dynmass[self.FCM_operations[i][0]][self.FCM_operations[i][1]],\n atol=1e-04,\n )\n )\n\n for i in range(len(dynmass)):\n asum1 = np.zeros([3, 3])\n asum2 = np.zeros([3, 3])\n for j in range(len(dynmass[i])):\n asum1 += dynmass[i][j]\n asum2 += dynmass[j][i]\n self.assertTrue(np.allclose(asum1, np.zeros([3, 3]), atol=1e-05))\n self.assertTrue(np.allclose(asum2, np.zeros([3, 3]), atol=1e-05))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nimport json\nimport os\nimport unittest\nimport warnings\nfrom io import open\n\nimport scipy\nfrom monty.os.path import which\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.electronic_structure.bandstructure import BandStructureSymmLine\nfrom pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer\nfrom pymatgen.electronic_structure.cohp import CompleteCohp\nfrom pymatgen.electronic_structure.core import Spin\nfrom pymatgen.electronic_structure.dos import CompleteDos\nfrom pymatgen.electronic_structure.plotter import (\n BoltztrapPlotter,\n BSDOSPlotter,\n BSPlotter,\n BSPlotterProjected,\n CohpPlotter,\n DosPlotter,\n fold_point,\n plot_brillouin_zone,\n plot_ellipsoid,\n)\nfrom pymatgen.io.vasp import Vasprun\nfrom pymatgen.util.testing import PymatgenTest\n\n\nclass DosPlotterTest(unittest.TestCase):\n def setUp(self):\n with open(os.path.join(PymatgenTest.TEST_FILES_DIR, \"complete_dos.json\"), \"r\", encoding=\"utf-8\") as f:\n self.dos = CompleteDos.from_dict(json.load(f))\n self.plotter = DosPlotter(sigma=0.2, stack=True)\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_add_dos_dict(self):\n d = self.plotter.get_dos_dict()\n self.assertEqual(len(d), 0)\n self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)\n d = self.plotter.get_dos_dict()\n self.assertEqual(len(d), 4)\n\n def test_get_dos_dict(self):\n self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)\n d = self.plotter.get_dos_dict()\n for el in [\"Li\", \"Fe\", \"P\", \"O\"]:\n self.assertIn(el, d)\n\n # Minimal baseline testing 
for get_plot. Not a true test. Just checks that\n # it can actually execute.\n def test_get_plot(self):\n # Disabling latex is needed for this test to work.\n from matplotlib import rc\n\n rc(\"text\", usetex=False)\n self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)\n plt = self.plotter.get_plot()\n self.plotter.save_plot(\"dosplot.png\")\n self.assertTrue(os.path.isfile(\"dosplot.png\"))\n os.remove(\"dosplot.png\")\n plt.close(\"all\")\n\n\nclass BSPlotterTest(unittest.TestCase):\n def setUp(self):\n with open(os.path.join(PymatgenTest.TEST_FILES_DIR, \"CaO_2605_bandstructure.json\"), \"r\", encoding=\"utf-8\") as f:\n d = json.loads(f.read())\n self.bs = BandStructureSymmLine.from_dict(d)\n self.plotter = BSPlotter(self.bs)\n\n self.assertEqual(len(self.plotter._bs), 1, \"wrong number of band objects\")\n\n with open(os.path.join(PymatgenTest.TEST_FILES_DIR, \"N2_12103_bandstructure.json\"), \"r\", encoding=\"utf-8\") as f:\n d = json.loads(f.read())\n self.sbs_sc = BandStructureSymmLine.from_dict(d)\n\n with open(os.path.join(PymatgenTest.TEST_FILES_DIR, \"C_48_bandstructure.json\"), \"r\", encoding=\"utf-8\") as f:\n d = json.loads(f.read())\n self.sbs_met = BandStructureSymmLine.from_dict(d)\n\n self.plotter_multi = BSPlotter([self.sbs_sc, self.sbs_met])\n self.assertEqual(len(self.plotter_multi._bs), 2, \"wrong number of band objects\")\n self.assertEqual(self.plotter_multi._nb_bands, [96, 96], \"wrong number of bands\")\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_add_bs(self):\n self.plotter_multi.add_bs(self.sbs_sc)\n self.assertEqual(len(self.plotter_multi._bs), 3, \"wrong number of band objects\")\n self.assertEqual(self.plotter_multi._nb_bands, [96, 96, 96], \"wrong number of bands\")\n\n def test_get_branch_steps(self):\n steps_idx = BSPlotter._get_branch_steps(self.sbs_sc.branches)\n self.assertEqual(steps_idx, [0, 121, 132, 143], \"wrong list of steps idx\")\n\n def test_rescale_distances(self):\n rescaled_distances = self.plotter_multi._rescale_distances(self.sbs_sc, self.sbs_met)\n self.assertEqual(\n len(rescaled_distances),\n len(self.sbs_met.distance),\n \"wrong length of distances list\",\n )\n self.assertEqual(rescaled_distances[-1], 6.5191398067252875, \"wrong last distance value\")\n self.assertEqual(\n rescaled_distances[148],\n self.sbs_sc.distance[19],\n \"wrong distance at high symm k-point\",\n )\n\n def test_interpolate_bands(self):\n data = self.plotter.bs_plot_data()\n d = data[\"distances\"]\n en = data[\"energy\"][\"1\"]\n int_distances, int_energies = self.plotter._interpolate_bands(d, en)\n\n self.assertEqual(len(int_distances), 10, \"wrong length of distances list\")\n self.assertEqual(len(int_distances[0]), 100, \"wrong length of distances in a branch\")\n self.assertEqual(len(int_energies), 10, \"wrong length of energies list\")\n self.assertEqual(int_energies[0].shape, (16, 100), \"wrong shape of energies in a branch\")\n\n def test_bs_plot_data(self):\n self.assertEqual(\n len(self.plotter.bs_plot_data()[\"distances\"]),\n 10,\n \"wrong number of sequences of branches\",\n )\n self.assertEqual(\n len(self.plotter.bs_plot_data()[\"distances\"][0]),\n 16,\n \"wrong number of distances in the first sequence of branches\",\n )\n self.assertEqual(\n sum([len(e) for e in self.plotter.bs_plot_data()[\"distances\"]]),\n 160,\n \"wrong number of distances\",\n )\n\n length = len(self.plotter.bs_plot_data(split_branches=False)[\"distances\"][0])\n
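# With split_branches=False contiguous branches are merged, so bs_plot_data\n # returns fewer (but longer) distance sequences than the default behaviour.\n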
self.assertEqual(length, 144, \"wrong number of distances in the first sequence of branches\")\n\n length = len(self.plotter.bs_plot_data(split_branches=False)[\"distances\"])\n self.assertEqual(length, 2, \"wrong number of sequences of branches\")\n\n self.assertEqual(self.plotter.bs_plot_data()[\"ticks\"][\"label\"][5], \"K\", \"wrong tick label\")\n self.assertEqual(\n len(self.plotter.bs_plot_data()[\"ticks\"][\"label\"]),\n 19,\n \"wrong number of tick labels\",\n )\n\n def test_get_ticks(self):\n self.assertEqual(self.plotter.get_ticks()[\"label\"][5], \"K\", \"wrong tick label\")\n self.assertEqual(\n self.plotter.get_ticks()[\"distance\"][5],\n 2.406607625322699,\n \"wrong tick distance\",\n )\n\n # Minimal baseline testing for get_plot. Not a true test. Just checks that\n # it can actually execute.\n def test_get_plot(self):\n # zero_to_efermi = True, ylim = None, smooth = False,\n # vbm_cbm_marker = False, smooth_tol = None\n\n # Disabling latex is needed for this test to work.\n from matplotlib import rc\n\n rc(\"text\", usetex=False)\n\n plt = self.plotter.get_plot()\n self.assertEqual(plt.ylim(), (-4.0, 7.6348), \"wrong ylim\")\n plt = self.plotter.get_plot(smooth=True)\n plt = self.plotter.get_plot(vbm_cbm_marker=True)\n self.plotter.save_plot(\"bsplot.png\")\n self.assertTrue(os.path.isfile(\"bsplot.png\"))\n os.remove(\"bsplot.png\")\n plt.close(\"all\")\n\n # test plotter with 2 bandstructures\n plt = self.plotter_multi.get_plot()\n self.assertEqual(len(plt.gca().get_lines()), 874, \"wrong number of lines\")\n self.assertEqual(plt.ylim(), (-10.0, 10.0), \"wrong ylim\")\n plt = self.plotter_multi.get_plot(zero_to_efermi=False)\n self.assertEqual(plt.ylim(), (-15.2379, 12.67141266), \"wrong ylim\")\n plt = self.plotter_multi.get_plot(smooth=True)\n self.plotter_multi.save_plot(\"bsplot.png\")\n self.assertTrue(os.path.isfile(\"bsplot.png\"))\n os.remove(\"bsplot.png\")\n plt.close(\"all\")\n\n\nclass BSPlotterProjectedTest(unittest.TestCase):\n def setUp(self):\n with open(os.path.join(PymatgenTest.TEST_FILES_DIR, \"Cu2O_361_bandstructure.json\"), \"r\", encoding=\"utf-8\") as f:\n d = json.load(f)\n self.bs = BandStructureSymmLine.from_dict(d)\n self.plotter = BSPlotterProjected(self.bs)\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n # Minimal baseline testing for get_plot. Not a true test. Just checks that\n # it can actually execute.\n def test_methods(self):\n self.plotter.get_elt_projected_plots().close()\n self.plotter.get_elt_projected_plots_color().close()\n self.plotter.get_projected_plots_dots({\"Cu\": [\"d\", \"s\"], \"O\": [\"p\"]}).close()\n self.plotter.get_projected_plots_dots_patom_pmorb(\n {\"Cu\": [\"dxy\", \"s\", \"px\"], \"O\": [\"px\", \"py\", \"pz\"]},\n {\"Cu\": [3, 5], \"O\": [1]},\n ).close()\n\n\nclass BSDOSPlotterTest(unittest.TestCase):\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n # Minimal baseline testing for get_plot. Not a true test. 
Just checks that\n # it can actually execute.\n def test_methods(self):\n v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, \"vasprun_Si_bands.xml\"))\n p = BSDOSPlotter()\n plt = p.get_plot(\n v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, \"KPOINTS_Si_bands\"))\n )\n plt.close()\n plt = p.get_plot(\n v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, \"KPOINTS_Si_bands\")),\n v.complete_dos,\n )\n plt.close(\"all\")\n\n\nclass PlotBZTest(unittest.TestCase):\n def setUp(self):\n self.rec_latt = Structure.from_file(\n os.path.join(PymatgenTest.TEST_FILES_DIR, \"Si.cssr\")\n ).lattice.reciprocal_lattice\n self.kpath = [[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5], [0.5, 0.25, 0.75], [0.375, 0.375, 0.75]]]\n self.labels = {\n \"\\\\Gamma\": [0.0, 0.0, 0.0],\n \"K\": [0.375, 0.375, 0.75],\n \"L\": [0.5, 0.5, 0.5],\n \"U\": [0.625, 0.25, 0.625],\n \"W\": [0.5, 0.25, 0.75],\n \"X\": [0.5, 0.0, 0.5],\n }\n self.hessian = [\n [17.64757034, 3.90159625, -4.77845607],\n [3.90159625, 14.88874142, 6.75776076],\n [-4.77845607, 6.75776076, 12.12987493],\n ]\n self.center = [0.41, 0.0, 0.41]\n self.points = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_bz_plot(self):\n fig, ax = plot_ellipsoid(self.hessian, self.center, lattice=self.rec_latt)\n fig = plot_brillouin_zone(\n self.rec_latt,\n lines=self.kpath,\n labels=self.labels,\n kpoints=self.points,\n ax=ax,\n show=False,\n )\n\n def test_fold_point(self):\n self.assertTrue(\n scipy.allclose(\n fold_point([0.0, -0.5, 0.5], lattice=self.rec_latt),\n self.rec_latt.get_cartesian_coords([0.0, 0.5, 0.5]),\n )\n )\n self.assertTrue(\n scipy.allclose(\n fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),\n self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2]),\n )\n )\n\n\nx_trans = which(\"x_trans\")\n\n\[email protected](not x_trans, \"No x_trans.\")\nclass BoltztrapPlotterTest(unittest.TestCase):\n def setUp(self):\n bz = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, \"boltztrap/transp/\"))\n self.plotter = BoltztrapPlotter(bz)\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_plot_carriers(self):\n plt = self.plotter.plot_carriers()\n self.assertEqual(len(plt.gca().get_lines()), 7, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 6.525490122298364e22,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_complexity_factor_mu(self):\n plt = self.plotter.plot_complexity_factor_mu()\n self.assertEqual(len(plt.gca().get_lines()), 2, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 0.004708835456903449,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_conductivity_dop(self):\n plt = self.plotter.plot_conductivity_dop()\n self.assertEqual(len(plt.gca().get_lines()), 8, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n 1000000000000000.0,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 0.3801957596666667,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def 
test_plot_conductivity_mu(self):\n plt = self.plotter.plot_conductivity_mu()\n self.assertEqual(len(plt.gca().get_lines()), 9, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 1965.1306,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_conductivity_temp(self):\n plt = self.plotter.plot_conductivity_temp()\n self.assertEqual(len(plt.gca().get_lines()), 6, \"wrong number of lines\")\n self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, \"wrong 0 data in line 0\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 0.3801957596666667,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_dos(self):\n plt = self.plotter.plot_dos()\n self.assertEqual(len(plt.gca().get_lines()), 3, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.4197044934588674,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(plt.gca().get_lines()[0].get_data()[1][0], 0.0, \"wrong 1 data in line 0\")\n plt.close()\n\n def test_plot_eff_mass_dop(self):\n plt = self.plotter.plot_eff_mass_dop()\n self.assertEqual(len(plt.gca().get_lines()), 8, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n 1000000000000000.0,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 1.4231240011719886,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_eff_mass_temp(self):\n plt = self.plotter.plot_eff_mass_temp()\n self.assertEqual(len(plt.gca().get_lines()), 6, \"wrong number of lines\")\n self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, \"wrong 0 data in line 0\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 1.4231240011719886,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_hall_carriers(self):\n plt = self.plotter.plot_hall_carriers()\n self.assertEqual(len(plt.gca().get_lines()), 7, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 9.538187273102463e17,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_power_factor_dop(self):\n plt = self.plotter.plot_power_factor_dop()\n self.assertEqual(len(plt.gca().get_lines()), 8, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n 1000000000000000.0,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 0.40606868935796925,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_power_factor_mu(self):\n plt = self.plotter.plot_power_factor_mu()\n self.assertEqual(len(plt.gca().get_lines()), 9, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 365.5514594136157,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_power_factor_temp(self):\n plt = self.plotter.plot_power_factor_temp()\n self.assertEqual(len(plt.gca().get_lines()), 6, \"wrong number of lines\")\n self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, \"wrong 0 data in line 0\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 
0.40606868935796925,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_seebeck_dop(self):\n plt = self.plotter.plot_seebeck_dop()\n self.assertEqual(len(plt.gca().get_lines()), 8, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n 1000000000000000.0,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 1050.8197666666667,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_seebeck_eff_mass_mu(self):\n plt = self.plotter.plot_seebeck_eff_mass_mu()\n self.assertEqual(len(plt.gca().get_lines()), 2, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 6412.881888198197,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_seebeck_mu(self):\n plt = self.plotter.plot_seebeck_mu()\n self.assertEqual(len(plt.gca().get_lines()), 9, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n -433.11096000000003,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_seebeck_temp(self):\n plt = self.plotter.plot_seebeck_temp()\n self.assertEqual(len(plt.gca().get_lines()), 6, \"wrong number of lines\")\n self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, \"wrong 0 data in line 0\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 1050.8197666666667,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_zt_dop(self):\n plt = self.plotter.plot_zt_dop()\n self.assertEqual(len(plt.gca().get_lines()), 8, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n 1000000000000000.0,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 4.060682863129955e-05,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_zt_mu(self):\n plt = self.plotter.plot_zt_mu()\n self.assertEqual(len(plt.gca().get_lines()), 9, \"wrong number of lines\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[0][0],\n -2.0702422655947665,\n \"wrong 0 data in line 0\",\n )\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 0.2153839699235254,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n def test_plot_zt_temp(self):\n plt = self.plotter.plot_zt_temp()\n self.assertEqual(len(plt.gca().get_lines()), 6, \"wrong number of lines\")\n self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, \"wrong 0 data in line 0\")\n self.assertEqual(\n plt.gca().get_lines()[0].get_data()[1][0],\n 4.060682863129955e-05,\n \"wrong 1 data in line 0\",\n )\n plt.close()\n\n\nclass CohpPlotterTest(PymatgenTest):\n def setUp(self):\n path = os.path.join(PymatgenTest.TEST_FILES_DIR, \"cohp\", \"complete_cohp_lobster.json\")\n with open(os.path.join(path), \"r\") as f:\n self.cohp = CompleteCohp.from_dict(json.load(f))\n path = os.path.join(PymatgenTest.TEST_FILES_DIR, \"cohp\", \"complete_coop_lobster.json\")\n with open(os.path.join(path), \"r\") as f:\n self.coop = CompleteCohp.from_dict(json.load(f))\n self.cohp_plot = CohpPlotter(zero_at_efermi=False)\n self.coop_plot = CohpPlotter(are_coops=True)\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_attributes(self):\n 
self.assertFalse(self.cohp_plot.are_coops)\n self.assertTrue(self.coop_plot.are_coops)\n self.assertFalse(self.cohp_plot.zero_at_efermi)\n self.assertTrue(self.coop_plot.zero_at_efermi)\n self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)\n cohp_energies = self.cohp_plot._cohps[\"1\"][\"energies\"]\n self.assertEqual(len(cohp_energies), 301)\n self.assertAlmostEqual(cohp_energies[0], -0.27768)\n self.assertAlmostEqual(cohp_energies[-1], 14.77248)\n self.coop_plot.add_cohp_dict(self.coop.all_cohps)\n coop_energies = self.coop_plot._cohps[\"10\"][\"energies\"]\n self.assertEqual(len(coop_energies), 241)\n self.assertAlmostEqual(coop_energies[0], -6.02510)\n self.assertAlmostEqual(coop_energies[-1], 6.02510)\n\n def test_add_cohp_dict(self):\n # Sorts the populations by z-coordinates of the sites\n def sortkeys(sites):\n return sites[0].z, sites[1].z\n\n sorted_keys = [\"3\", \"4\", \"7\", \"8\", \"9\", \"10\", \"11\", \"6\", \"5\", \"2\", \"1\"]\n\n d_coop = self.coop_plot.get_cohp_dict()\n self.assertEqual(len(d_coop), 0)\n bonds = self.coop.bonds\n self.coop_plot.add_cohp_dict(self.coop.all_cohps, key_sort_func=lambda x: sortkeys(bonds[x][\"sites\"]))\n d_coop = self.coop_plot.get_cohp_dict()\n self.assertEqual(len(d_coop), 11)\n self.assertEqual(list(self.coop_plot._cohps.keys()), sorted_keys)\n\n def test_get_cohp_dict(self):\n self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)\n d_cohp = self.cohp_plot.get_cohp_dict()\n for bond in [\"1\", \"2\"]:\n self.assertIn(bond, d_cohp)\n\n def test_get_plot(self):\n self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)\n plt_cohp = self.cohp_plot.get_plot()\n ax_cohp = plt_cohp.gca()\n self.assertEqual(ax_cohp.get_xlabel(), \"-COHP\")\n self.assertEqual(ax_cohp.get_ylabel(), \"$E$ (eV)\")\n legend_labels = ax_cohp.get_legend_handles_labels()[1]\n self.assertEqual(len(self.cohp_plot._cohps), len(legend_labels))\n self.assertEqual(ax_cohp.lines[0].get_linestyle(), \"-\")\n self.assertEqual(ax_cohp.lines[1].get_linestyle(), \"--\")\n for label in legend_labels:\n self.assertIn(label, self.cohp_plot._cohps)\n linesindex = legend_labels.index(\"1\")\n linestyles = {Spin.up: \"-\", Spin.down: \"--\"}\n cohp_fe_fe = self.cohp.all_cohps[\"1\"]\n for s, spin in enumerate([Spin.up, Spin.down]):\n lines = ax_cohp.lines[2 * linesindex + s]\n self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.cohp[spin])\n self.assertArrayAlmostEqual(lines.get_ydata(), self.cohp.energies)\n self.assertEqual(lines.get_linestyle(), linestyles[spin])\n plt_cohp.close()\n\n plt_cohp = self.cohp_plot.get_plot(invert_axes=False, plot_negative=False)\n ax_cohp = plt_cohp.gca()\n self.assertEqual(ax_cohp.get_xlabel(), \"$E$ (eV)\")\n self.assertEqual(ax_cohp.get_ylabel(), \"COHP\")\n for s, spin in enumerate([Spin.up, Spin.down]):\n lines = ax_cohp.lines[2 * linesindex + s]\n self.assertArrayAlmostEqual(lines.get_xdata(), self.cohp.energies)\n self.assertArrayAlmostEqual(lines.get_ydata(), cohp_fe_fe.cohp[spin])\n plt_cohp.close()\n\n plt_cohp = self.cohp_plot.get_plot(integrated=True)\n ax_cohp = plt_cohp.gca()\n self.assertEqual(ax_cohp.get_xlabel(), \"-ICOHP (eV)\")\n for s, spin in enumerate([Spin.up, Spin.down]):\n lines = ax_cohp.lines[2 * linesindex + s]\n self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.icohp[spin])\n\n coop_dict = {\"Bi5-Bi6\": self.coop.all_cohps[\"10\"]}\n self.coop_plot.add_cohp_dict(coop_dict)\n plt_coop = self.coop_plot.get_plot()\n ax_coop = plt_coop.gca()\n self.assertEqual(ax_coop.get_xlabel(), \"COOP\")\n 
self.assertEqual(ax_coop.get_ylabel(), \"$E - E_f$ (eV)\")\n lines_coop = ax_coop.get_lines()[0]\n self.assertArrayAlmostEqual(lines_coop.get_ydata(), self.coop.energies - self.coop.efermi)\n coop_bi_bi = self.coop.all_cohps[\"10\"].cohp[Spin.up]\n self.assertArrayAlmostEqual(lines_coop.get_xdata(), coop_bi_bi)\n\n # Cleanup.\n plt_cohp.close()\n plt_coop.close(\"all\")\n\n def test_save_plot(self):\n self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)\n plt_cohp = self.cohp_plot.get_plot()\n self.cohp_plot.save_plot(\"cohpplot.png\")\n self.assertTrue(os.path.isfile(\"cohpplot.png\"))\n os.remove(\"cohpplot.png\")\n plt_cohp.close(\"all\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nImplementation of defect correction methods.\n\"\"\"\n\nimport logging\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\nfrom scipy import stats\n\nfrom pymatgen.analysis.defects.core import DefectCorrection\nfrom pymatgen.analysis.defects.utils import (\n QModel,\n ang_to_bohr,\n converge,\n eV_to_k,\n generate_R_and_G_vecs,\n generate_reciprocal_vectors_squared,\n hart_to_ev,\n kumagai_to_V,\n tune_for_gamma,\n)\n\n__author__ = \"Danny Broberg, Shyam Dwaraknath\"\n__copyright__ = \"Copyright 2018, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyam Dwaraknath\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"Mar 15, 2018\"\n\nlogger = logging.getLogger(__name__)\n\n\nclass FreysoldtCorrection(DefectCorrection):\n \"\"\"\n A class for the FreysoldtCorrection. Largely adapted from PyCDT code.\n\n If this correction is used, please reference Freysoldt's original paper.\n doi: 10.1103/PhysRevLett.102.016402\n \"\"\"\n\n def __init__(\n self,\n dielectric_const,\n q_model=None,\n energy_cutoff=520,\n madetol=0.0001,\n axis=None,\n ):\n \"\"\"\n Initializes the FreysoldtCorrection class\n Args:\n dielectric_const (float or 3x3 matrix): Dielectric constant for the structure\n q_model (QModel): instantiated QModel object or None.\n Uses default parameters to instantiate QModel if None supplied\n energy_cutoff (int): Maximum energy in eV in reciprocal space to perform\n integration for potential correction.\n madetol (float): Convergence criterion for the Madelung energy for potential correction\n axis (int): Axis to calculate correction.\n If axis is None, the average over all three axes is performed.\n \"\"\"\n self.q_model = QModel() if not q_model else q_model\n self.energy_cutoff = energy_cutoff\n self.madetol = madetol\n self.dielectric_const = dielectric_const\n\n if isinstance(dielectric_const, (int, float)):\n self.dielectric = float(dielectric_const)\n else:\n self.dielectric = float(np.mean(np.diag(dielectric_const)))\n\n self.axis = axis\n\n self.metadata = {\"pot_plot_data\": {}, \"pot_corr_uncertainty_md\": {}}\n\n def get_correction(self, entry):\n \"\"\"\n Gets the Freysoldt correction for a defect entry\n Args:\n entry (DefectEntry): defect entry to compute Freysoldt correction on.\n\n Requires following keys to exist in DefectEntry.parameters dict:\n\n axis_grid (3 x NGX where NGX is the length of the NGX grid\n in the x, y and z axis directions. 
Same length as planar\n average lists):\n A list of 3 numpy arrays which contain the cartesian axis\n values (in angstroms) that correspond to each planar avg\n potential supplied.\n\n bulk_planar_averages (3 x NGX where NGX is the length of\n the NGX grid in the x,y and z axis directions.):\n A list of 3 numpy arrays which contain the planar averaged\n electrostatic potential for the bulk supercell.\n\n defect_planar_averages (3 x NGX where NGX is the length of\n the NGX grid in the x,y and z axis directions.):\n A list of 3 numpy arrays which contain the planar averaged\n electrostatic potential for the defective supercell.\n\n initial_defect_structure (Structure) structure corresponding to\n initial defect supercell structure (uses Lattice for charge correction)\n\n defect_frac_sc_coords (3 x 1 array) Fractional co-ordinates of\n defect location in supercell structure\n Returns:\n FreysoldtCorrection values as a dictionary\n \"\"\"\n\n if self.axis is None:\n list_axis_grid = np.array(entry.parameters[\"axis_grid\"])\n list_bulk_plnr_avg_esp = np.array(entry.parameters[\"bulk_planar_averages\"])\n list_defect_plnr_avg_esp = np.array(entry.parameters[\"defect_planar_averages\"])\n list_axes = range(len(list_axis_grid))\n else:\n list_axes = np.array(self.axis)\n list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp = (\n [],\n [],\n [],\n )\n for ax in list_axes:\n list_axis_grid.append(np.array(entry.parameters[\"axis_grid\"][ax]))\n list_bulk_plnr_avg_esp.append(np.array(entry.parameters[\"bulk_planar_averages\"][ax]))\n list_defect_plnr_avg_esp.append(np.array(entry.parameters[\"defect_planar_averages\"][ax]))\n\n lattice = entry.parameters[\"initial_defect_structure\"].lattice.copy()\n defect_frac_coords = entry.parameters[\"defect_frac_sc_coords\"]\n\n q = entry.defect.charge\n\n es_corr = self.perform_es_corr(lattice, entry.charge)\n\n pot_corr_tracker = []\n\n for x, pureavg, defavg, axis in zip(\n list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp, list_axes\n ):\n tmp_pot_corr = self.perform_pot_corr(\n x,\n pureavg,\n defavg,\n lattice,\n entry.charge,\n defect_frac_coords,\n axis,\n widthsample=1.0,\n )\n pot_corr_tracker.append(tmp_pot_corr)\n\n pot_corr = np.mean(pot_corr_tracker)\n\n entry.parameters[\"freysoldt_meta\"] = dict(self.metadata)\n entry.parameters[\"potalign\"] = pot_corr / (-q) if q else 0.0\n\n return {\n \"freysoldt_electrostatic\": es_corr,\n \"freysoldt_potential_alignment\": pot_corr,\n }\n\n def perform_es_corr(self, lattice, q, step=1e-4):\n \"\"\"\n Peform Electrostatic Freysoldt Correction\n Args:\n lattice: Pymatgen lattice object\n q (int): Charge of defect\n step (float): step size for numerical integration\n Return:\n Electrostatic Point Charge contribution to Freysoldt Correction (float)\n \"\"\"\n logger.info(\"Running Freysoldt 2011 PC calculation (should be \" \"equivalent to sxdefectalign)\")\n logger.debug(\"defect lattice constants are (in angstroms)\" + str(lattice.abc))\n\n [a1, a2, a3] = ang_to_bohr * np.array(lattice.get_cartesian_coords(1))\n logging.debug(\"In atomic units, lat consts are (in bohr):\" + str([a1, a2, a3]))\n vol = np.dot(a1, np.cross(a2, a3)) # vol in bohr^3\n\n def e_iso(encut):\n gcut = eV_to_k(encut) # gcut is in units of 1/A\n return scipy.integrate.quad(lambda g: self.q_model.rho_rec(g * g) ** 2, step, gcut)[0] * (q ** 2) / np.pi\n\n def e_per(encut):\n eper = 0\n for g2 in generate_reciprocal_vectors_squared(a1, a2, a3, encut):\n eper += (self.q_model.rho_rec(g2) ** 2) / g2\n eper *= (q 
** 2) * 2 * round(np.pi, 6) / vol\n eper += (q ** 2) * 4 * round(np.pi, 6) * self.q_model.rho_rec_limit0 / vol\n return eper\n\n eiso = converge(e_iso, 5, self.madetol, self.energy_cutoff)\n logger.debug(\"Eisolated : %f\", round(eiso, 5))\n\n eper = converge(e_per, 5, self.madetol, self.energy_cutoff)\n\n logger.info(\"Eperiodic : %f hartree\", round(eper, 5))\n logger.info(\"difference (periodic-iso) is %f hartree\", round(eper - eiso, 6))\n logger.info(\"difference in (eV) is %f\", round((eper - eiso) * hart_to_ev, 4))\n\n es_corr = round((eiso - eper) / self.dielectric * hart_to_ev, 6)\n logger.info(\"Defect Correction without alignment %f (eV): \", es_corr)\n return es_corr\n\n def perform_pot_corr(\n self,\n axis_grid,\n pureavg,\n defavg,\n lattice,\n q,\n defect_frac_position,\n axis,\n widthsample=1.0,\n ):\n \"\"\"\n For performing planar averaging potential alignment\n Args:\n axis_grid (1 x NGX where NGX is the length of the NGX grid\n in the axis direction. Same length as pureavg list):\n A numpy array which contain the cartesian axis\n values (in angstroms) that correspond to each planar avg\n potential supplied.\n pureavg (1 x NGX where NGX is the length of the NGX grid in\n the axis direction.):\n A numpy array for the planar averaged\n electrostatic potential of the bulk supercell.\n defavg (1 x NGX where NGX is the length of the NGX grid in\n the axis direction.):\n A numpy array for the planar averaged\n electrostatic potential of the defect supercell.\n lattice: Pymatgen Lattice object of the defect supercell\n q (float or int): charge of the defect\n defect_frac_position: Fracitional Coordinates of the defect in the supercell\n axis (int): axis for performing the freysoldt correction on\n widthsample (float): width (in Angstroms) of the region in between defects\n where the potential alignment correction is averaged. 
Default is 1 Angstrom.\n Returns:\n Potential Alignment contribution to Freysoldt Correction (float)\n \"\"\"\n logging.debug(\"run Freysoldt potential alignment method for axis \" + str(axis))\n nx = len(axis_grid)\n\n # shift these planar averages to have defect at origin\n axfracval = defect_frac_position[axis]\n axbulkval = axfracval * lattice.abc[axis]\n if axbulkval < 0:\n axbulkval += lattice.abc[axis]\n elif axbulkval > lattice.abc[axis]:\n axbulkval -= lattice.abc[axis]\n\n if axbulkval:\n for i in range(nx):\n if axbulkval < axis_grid[i]:\n break\n rollind = len(axis_grid) - i\n pureavg = np.roll(pureavg, rollind)\n defavg = np.roll(defavg, rollind)\n\n # if not self._silence:\n logger.debug(\"calculating lr part along planar avg axis\")\n reci_latt = lattice.reciprocal_lattice\n dg = reci_latt.abc[axis]\n dg /= ang_to_bohr # convert to bohr to do calculation in atomic units\n\n # Build background charge potential with defect at origin\n v_G = np.empty(len(axis_grid), np.dtype(\"c16\"))\n v_G[0] = 4 * np.pi * -q / self.dielectric * self.q_model.rho_rec_limit0\n g = np.roll(np.arange(-nx / 2, nx / 2, 1, dtype=int), int(nx / 2)) * dg\n g2 = np.multiply(g, g)[1:]\n v_G[1:] = 4 * np.pi / (self.dielectric * g2) * -q * self.q_model.rho_rec(g2)\n v_G[nx // 2] = 0 if not (nx % 2) else v_G[nx // 2]\n\n # Get the real-space potential by performing an FFT; the result should be\n # essentially real, so the imaginary part is checked and then discarded\n v_R = np.fft.fft(v_G)\n\n if abs(np.imag(v_R).max()) > self.madetol:\n raise Exception(\"imaginary part found to be {}\".format(repr(np.imag(v_R).max())))\n v_R /= lattice.volume * ang_to_bohr ** 3\n v_R = np.real(v_R) * hart_to_ev\n\n # get correction\n short = np.array(defavg) - np.array(pureavg) - np.array(v_R)\n checkdis = int((widthsample / 2) / (axis_grid[1] - axis_grid[0]))\n mid = int(len(short) / 2)\n\n tmppot = [short[i] for i in range(mid - checkdis, mid + checkdis + 1)]\n logger.debug(\"shifted defect position on axis (%s) to origin\", repr(axbulkval))\n logger.debug(\n \"means sampling region is (%f,%f)\",\n axis_grid[mid - checkdis],\n axis_grid[mid + checkdis],\n )\n\n C = -np.mean(tmppot)\n logger.debug(\"C = %f\", C)\n final_shift = [short[j] + C for j in range(len(v_R))]\n v_R = [elmnt - C for elmnt in v_R]\n\n logger.info(\"C value is averaged to be %f eV \", C)\n logger.info(\"Potential alignment energy correction (-q*delta V): %f (eV)\", -q * C)\n self.pot_corr = -q * C\n\n # log plotting data:\n self.metadata[\"pot_plot_data\"][axis] = {\n \"Vr\": v_R,\n \"x\": axis_grid,\n \"dft_diff\": np.array(defavg) - np.array(pureavg),\n \"final_shift\": final_shift,\n \"check\": [mid - checkdis, mid + checkdis + 1],\n }\n\n # log uncertainty:\n self.metadata[\"pot_corr_uncertainty_md\"][axis] = {\n \"stats\": stats.describe(tmppot)._asdict(),\n \"potcorr\": -q * C,\n }\n\n return self.pot_corr\n\n def plot(self, axis, title=None, saved=False):\n \"\"\"\n Plots the planar average electrostatic potential against the long-range and\n short-range models from Freysoldt. Must run perform_pot_corr or get_correction\n (to load metadata) before this can be used.\n Args:\n axis (int): axis to plot\n title (str): Title to be given to plot. Default is no title.\n saved (bool): Whether to save file or not. If False then returns plot\n object. 
If True then saves plot as str(title) + \"FreyplnravgPlot.pdf\"\n\n \"\"\"\n if not self.metadata[\"pot_plot_data\"]:\n raise ValueError(\"Cannot plot potential alignment before running correction!\")\n\n x = self.metadata[\"pot_plot_data\"][axis][\"x\"]\n v_R = self.metadata[\"pot_plot_data\"][axis][\"Vr\"]\n dft_diff = self.metadata[\"pot_plot_data\"][axis][\"dft_diff\"]\n final_shift = self.metadata[\"pot_plot_data\"][axis][\"final_shift\"]\n check = self.metadata[\"pot_plot_data\"][axis][\"check\"]\n\n plt.figure()\n plt.clf()\n plt.plot(x, v_R, c=\"green\", zorder=1, label=\"long range from model\")\n plt.plot(x, dft_diff, c=\"red\", label=\"DFT locpot diff\")\n plt.plot(x, final_shift, c=\"blue\", label=\"short range (aligned)\")\n\n tmpx = [x[i] for i in range(check[0], check[1])]\n plt.fill_between(tmpx, -100, 100, facecolor=\"red\", alpha=0.15, label=\"sampling region\")\n\n plt.xlim(round(x[0]), round(x[-1]))\n ymin = min(min(v_R), min(dft_diff), min(final_shift))\n ymax = max(max(v_R), max(dft_diff), max(final_shift))\n plt.ylim(-0.2 + ymin, 0.2 + ymax)\n plt.xlabel(r\"distance along axis ($\\AA$)\", fontsize=15)\n plt.ylabel(\"Potential (V)\", fontsize=15)\n plt.legend(loc=9)\n plt.axhline(y=0, linewidth=0.2, color=\"black\")\n plt.title(str(title) + \" defect potential\", fontsize=18)\n plt.xlim(0, max(x))\n if saved:\n plt.savefig(str(title) + \"FreyplnravgPlot.pdf\")\n return None\n return plt\n\n\nclass KumagaiCorrection(DefectCorrection):\n \"\"\"\n A class for KumagaiCorrection class. Largely adapated from PyCDT code\n\n If this correction is used, please reference Kumagai and Oba's original paper\n (doi: 10.1103/PhysRevB.89.195205) as well as Freysoldt's original\n paper (doi: 10.1103/PhysRevLett.102.016402)\n\n NOTE that equations 8 and 9 from Kumagai et al. reference are divided by (4 pi) to get SI units\n \"\"\"\n\n def __init__(self, dielectric_tensor, sampling_radius=None, gamma=None):\n \"\"\"\n Initializes the Kumagai Correction\n Args:\n dielectric_tensor (float or 3x3 matrix): Dielectric constant for the structure\n\n optional data that can be tuned:\n sampling_radius (float): radius (in Angstrom) which sites must be outside\n of to be included in the correction. 
Publication by Kumagai advises to\n use the Wigner-Seitz radius of the defect supercell, so this is the default value.\n gamma (float): convergence parameter for gamma function.\n Code will automatically determine this if set to None.\n \"\"\"\n self.metadata = {\n \"gamma\": gamma,\n \"sampling_radius\": sampling_radius,\n \"potalign\": None,\n }\n\n if isinstance(dielectric_tensor, (int, float)):\n self.dielectric = np.identity(3) * dielectric_tensor\n else:\n self.dielectric = np.array(dielectric_tensor)\n\n def get_correction(self, entry):\n \"\"\"\n Gets the Kumagai correction for a defect entry\n Args:\n entry (DefectEntry): defect entry to compute Kumagai correction on.\n\n Requires following parameters in the DefectEntry to exist:\n\n bulk_atomic_site_averages (list): list of bulk structure's atomic site averaged ESPs * charge,\n in same order as indices of bulk structure\n note this is the list given by VASP's OUTCAR (so it is multiplied by a test charge of -1)\n\n defect_atomic_site_averages (list): list of defect structure's atomic site averaged ESPs * charge,\n in same order as indices of defect structure\n note this is the list given by VASP's OUTCAR (so it is multiplied by a test charge of -1)\n\n site_matching_indices (list): list of corresponding site index values for\n bulk and defect site structures EXCLUDING the defect site itself\n (ex. [[bulk structure site index, defect structure's corresponding site index], ...])\n\n initial_defect_structure (Structure): Pymatgen Structure object representing un-relaxed defect\n structure\n\n defect_frac_sc_coords (array): Defect position in fractional coordinates of the supercell\n given in bulk_structure\n Returns:\n KumagaiCorrection values as a dictionary\n\n \"\"\"\n bulk_atomic_site_averages = entry.parameters[\"bulk_atomic_site_averages\"]\n defect_atomic_site_averages = entry.parameters[\"defect_atomic_site_averages\"]\n site_matching_indices = entry.parameters[\"site_matching_indices\"]\n defect_sc_structure = entry.parameters[\"initial_defect_structure\"]\n defect_frac_sc_coords = entry.parameters[\"defect_frac_sc_coords\"]\n\n lattice = defect_sc_structure.lattice\n volume = lattice.volume\n q = entry.defect.charge\n\n if not self.metadata[\"gamma\"]:\n self.metadata[\"gamma\"] = tune_for_gamma(lattice, self.dielectric)\n\n prec_set = [25, 28]\n g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(\n self.metadata[\"gamma\"], prec_set, lattice, self.dielectric\n )\n\n pot_shift = self.get_potential_shift(self.metadata[\"gamma\"], volume)\n si = self.get_self_interaction(self.metadata[\"gamma\"])\n es_corr = [(real_summation[ind] + recip_summation[ind] + pot_shift + si) for ind in range(2)]\n\n # increase precision if correction is not converged yet\n # TODO: allow for larger prec_set to be tried if this fails\n if abs(es_corr[0] - es_corr[1]) > 0.0001:\n logger.debug(\n \"Es_corr summation not converged! ({} vs. {})\\nTrying a larger prec_set...\".format(\n es_corr[0], es_corr[1]\n )\n )\n prec_set = [30, 35]\n g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(\n self.metadata[\"gamma\"], prec_set, lattice, self.dielectric\n )\n es_corr = [(real_summation[ind] + recip_summation[ind] + pot_shift + si) for ind in range(2)]\n if abs(es_corr[0] - es_corr[1]) > 0.0001:\n raise ValueError(\"Correction still not converged after trying prec_sets up to 35... serious error.\")\n\n
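# es_corr[0] is the converged lattice summation; scale it by -q**2/2 and by\n # kumagai_to_V to convert the atomic-unit lattice sum into an energy in eV\n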
serious error.\")\n\n es_corr = es_corr[0] * -(q ** 2.0) * kumagai_to_V / 2.0 # [eV]\n\n # if no sampling radius specified for pot align, then assuming Wigner-Seitz radius:\n if not self.metadata[\"sampling_radius\"]:\n wz = lattice.get_wigner_seitz_cell()\n dist = []\n for facet in wz:\n midpt = np.mean(np.array(facet), axis=0)\n dist.append(np.linalg.norm(midpt))\n self.metadata[\"sampling_radius\"] = min(dist)\n\n # assemble site_list based on matching indices\n # [[defect_site object, Vqb for site], .. repeat for all non defective sites]\n site_list = []\n for bs_ind, ds_ind in site_matching_indices:\n Vqb = -(defect_atomic_site_averages[int(ds_ind)] - bulk_atomic_site_averages[int(bs_ind)])\n site_list.append([defect_sc_structure[int(ds_ind)], Vqb])\n\n pot_corr = self.perform_pot_corr(\n defect_sc_structure,\n defect_frac_sc_coords,\n site_list,\n self.metadata[\"sampling_radius\"],\n q,\n r_vecs[0],\n g_vecs[0],\n self.metadata[\"gamma\"],\n )\n\n entry.parameters[\"kumagai_meta\"] = dict(self.metadata)\n entry.parameters[\"potalign\"] = pot_corr / (-q) if q else 0.0\n\n return {\n \"kumagai_electrostatic\": es_corr,\n \"kumagai_potential_alignment\": pot_corr,\n }\n\n def perform_es_corr(self, gamma, prec, lattice, charge):\n \"\"\"\n Peform Electrostatic Kumagai Correction\n Args:\n gamma (float): Ewald parameter\n prec (int): Precision parameter for reciprical/real lattice vector generation\n lattice: Pymatgen Lattice object corresponding to defect supercell\n charge (int): Defect charge\n Return:\n Electrostatic Point Charge contribution to Kumagai Correction (float)\n \"\"\"\n volume = lattice.volume\n\n g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(gamma, [prec], lattice, self.dielectric)\n recip_summation = recip_summation[0]\n real_summation = real_summation[0]\n\n es_corr = (\n recip_summation\n + real_summation\n + self.get_potential_shift(gamma, volume)\n + self.get_self_interaction(gamma)\n )\n\n es_corr *= -(charge ** 2.0) * kumagai_to_V / 2.0 # [eV]\n\n return es_corr\n\n def perform_pot_corr(\n self,\n defect_structure,\n defect_frac_coords,\n site_list,\n sampling_radius,\n q,\n r_vecs,\n g_vecs,\n gamma,\n ):\n \"\"\"\n For performing potential alignment in manner described by Kumagai et al.\n Args:\n defect_structure: Pymatgen Structure object corrsponding to the defect supercell\n\n defect_frac_coords (array): Defect Position in fractional coordinates of the supercell\n given in bulk_structure\n\n site_list: list of corresponding site index values for\n bulk and defect site structures EXCLUDING the defect site itself\n (ex. [[bulk structure site index, defect structure\"s corresponding site index], ... ]\n\n sampling_radius (float): radius (in Angstrom) which sites must be outside\n of to be included in the correction. 
Publication by Kumagai advises to\n use the Wigner-Seitz radius of the defect supercell, so this is the default value.\n\n q (int): Defect charge\n\n r_vecs: List of real lattice vectors to use in summation\n\n g_vecs: List of reciprocal lattice vectors to use in summation\n\n gamma (float): Ewald parameter\n\n Return:\n Potential alignment contribution to Kumagai Correction (float)\n \"\"\"\n volume = defect_structure.lattice.volume\n potential_shift = self.get_potential_shift(gamma, volume)\n\n pot_dict = {} # keys will be site index in the defect structure\n for_correction = [] # region to sample for correction\n\n # for each atom, do the following:\n # (a) get relative_vector from defect_site to site in defect_supercell structure\n # (b) recalculate the recip and real summation values based on this r_vec\n # (c) get information needed for pot align\n for site, Vqb in site_list:\n dist, jimage = site.distance_and_image_from_frac_coords(defect_frac_coords)\n vec_defect_to_site = defect_structure.lattice.get_cartesian_coords(\n site.frac_coords - jimage - defect_frac_coords\n )\n dist_to_defect = np.linalg.norm(vec_defect_to_site)\n if abs(dist_to_defect - dist) > 0.001:\n raise ValueError(\"Error in computing vector to defect\")\n\n relative_real_vectors = [r_vec - vec_defect_to_site for r_vec in r_vecs[:]]\n\n real_sum = self.get_real_summation(gamma, relative_real_vectors)\n recip_sum = self.get_recip_summation(gamma, g_vecs, volume, r=vec_defect_to_site[:])\n\n Vpc = (real_sum + recip_sum + potential_shift) * kumagai_to_V * q\n\n defect_struct_index = defect_structure.index(site)\n pot_dict[defect_struct_index] = {\n \"Vpc\": Vpc,\n \"Vqb\": Vqb,\n \"dist_to_defect\": dist_to_defect,\n }\n\n logger.debug(\"For atom {}\\n\\tbulk/defect DFT potential difference = \" \"{}\".format(defect_struct_index, Vqb))\n logger.debug(\"\\tanisotropic model charge: {}\".format(Vpc))\n logger.debug(\"\\t\\treciprocal part: {}\".format(recip_sum * kumagai_to_V * q))\n logger.debug(\"\\t\\treal part: {}\".format(real_sum * kumagai_to_V * q))\n logger.debug(\"\\t\\tself interaction part: {}\".format(potential_shift * kumagai_to_V * q))\n logger.debug(\"\\trelative_vector to defect: {}\".format(vec_defect_to_site))\n\n if dist_to_defect > sampling_radius:\n logger.debug(\n \"\\tdistance to defect is {} which is outside minimum sampling \"\n \"radius {}\".format(dist_to_defect, sampling_radius)\n )\n for_correction.append(Vqb - Vpc)\n else:\n logger.debug(\n \"\\tdistance to defect is {} which is inside minimum sampling \"\n \"radius {} (so will not include for correction)\"\n \"\".format(dist_to_defect, sampling_radius)\n )\n\n if len(for_correction):\n pot_alignment = np.mean(for_correction)\n else:\n logger.info(\"No atoms sampled outside the sampling radius!\" \" Assigning potential alignment value of 0.\")\n pot_alignment = 0.0\n\n self.metadata[\"potalign\"] = pot_alignment\n pot_corr = -q * pot_alignment\n\n # log uncertainty stats:\n self.metadata[\"pot_corr_uncertainty_md\"] = {\n \"stats\": stats.describe(for_correction)._asdict(),\n \"number_sampled\": len(for_correction),\n }\n self.metadata[\"pot_plot_data\"] = pot_dict\n\n logger.info(\"Kumagai potential alignment (site averaging): %f\", pot_alignment)\n logger.info(\"Kumagai potential alignment correction energy: %f eV\", pot_corr)\n\n return pot_corr\n\n def get_real_summation(self, gamma, real_vectors):\n \"\"\"\n Get real summation term from list of real-space vectors\n \"\"\"\n real_part = 0\n invepsilon = np.linalg.inv(self.dielectric)\n
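# anisotropic Ewald real-space sum: distances are measured in the metric of\n # the inverse dielectric tensor and screened by erfc(gamma * r)\n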
rd_epsilon = np.sqrt(np.linalg.det(self.dielectric))\n\n for r_vec in real_vectors:\n if np.linalg.norm(r_vec) > 1e-8:\n loc_res = np.sqrt(np.dot(r_vec, np.dot(invepsilon, r_vec)))\n nmr = scipy.special.erfc(gamma * loc_res) # pylint: disable=E1101\n real_part += nmr / loc_res\n\n real_part /= 4 * np.pi * rd_epsilon\n\n return real_part\n\n def get_recip_summation(self, gamma, recip_vectors, volume, r=(0.0, 0.0, 0.0)):\n \"\"\"\n Get reciprocal summation term from list of reciprocal-space vectors\n \"\"\"\n recip_part = 0\n\n for g_vec in recip_vectors:\n # don't need to avoid G=0, because it will not be\n # in recip list (if generate_R_and_G_vecs is used)\n Gdotdiel = np.dot(g_vec, np.dot(self.dielectric, g_vec))\n summand = np.exp(-Gdotdiel / (4 * (gamma ** 2))) * np.cos(np.dot(g_vec, r)) / Gdotdiel\n recip_part += summand\n\n recip_part /= volume\n\n return recip_part\n\n def get_self_interaction(self, gamma):\n \"\"\"\n Args:\n gamma (float): Ewald parameter\n\n Returns:\n Self-interaction energy of defect.\n \"\"\"\n determ = np.linalg.det(self.dielectric)\n return -gamma / (2.0 * np.pi * np.sqrt(np.pi * determ))\n\n @staticmethod\n def get_potential_shift(gamma, volume):\n \"\"\"\n Args:\n gamma (float): Ewald parameter\n volume (float): Supercell volume.\n\n Returns:\n Potential shift for defect.\n \"\"\"\n return -0.25 / (volume * gamma ** 2.0)\n\n def plot(self, title=None, saved=False):\n \"\"\"\n Plots the atomic site electrostatic potential against the long-range and short-range models\n from Kumagai and Oba (doi: 10.1103/PhysRevB.89.195205)\n \"\"\"\n if \"pot_plot_data\" not in self.metadata.keys():\n raise ValueError(\"Cannot plot potential alignment before running correction!\")\n\n sampling_radius = self.metadata[\"sampling_radius\"]\n site_dict = self.metadata[\"pot_plot_data\"]\n potalign = self.metadata[\"potalign\"]\n\n plt.figure()\n plt.clf()\n\n distances, sample_region = [], []\n Vqb_list, Vpc_list, diff_list = [], [], []\n for site_ind, site_data in site_dict.items():\n dist = site_data[\"dist_to_defect\"]\n distances.append(dist)\n\n Vqb = site_data[\"Vqb\"]\n Vpc = site_data[\"Vpc\"]\n\n Vqb_list.append(Vqb)\n Vpc_list.append(Vpc)\n diff_list.append(Vqb - Vpc)\n\n if dist > sampling_radius:\n sample_region.append(Vqb - Vpc)\n\n plt.plot(\n distances,\n Vqb_list,\n color=\"r\",\n marker=\"^\",\n linestyle=\"None\",\n label=\"$V_{q/b}$\",\n )\n\n plt.plot(\n distances,\n Vpc_list,\n color=\"g\",\n marker=\"o\",\n linestyle=\"None\",\n label=\"$V_{pc}$\",\n )\n\n plt.plot(\n distances,\n diff_list,\n color=\"b\",\n marker=\"x\",\n linestyle=\"None\",\n label=\"$V_{q/b}$ - $V_{pc}$\",\n )\n\n x = np.arange(sampling_radius, max(distances) * 1.05, 0.01)\n y_max = max(max(Vqb_list), max(Vpc_list), max(diff_list)) + 0.1\n y_min = min(min(Vqb_list), min(Vpc_list), min(diff_list)) - 0.1\n plt.fill_between(x, y_min, y_max, facecolor=\"red\", alpha=0.15, label=\"sampling region\")\n plt.axhline(y=potalign, linewidth=0.5, color=\"red\", label=\"pot. align. / -q\")\n\n plt.legend(loc=0)\n plt.axhline(y=0, linewidth=0.2, color=\"black\")\n\n plt.ylim([y_min, y_max])\n plt.xlim([0, max(distances) * 1.1])\n\n plt.xlabel(r\"Distance from defect ($\\AA$)\", fontsize=20)\n plt.ylabel(\"Potential (V)\", fontsize=20)\n plt.title(str(title) + \" atomic site potential plot\", fontsize=20)\n\n if saved:\n plt.savefig(str(title) + \"KumagaiESPavgPlot.pdf\")\n return None\n return plt\n\n\nclass BandFillingCorrection(DefectCorrection):\n \"\"\"\n A class for the BandFillingCorrection. 
Largely adapted from PyCDT code\n \"\"\"\n\n def __init__(self, resolution=0.01):\n \"\"\"\n Initializes the Bandfilling correction\n\n Args:\n resolution (float): energy resolution to maintain for gap states\n \"\"\"\n self.resolution = resolution\n self.metadata = {\"num_hole_vbm\": None, \"num_elec_cbm\": None, \"potalign\": None}\n\n def get_correction(self, entry):\n \"\"\"\n Gets the BandFilling correction for a defect entry\n Args:\n entry (DefectEntry): defect entry to compute BandFilling correction on.\n Requires following parameters in the DefectEntry to exist:\n eigenvalues\n dictionary of defect eigenvalues, as stored in a Vasprun object\n\n kpoint_weights (list of floats)\n kpoint weights corresponding to the dictionary of eigenvalues,\n as stored in a Vasprun object\n\n potalign (float)\n potential alignment for the defect calculation\n Only applies to non-zero charge,\n When using potential alignment correction (freysoldt or kumagai),\n need to divide by -q\n\n cbm (float)\n CBM of bulk calculation (or band structure calculation of bulk);\n calculated on same level of theory as the defect\n (ex. GGA defects -> requires GGA cbm)\n\n vbm (float)\n VBM of bulk calculation (or band structure calculation of bulk);\n calculated on same level of theory as the defect\n (ex. GGA defects -> requires GGA vbm)\n\n run_metadata[\"defect_incar\"] (dict)\n Dictionary of INCAR settings for the defect calculation,\n required to check if the calculation included spin-orbit coupling\n (to determine the spin factor for occupancies of the electron bands)\n Returns:\n Bandfilling Correction value as a dictionary\n\n \"\"\"\n eigenvalues = entry.parameters[\"eigenvalues\"]\n kpoint_weights = entry.parameters[\"kpoint_weights\"]\n potalign = entry.parameters[\"potalign\"]\n vbm = entry.parameters[\"vbm\"]\n cbm = entry.parameters[\"cbm\"]\n soc_calc = entry.parameters[\"run_metadata\"][\"defect_incar\"].get(\"LSORBIT\")\n\n bf_corr = self.perform_bandfill_corr(eigenvalues, kpoint_weights, potalign, vbm, cbm, soc_calc)\n\n entry.parameters[\"bandfilling_meta\"] = dict(self.metadata)\n\n return {\"bandfilling_correction\": bf_corr}\n\n def perform_bandfill_corr(self, eigenvalues, kpoint_weights, potalign, vbm, cbm, soc_calc=False):\n \"\"\"\n This calculates the band filling correction based on excess of electrons/holes in CB/VB...\n\n Note that the total free holes and electrons may also be used for a \"shallow donor/acceptor\"\n correction with specified band shifts:\n +num_elec_cbm * Delta E_CBM (or -num_hole_vbm * Delta E_VBM)\n \"\"\"\n bf_corr = 0.0\n\n self.metadata[\"potalign\"] = potalign\n self.metadata[\"num_hole_vbm\"] = 0.0\n self.metadata[\"num_elec_cbm\"] = 0.0\n\n core_occupation_value = list(eigenvalues.values())[0][0][0][1] # get occupation of a core eigenvalue\n if len(eigenvalues.keys()) == 1:\n # needed because occupation of non-spin calcs is sometimes still 1... 
should be 2\n spinfctr = 2.0 if core_occupation_value == 1.0 and not soc_calc else 1.0\n elif len(eigenvalues.keys()) == 2:\n spinfctr = 1.0\n else:\n raise ValueError(\"Eigenvalue keys greater than 2\")\n\n # for tracking mid gap states...\n shifted_cbm = cbm - potalign # shift cbm with potential alignment\n shifted_vbm = vbm - potalign # shift vbm with potential alignment\n\n for spinset in eigenvalues.values():\n for kptset, weight in zip(spinset, kpoint_weights):\n for eig, occu in kptset: # eig is eigenvalue and occu is occupation\n if occu and (eig > shifted_cbm - self.resolution): # donor MB correction\n bf_corr += weight * spinfctr * occu * (eig - shifted_cbm) # \"move the electrons down\"\n self.metadata[\"num_elec_cbm\"] += weight * spinfctr * occu\n elif (occu != core_occupation_value) and (\n eig <= shifted_vbm + self.resolution\n ): # acceptor MB correction\n bf_corr += (\n weight * spinfctr * (core_occupation_value - occu) * (shifted_vbm - eig)\n ) # \"move the holes up\"\n self.metadata[\"num_hole_vbm\"] += weight * spinfctr * (core_occupation_value - occu)\n\n bf_corr *= -1 # need to take negative of this shift for energetic correction\n\n return bf_corr\n\n\nclass BandEdgeShiftingCorrection(DefectCorrection):\n \"\"\"\n A class for BandEdgeShiftingCorrection class. Largely adapted from PyCDT code\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the BandEdgeShiftingCorrection class\n \"\"\"\n self.metadata = {\n \"vbmshift\": 0.0,\n \"cbmshift\": 0.0,\n }\n\n def get_correction(self, entry):\n \"\"\"\n Gets the BandEdge correction for a defect entry\n Args:\n entry (DefectEntry): defect entry to compute BandFilling correction on.\n Requires some parameters in the DefectEntry to properly function:\n hybrid_cbm (float)\n CBM of HYBRID bulk calculation one wishes to shift to\n\n hybrid_vbm (float)\n VBM of HYBRID bulk calculation one wishes to shift to\n\n cbm (float)\n CBM of bulk calculation (or band structure calculation of bulk);\n calculated on same level of theory as the defect\n (ex. GGA defects -> requires GGA cbm)\n\n vbm (float)\n VBM of bulk calculation (or band structure calculation of bulk);\n calculated on same level of theory as the defect\n (ex. GGA defects -> requires GGA vbm)\n Returns:\n BandfillingCorrection value as a dictionary\n \"\"\"\n hybrid_cbm = entry.parameters[\"hybrid_cbm\"]\n hybrid_vbm = entry.parameters[\"hybrid_vbm\"]\n vbm = entry.parameters[\"vbm\"]\n cbm = entry.parameters[\"cbm\"]\n\n self.metadata[\"vbmshift\"] = hybrid_vbm - vbm # note vbmshift has UPWARD as positive convention\n self.metadata[\"cbmshift\"] = hybrid_cbm - cbm # note cbmshift has UPWARD as positive convention\n\n charge = entry.charge\n bandedgeshifting_correction = charge * self.metadata[\"vbmshift\"]\n entry.parameters[\"bandshift_meta\"] = dict(self.metadata)\n\n return {\"bandedgeshifting_correction\": bandedgeshifting_correction}\n" ]
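The four building blocks in the record above (real-space erfc sum, reciprocal-space sum, self-interaction, and potential shift) are the pieces of an anisotropic Ewald summation, and their total must not depend on the convergence parameter gamma. A minimal runnable sketch of that invariance check for the isotropic special case (dielectric = identity, simple-cubic lattice, cutoffs chosen by me; an illustration, not code from the record):

import itertools
import numpy as np
from scipy.special import erfc

def ewald_point_charge(gamma, a=1.0, n=8):
    # Isotropic specialisation: dielectric = identity, cubic lattice of side a,
    # real- and reciprocal-space sums truncated at a cube of half-width n.
    volume = a ** 3
    rng = range(-n, n + 1)
    real = recip = 0.0
    for i, j, k in itertools.product(rng, rng, rng):
        if (i, j, k) == (0, 0, 0):
            continue
        r = a * np.linalg.norm([i, j, k])
        real += erfc(gamma * r) / r                    # cf. get_real_summation
        g = (2 * np.pi / a) * np.linalg.norm([i, j, k])
        recip += np.exp(-g ** 2 / (4 * gamma ** 2)) / g ** 2
    real /= 4 * np.pi
    recip /= volume                                    # cf. get_recip_summation
    self_int = -gamma / (2 * np.pi * np.sqrt(np.pi))   # cf. get_self_interaction
    shift = -0.25 / (volume * gamma ** 2)              # cf. get_potential_shift
    return real + recip + self_int + shift

v1, v2 = ewald_point_charge(2.0), ewald_point_charge(3.0)
assert abs(v1 - v2) < 1e-8   # the converged total is independent of gamma
print(4 * np.pi * v1)        # ~ -2.8373, the simple-cubic Madelung constant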
[ [ "numpy.dot", "numpy.random.seed", "numpy.eye", "numpy.round", "numpy.random.uniform", "numpy.array" ], [ "numpy.swapaxes", "numpy.abs", "numpy.allclose", "numpy.sqrt", "numpy.reshape", "numpy.linalg.eig", "numpy.eye", "numpy.all", "numpy.real", "numpy.random.rand", "numpy.array", "numpy.zeros" ], [ "matplotlib.rc" ], [ "numpy.diag", "matplotlib.pyplot.legend", "numpy.dot", "numpy.imag", "numpy.sqrt", "numpy.dtype", "matplotlib.pyplot.plot", "numpy.mean", "numpy.cross", "numpy.exp", "numpy.roll", "numpy.arange", "scipy.stats.describe", "numpy.linalg.det", "numpy.real", "scipy.special.erfc", "matplotlib.pyplot.figure", "numpy.multiply", "numpy.linalg.inv", "matplotlib.pyplot.ylim", "numpy.identity", "matplotlib.pyplot.fill_between", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhline", "numpy.fft.fft", "numpy.linalg.norm", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.19", "1.13", "1.16", "1.9", "1.18", "1.21", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
QPC-database/multimodal-affinities
[ "c3298e8db56a8b41110cc5681852f9f15d6deaa6", "c3298e8db56a8b41110cc5681852f9f15d6deaa6" ]
[ "multimodal_affinities/visualization/image_utils.py", "multimodal_affinities/clustering/trainable/auto_constraints.py" ]
[ "import cv2\nimport numpy as np\nfrom PIL import Image\nimport random\nimport string\nimport os\n\nclass ImageUtils(object):\n @staticmethod\n def read_image_for_bokeh(image_path, resize_height=None):\n # Open image, and make sure it's RGB*A*\n image = Image.open(image_path).convert('RGBA')\n print(\"image: {}\".format(image))\n if resize_height:\n image = ImageUtils.resize_image_by_height(image, resize_height)\n\n image_width, image_height = image.size\n # Create an array representation for the image `img`, and an 8-bit \"4\n # layer/RGBA\" version of it `view`.\n img = np.empty((image_height, image_width), dtype=np.uint32)\n view = img.view(dtype=np.uint8).reshape((image_height, image_width, 4))\n # Copy the RGBA image into view, flipping it so it comes right-side up\n # with a lower-left origin\n view[:, :, :] = np.flipud(np.asarray(image))\n print(\"input image width x height {}x{}\".format(image_width, image_height))\n return view, (image_width, image_height)\n\n @staticmethod\n def resize_image_by_height(pil_image, dst_height):\n src_width, src_height = pil_image.size\n factor = float(src_height) / dst_height\n dst_width = int(src_width / factor)\n pil_image.thumbnail((dst_width, dst_height), Image.ANTIALIAS)\n return pil_image\n\n\ndef resize_image(img, output_dimensions):\n '''\n resizes an img to output dimensions in x and y while preserving aspect ratio.\n pads (or cuts) along vertical direction if needed\n :param img:\n :param output_dimensions:\n :return:\n '''\n\n image_width = output_dimensions[0]\n image_height = output_dimensions[1]\n img_shape = img.shape\n num_pad_x = image_width - img.shape[1]\n pad_both_x_and_y = True\n if pad_both_x_and_y and num_pad_x > 0:\n num_pad_l = int(float(num_pad_x) / 2)\n num_pad_r = int(num_pad_x) - num_pad_l\n img = cv2.copyMakeBorder(img, 0, 0, num_pad_l, num_pad_r, cv2.BORDER_WRAP)\n elif not pad_both_x_and_y or num_pad_x < 0:\n resize_factor = float(img_shape[1]) / image_width\n img = cv2.resize(img, (int(img_shape[1] / resize_factor),\n int(img_shape[0] / resize_factor)))\n\n num_pad_y = image_height - img.shape[0]\n if num_pad_y > 0:\n num_pad_t = int(float(num_pad_y) / 2)\n num_pad_b = int(num_pad_y) - num_pad_t\n img = cv2.copyMakeBorder(img, num_pad_t, num_pad_b, 0, 0, cv2.BORDER_WRAP)\n elif num_pad_y < 0:\n num_pad_t = int(float(-num_pad_y) / 2)\n num_pad_b = int(-num_pad_y) - num_pad_t\n img = img[num_pad_t:-num_pad_b,:,:]\n\n # # debugging crops\n # random_filename = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))\n # cv2.imwrite(os.path.join(output_directory, random_filename + '.jpg'), img)\n return img", "import sys\nimport numpy as np\nimport torch\n\nif (sys.version[0] == 2):\n import cPickle as pickle\nelse:\n import pickle\nfrom scipy.spatial.distance import cdist\nfrom scipy.sparse.csgraph import minimum_spanning_tree, connected_components\nfrom scipy.sparse import csr_matrix, tril, find, triu, coo_matrix\nfrom sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering\n\n\nclass AutoConstraints(object):\n def __init__(self):\n pass\n\n def generate_auto_must_link_const_from_embeddings(self, embeddings, n_neighbors=2, dist_meas=\"cosine\", w_multiplier=1):\n if len(embeddings) == 0:\n return []\n raw_embeddings = [embedding.detach().cpu().numpy() for embedding in embeddings]\n X = np.stack(raw_embeddings, axis=0)\n\n print(\"--- mutual Knn ---\")\n E, W, connections_per_point, pairs_list = self.connectivity_structure_mknn(X=X,\n n_neighbors=n_neighbors,\n 
dist_meas=dist_meas,\n w_multiplier=w_multiplier)\n return pairs_list\n\n\n def find_knn(self, X, n_neighbors, dist_meas, w_multiplier = 1):\n \"\"\"\n :param X: dataset\n :param n_neighbours: number of neighbours for mknn calculation\n :return: P - mknn graph (csr matrix), Q - weights car matrix\n \"\"\"\n samples = X.shape[0]\n batchsize = 10000\n b = np.arange(n_neighbors + 1)\n b = tuple(b[1:].ravel())\n\n z = np.zeros((samples, n_neighbors))\n weigh = np.zeros_like(z)\n X = np.reshape(X, (X.shape[0], -1))\n # This loop speeds up the computation by operating in batches\n # This can be parallelized to further utilize CPU/GPU resource\n for x in np.arange(0, samples, batchsize):\n start = x\n end = min(x + batchsize, samples)\n w = w_multiplier * cdist(X[start:end], X, dist_meas)\n # the first columns will be the indexes of the knn of each sample (the first column is always the same\n # index as the row)\n y = np.argpartition(w, b, axis=1)\n\n z[start:end, :] = y[:, 1:n_neighbors + 1]\n # the weights are the distances between the two samples\n weigh[start:end, :] = np.reshape(\n w[tuple(np.repeat(np.arange(end - start), n_neighbors)), tuple(\n y[:, 1:n_neighbors + 1].ravel())], (end - start, n_neighbors))\n del (w)\n\n ind = np.repeat(np.arange(samples), n_neighbors)\n P = csr_matrix((np.ones((samples * n_neighbors)), (ind.ravel(), z.ravel())), shape=(samples, samples))\n Q = csr_matrix((weigh.ravel(), (ind.ravel(), z.ravel())), shape=(samples, samples))\n return P, Q\n\n def generate_auto_cannot_link_const_from_ner_tags(self, entities, max_connections_per_point = 20):\n pairs_list = []\n tags = np.array([entity.ner_tag if entity.ner_tag is not None else -1 for entity in entities])\n indices_with_tags = np.where(tags > 0)\n counter_list = np.zeros((len(entities), 1))\n for ind_i in indices_with_tags[0].tolist():\n for ind_j in indices_with_tags[0].tolist():\n if tags[ind_i] == tags[ind_j]:\n continue\n if counter_list[ind_i] <= max_connections_per_point and counter_list[ind_j] <= max_connections_per_point:\n counter_list[ind_i] = counter_list[ind_i] + 1\n counter_list[ind_j] = counter_list[ind_j] + 1\n pairs_list.append([ind_i, ind_j])\n indices_upper_case = [i for i,entity in enumerate(entities) if entity.text.isupper()]\n for ind_i in indices_upper_case:\n for ind_j in range(len(entities)):\n if ind_j not in indices_upper_case:\n if counter_list[ind_i] <= max_connections_per_point and counter_list[ind_j] <= max_connections_per_point:\n counter_list[ind_i] = counter_list[ind_i] + 1\n counter_list[ind_j] = counter_list[ind_j] + 1\n pairs_list.append([ind_i, ind_j])\n return pairs_list\n\n def generate_auto_cannot_link_const_from_geometric_embeddings(self, embeddings, ratio_threshold = 1.2, max_connections_per_point = 15):\n \"\"\"\n :param X: the dataset\n :param ratio_threshold\n :return: pairds_list\n \"\"\"\n if len(embeddings) == 0:\n return []\n pairs_list = []\n height_vals = [embedding.detach().cpu().numpy()[0][2] for embedding in embeddings]\n counter_list = np.zeros((len(height_vals), 1))\n for i in range(len(height_vals)):\n height_i = height_vals[i]\n for j in range(i+1, len(height_vals)):\n height_j = height_vals[j]\n ratio_curr = float(height_i) / height_j\n if ratio_curr >= ratio_threshold or (1 / ratio_curr) >= ratio_threshold:\n if counter_list[i] <= max_connections_per_point and counter_list[j] <= max_connections_per_point:\n counter_list[i] = counter_list[i] + 1\n counter_list[j] = counter_list[j] + 1\n pairs_list.append([i,j])\n 
print(\"cannot_link_const_from_geometric_embeddings contributed %d new cannot-links\" % len(pairs_list))\n return pairs_list\n\n\n def connectivity_structure_mknn(self, X, n_neighbors, dist_meas, w_multiplier = 1):\n \"\"\"\n :param X: the dataset\n :param n_neighbours: the number of closest neighbours taken into account\n :param w_multiplier: if 1, obtains mutual nearest neighbors, if -1 obtains mutual farthest neighbors\n :return: matrix E with 1 where the two points are mknn. the matrix is lower triangular (zeros in the top triangular)\n so that each connection will be taken into account only once.\n W is a matrix of the weight of each connection. both are sparse matrices.\n \"\"\"\n samples = X.shape[0]\n P, Q = self.find_knn(X, n_neighbors=n_neighbors, dist_meas=dist_meas, w_multiplier=w_multiplier)\n # Tcsr = minimum_spanning_tree(Q)\n P = P.minimum(P.transpose()) # + Tcsr.maximum(Tcsr.transpose())\n index = np.asarray(find(P)).T\n E = csr_matrix((np.ones(len(index)), (index[:, 0], index[:, 1])), [samples, samples])\n connections_per_point = np.sum(E, 0) # sum of each row\n E = triu(E, k=1)\n a = np.sum(connections_per_point) / samples # calculating the averge number of connections\n w = \\\n np.divide(a, np.sqrt(\n np.asarray(connections_per_point[0][0, E.row]) * np.asarray(connections_per_point[0][0, E.col])))[0]\n W = coo_matrix((w, (E.row, E.col)), [samples, samples])\n print('number of connections:', len(E.data), 'average connection per point', a)\n\n\n rows, cols, _ = find(P)\n pairs_list = []\n for i, j in zip(rows, cols):\n pairs_list.append([i,j])\n\n return E, W, connections_per_point, pairs_list" ]
[ [ "numpy.asarray", "numpy.empty" ], [ "scipy.sparse.coo_matrix", "numpy.sum", "scipy.sparse.find", "numpy.reshape", "numpy.arange", "numpy.asarray", "scipy.spatial.distance.cdist", "numpy.stack", "numpy.ones", "numpy.zeros_like", "numpy.argpartition", "scipy.sparse.triu", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
RegentLee/master_research
[ "ee8e45abc890c7103c1c9917954c5958b48782f6" ]
[ "util/my_util.py" ]
[ "import numpy as np\n\n#############################################\n# variable #\n#############################################\nval = False\nx = 0\ny = 0\n\n\n#############################################\n# function #\n#############################################\ndef RMSD(A, B):\n mse = np.sum(np.power(A - B, 2)/B.size)\n return np.sqrt(mse)\n\ndef MAE(A, B):\n A = 59.2/2*(A + 1)\n B = 59.2/2*(B + 1)\n mae = np.sum(np.abs(A - B))/B.size\n return mae\n\n'''def DALI(A, B): not used\n \"\"\"Citation:\n Holm, Liisa. \n \"DALI and the persistence of protein shape.\" \n Protein Science 29.1 (2020): 128-140.\n APPENDIX I: SCORES USED IN DALI\n \"\"\"\n DALI_score = 0.2*len(B)\n A = 10*((A + 1)*3)\n B = 10*((B + 1)*3)\n for i in range(len(B)):\n for j in range(i + 1, len(B)):\n DALI_score += 2*(0.2 - 2*np.abs(A[i][j] - B[i][j])/(A[i][j] + B[i][j]))*np.exp(-((A[i][j] + B[i][j])/(2*20))**2)\n m_L = 7.95 + 0.71*len(B) - 0.000259*len(B)**2 - 0.00000192*len(B)**3\n Z_score = (DALI_score - m_L)/(0.5*m_L)\n return Z_score'''\n\n" ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GregDMeyer/dynamite
[ "440f0c3674bf12a835b8ad4b3c10c303c2d28265", "440f0c3674bf12a835b8ad4b3c10c303c2d28265" ]
[ "dynamite/operators.py", "tests/integration/dynamite_test_runner.py" ]
[ "\"\"\"\nThis module provides the building blocks for Hamiltonians, and\ndefines their built-in behavior and operations.\n\"\"\"\n\nimport numpy as np\n\nfrom . import config, validate, msc_tools\nfrom .computations import evolve, eigsolve\nfrom .subspaces import Full\nfrom .states import State\n\nclass Operator:\n \"\"\"\n A class representing a quantum operator.\n\n This class generally won't be directly instantiated by the user, but is returned by the\n other functions in this module.\n \"\"\"\n\n def __init__(self):\n\n self._L = config.L\n self._max_spin_idx = None\n self._mats = {}\n self._msc = None\n self._is_reduced = False\n self._shell = config.shell\n\n self._subspaces = []\n\n self._tex = r'\\[\\text{operator}\\]'\n self._string = '[operator]'\n self._brackets = ''\n\n def copy(self):\n \"\"\"\n Return a copy of the operator.\n Copy will not have its PETSc matrix already built,\n even if the operator being copied does.\n\n Returns\n -------\n Operator\n A copy of the operator\n \"\"\"\n rtn = Operator()\n rtn.msc = self.msc.copy()\n rtn.is_reduced = self.is_reduced\n rtn.shell = self.shell\n\n if self._subspaces:\n for left, right in self.get_subspace_list():\n rtn.add_subspace(left, right)\n\n rtn.tex = self.tex\n rtn.string = self.string\n rtn.brackets = self.brackets\n\n return rtn\n\n ### computations\n\n def evolve(self, state, t, **kwargs):\n r\"\"\"\n Evolve a state under the Hamiltonian. If the Hamiltonian's chain length has not\n been set, attempts to set it based on the state's length.\n\n This method wraps :meth:`dynamite.computations.evolve` (see that documentation\n for a full description of the method's functionality).\n\n Parameters\n ----------\n state : dynamite.states.State\n The initial state.\n\n t : float\n The time :math:`t` for which to evolve the state (can be negative or complex).\n\n **kwargs :\n Any further keyword arguments are passed to the underlying call to\n :meth:`dynamite.computations.evolve`. See that documentation for a\n detailed description of possible arguments.\n\n Returns\n -------\n dynamite.states.State\n The result vector :math:`\\Psi_f`.\n \"\"\"\n\n if self.L is None:\n self.L = state.L\n\n return evolve(self, state, t, **kwargs)\n\n def eigsolve(self, **kwargs):\n \"\"\"\n Find eigenvalues (and eigenvectors if requested) of the Hamiltonian. This class\n method is a wrapper on :meth:`dynamite.computations.eigsolve`. Any keyword\n arguments are passed to that function; see its documentation for details.\n\n By default, finds one (or possibly a few) eigenvalues with the smallest real\n values (i.e. the ground state).\n\n .. 
note:: The spin chain length ``L`` must be set before calling ``eigsolve``.\n\n Returns\n -------\n numpy.array or tuple(numpy.array, list(dynamite.states.State))\n Either a 1D numpy array of eigenvalues, or a pair containing that array\n and a list of the corresponding eigenvectors.\n \"\"\"\n return eigsolve(self, **kwargs)\n\n ### properties\n\n @property\n def max_spin_idx(self):\n '''\n Read-only property giving the largest spin index on which this operator\n has support.\n '''\n # save this so we don't recompute it every time.\n # cleared when MSC changes\n\n if self._max_spin_idx is None:\n self._max_spin_idx = msc_tools.max_spin_idx(self.msc)\n\n return self._max_spin_idx\n\n @property\n def L(self):\n \"\"\"\n Property representing the length of the spin chain.\n If L hasn't been set, defaults to the size of support of the operator (from site 0).\n \"\"\"\n return self._L\n\n @L.setter\n def L(self, value):\n L = validate.L(value)\n if L < self.max_spin_idx + 1:\n raise ValueError('Cannot set L smaller than one plus the largest spin index'\n 'on which the operator has support (max_spin_idx = %d)' %\n (self.max_spin_idx))\n for left, right in self.get_subspace_list():\n left.L = L\n right.L = L\n self._L = L\n\n def get_length(self):\n '''\n Returns the length of the spin chain for this operator. It is defined by the\n property :meth:`Operator.L` if it has been set by the user. Otherwise, the\n number of sites on which the operator has support is returned by default.\n '''\n if self.L is None:\n return self.max_spin_idx + 1\n else:\n return self.L\n\n @property\n def dim(self):\n \"\"\"\n Read-only attribute returning the dimensions of the matrix.\n \"\"\"\n return self.left_subspace.get_dimension(), self.right_subspace.get_dimension()\n\n @property\n def nnz(self):\n \"\"\"\n The number of nonzero elements per row of the sparse matrix.\n \"\"\"\n return msc_tools.nnz(self.msc)\n\n @property\n def msc_size(self):\n \"\"\"\n The number of elements in the MSC representation of the matrix.\n \"\"\"\n return len(self.msc)\n\n @property\n def density(self):\n \"\"\"\n The density of the sparse matrix---that is, the number of non-zero\n elements per row divided by the length of a row.\n\n .. note::\n This quantity is not always well defined when using a subspace, since\n it can vary by row. In that case, the returned quantity will be an upper bound.\n \"\"\"\n return self.nnz/self.dim[1]\n\n @property\n def shell(self):\n \"\"\"\n Switch whether to use shell matrices or not. For a description of shell\n matrices and their benefits, see the documentation.\n\n .. note::\n Changing this value after the matrix has been built will invoke a call\n to :meth:`Operator.destroy_mat`.\n \"\"\"\n return self._shell\n\n @shell.setter\n def shell(self,value):\n value = validate.shell(value)\n if value != self._shell:\n self.destroy_mat()\n self._shell = value\n\n @property\n def left_subspace(self):\n \"\"\"\n Get the default left subspace for this operator. This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n space = self.get_subspace_list()[-1][0]\n space.L = self.get_length()\n return space\n\n @property\n def right_subspace(self):\n \"\"\"\n Get the default right subspace for this operator. 
This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n space = self.get_subspace_list()[-1][1]\n space.L = self.get_length()\n return space\n\n @property\n def subspace(self):\n \"\"\"\n Get the default subspace for this operator. This is the subspace most recently\n added with :meth:`Operator.add_subspace`, or config.subspace if\n :meth:`Operator.add_subspace` has not been called.\n \"\"\"\n if self.left_subspace != self.right_subspace:\n raise ValueError(\"Left and right subspaces are different for this operator. \"\n \"use Operator.left_subspace and Operator.right_subspace to \"\n \"access them individually.\")\n return self.left_subspace\n\n @subspace.setter\n def subspace(self, value):\n self.add_subspace(value, value)\n\n def add_subspace(self, left, right=None):\n '''\n Add a pair of subspaces that this operator is compatible with.\n\n Parameters\n ----------\n\n left : dynamite.subspaces.Subspace\n A subspace the operator can map to (or multiply from the left)\n\n right : dynamite.subspaces.Subspace, optional\n A subspace the operator can map from (or multiply to the right). If omitted,\n the left subspace is reused for the right.\n '''\n if right is None:\n right = left\n\n left = validate.subspace(left)\n right = validate.subspace(right)\n\n left.L = self.get_length()\n right.L = self.get_length()\n\n if (left, right) not in self.get_subspace_list():\n self.get_subspace_list().append((left, right))\n\n def get_subspace_list(self):\n '''\n Return a list of the subspaces that have been registered for this operator.\n '''\n if not self._subspaces:\n if config.subspace is not None:\n self._subspaces = [(config.subspace, config.subspace)]\n else:\n self._subspaces = [(Full(), Full())]\n\n for left, right in self._subspaces:\n left.L = self.get_length()\n right.L = self.get_length()\n return self._subspaces\n\n ### text representations\n\n # TODO: perhaps encapsulate the string/tex methods into their own class\n\n @property\n def string(self):\n '''\n A text string that will be used to represent the object in printed expressions.\n '''\n return self._string\n\n @string.setter\n def string(self, value):\n self._string = value\n\n @property\n def tex(self):\n '''\n A LaTeX expression corresponding to the object. Can be set to any valid TeX.\n '''\n return self._tex\n\n @tex.setter\n def tex(self, value):\n self._tex = value\n\n @property\n def brackets(self):\n '''\n Which kind of brackets to surround the expression with. Options are\n ``'()'``, ``'[]'``, or ``''``, where the empty string means no brackets.\n '''\n return self._brackets\n\n @brackets.setter\n def brackets(self, value):\n value = validate.brackets(value)\n self._brackets = value\n\n @classmethod\n def _with_brackets(cls, string, brackets, tex=False):\n '''\n Put the given brackets around the string. If tex = True, the brackets\n have \\left and \\right appended to them.\n\n Parameters\n ----------\n string : str\n The string to put brackets around\n\n brackets : str\n The set of brackets. 
Should be either ``'[]'``, ``'()'``, or ``''``\n for no brackets.\n\n tex : bool, optional\n Whether to prepend ``\\left`` and ``\\right`` to the brackets.\n\n Returns\n -------\n str\n The result\n '''\n if not brackets:\n return string\n if tex:\n brackets = [x+y for x,y in zip([r'\\left',r'\\right'], brackets)]\n return string.join(brackets)\n\n def with_brackets(self, which):\n '''\n Return a string or tex representation of the object, surrounded by brackets\n if necessary. Useful for building larger expressions.\n\n Parameters\n ----------\n\n which : str\n Whether to return a normal string or tex. Options are ``'string'`` or ``'tex'``.\n '''\n if which == 'tex':\n strng = self.tex\n elif which == 'string':\n strng = self.string\n else:\n raise ValueError(\"which must be either 'string' or 'tex'.\")\n\n return self._with_brackets(strng, self._brackets, which == 'tex')\n\n def __str__(self):\n return self.string\n\n def __repr__(self):\n rtn = 'dynamite.Operator on {size} spins:\\n'.format(size = self.get_length())\n rtn += self.string\n return rtn\n\n def table(self):\n '''\n Return a string containing an ASCII table of the coefficients and terms\n that make up this operator.\n\n The table is generated directly from the MSC representation, so it is\n expanded and simplified to the same form no matter how the operator was\n built.\n\n Call :meth:`Operator.reduce_msc` first for a more compact table.\n '''\n return msc_tools.table(self.msc, self.get_length())\n\n def get_latex(self):\n '''\n Return a clean LaTeX representation of the operator.\n '''\n return self.tex.replace('{IDX', '{')\n\n def _repr_latex_(self):\n return '$' + self.get_latex() + '$'\n\n ### save to disk\n\n def serialize(self):\n '''\n Serialize the operator's MSC representation into a string of bytes.\n The byte string ONLY contains the MSC representation and the spin chain\n length. It does not save any other information, such as subspaces etc.\n\n Returns\n -------\n bytes\n The byte string containing the serialized object.\n\n '''\n return msc_tools.serialize(self.msc)\n\n def save(self, filename):\n \"\"\"\n Save the MSC representation of the operator to disk.\n Can be loaded again through :class:`Load`.\n\n .. note::\n If one calls this method in parallel, one MUST call :meth:`dynamite.config.initialize`\n first, or all processes will try to simultaneously write to the same file!\n\n Parameters\n ----------\n filename : str\n The path to the file to save the operator in.\n \"\"\"\n\n if config.initialized:\n from petsc4py import PETSc\n do_save = PETSc.COMM_WORLD.rank == 0\n else:\n # this should be the case when not running under MPI\n do_save = True\n\n # only process 0 should save\n if do_save:\n with open(filename, mode='wb') as f:\n f.write(self.serialize())\n\n if config.initialized:\n PETSc.COMM_WORLD.barrier()\n\n ### interface with PETSc\n\n def get_mat(self, subspaces=None, diag_entries=False):\n \"\"\"\n Get the PETSc matrix corresponding to this operator, building it if necessary.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The subspace pair to get the matrix for. If the matrix is already built for this\n pair, it will be reused. If this option is omitted, the last subspace added with\n :meth:`Operator.add_subspace` will be used, or the Full space by default.\n\n diag_entries : bool, optional\n Ensure that the sparse matrix has all diagonal elements filled,\n even if they are zero. Some PETSc functions fail if the\n diagonal elements do not exist. 
Currently a dummy argument; diagonal\n entries are always included.\n\n Returns\n -------\n petsc4py.PETSc.Mat\n The PETSc matrix corresponding to the operator.\n \"\"\"\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n if subspaces not in self._mats:\n self.build_mat(subspaces, diag_entries=diag_entries)\n\n return self._mats[subspaces]\n\n def build_mat(self, subspaces=None, diag_entries=False):\n \"\"\"\n Build the PETSc matrix, destroying any matrix that has already been built, and\n store it internally. This function does not return the matrix--see\n :meth:`Operator.get_mat` for that functionality. This function is rarely needed\n by the end user, since it is called automatically whenever the underlying matrix\n needs to be built or rebuilt.\n \"\"\"\n\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n if subspaces not in self.get_subspace_list():\n raise ValueError('Attempted to build matrix for a subspace that has not '\n 'been added to the operator.')\n\n config.initialize()\n from ._backend import bpetsc\n\n self.reduce_msc()\n term_array = self.msc\n\n # TODO: keep track of diag_entries\n diag_entries = True\n if term_array[0]['masks'] != 0:\n term_array = np.hstack([np.array([(0,0,0)], dtype=term_array.dtype), term_array])\n\n masks, indices = np.unique(term_array['masks'], return_index=True)\n\n # need to add the last index\n mask_offsets = np.ndarray((indices.size+1,), dtype=term_array.dtype['masks'])\n mask_offsets[:-1] = indices\n mask_offsets[-1] = term_array.shape[0]\n\n if not msc_tools.is_hermitian(term_array):\n raise ValueError('Building non-Hermitian matrices currently not supported.')\n\n mat = bpetsc.build_mat(\n L = self.get_length(),\n masks = np.ascontiguousarray(masks),\n mask_offsets = np.ascontiguousarray(mask_offsets),\n signs = np.ascontiguousarray(term_array['signs']),\n coeffs = np.ascontiguousarray(term_array['coeffs']),\n left_type = subspaces[0].to_enum(),\n left_data = subspaces[0].get_cdata(),\n right_type = subspaces[1].to_enum(),\n right_data = subspaces[1].get_cdata(),\n shell = self.shell,\n gpu = config.gpu\n )\n\n self._mats[subspaces] = mat\n\n def destroy_mat(self, subspaces=None):\n \"\"\"\n Destroy the PETSc matrix, freeing the corresponding memory. If the PETSc\n matrix does not exist (has not been built or has already been destroyed),\n the function has no effect.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n Destroy only the matrix for a particular pair of subspaces.\n \"\"\"\n if subspaces is not None:\n to_destroy = [subspaces]\n else:\n to_destroy = list(self._mats.keys())\n\n for k in to_destroy:\n mat = self._mats.pop(k, None)\n if mat is not None:\n mat.destroy()\n\n def create_states(self):\n '''\n Return a bra and ket compatible with this matrix.\n\n Returns\n -------\n tuple\n The two states\n '''\n bra = State(self.get_length(), self.left_subspace)\n ket = State(self.get_length(), self.right_subspace)\n return (bra, ket)\n\n ### mask, sign, coefficient representation of operators\n\n @property\n def msc(self):\n '''\n The (mask, sign, coefficient) representation of the operator. 
This\n representation is used internally by dynamite.\n '''\n return self._msc\n\n @msc.setter\n def msc(self, value):\n value = validate.msc(value)\n self._max_spin_idx = None\n self.is_reduced = False\n self._msc = value\n\n def reduce_msc(self):\n '''\n Combine and sort terms in the MSC representation, compressing it and\n preparing it for use in the backend.\n '''\n self.msc = msc_tools.combine_and_sort(self.msc)\n self.is_reduced = True\n\n @property\n def is_reduced(self):\n '''\n Whether :meth:`Operators.reduce_msc` has been called. Can also be set manually to avoid\n calling that function, if you are sure that the terms are sorted already.\n '''\n return self._is_reduced\n\n @is_reduced.setter\n def is_reduced(self, value):\n self._is_reduced = value\n\n def get_shifted_msc(self, shift, wrap_idx = None):\n '''\n Get the MSC representation of the operator, with all terms translated along\n the spin chain (away from zero) by ``shift`` spins.\n\n Parameters\n ----------\n shift : int\n Shift the whole operator along the spin chain by ``shift`` spins.\n\n wrap : bool\n The site at which to begin wrapping around to the beginning of the spin chain.\n e.g. takes a site index ``i`` to ``i % wrap_idx``. If ``None``, do not wrap.\n\n Returns\n -------\n numpy.ndarray\n A numpy array containing the representation.\n '''\n return msc_tools.shift(self.msc, shift, wrap_idx)\n\n ### interface to numpy\n\n def to_numpy(self, subspaces=None, sparse=True):\n '''\n Get a SciPy sparse matrix or dense numpy array representing the operator.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The subspaces for which to get the matrix. If this option is omitted,\n the last subspace added with :meth:`Operator.add_subspace` will be used,\n or the Full space by default.\n\n sparse : bool, optional\n Whether to return a sparse matrix or a dense array.\n\n Returns\n -------\n np.ndarray(dtype = np.complex128)\n The array\n '''\n\n if subspaces is None:\n subspaces = (self.left_subspace, self.right_subspace)\n\n ary = msc_tools.msc_to_numpy(self.msc,\n (subspaces[0].get_dimension(),\n subspaces[1].get_dimension()),\n subspaces[0].idx_to_state,\n subspaces[1].state_to_idx,\n sparse)\n\n return ary\n\n def spy(self, subspaces=None, max_size=1024):\n '''\n Use matplotlib to show the nonzero structure of the matrix.\n\n Parameters\n ----------\n subspaces : tuple(Subspace, Subspace), optional\n The pair of subspaces for which to plot the matrix. Defaults to the most\n recent added with the Operator.add_subspace method, or otherwise\n config.subspace.\n\n max_size : int, optional\n The maximum matrix dimension for which this function can be called.\n Calling it for too large a matrix will not be informative and probably run\n out of memory, so this is a small safeguard.\n '''\n # TODO: should subspaces really be passed as an argument like that? or should we somehow\n # reference subspaces from the list, like with an index?\n\n if any(dim > max_size for dim in self.dim):\n raise ValueError('Matrix too big to spy. 
Either build a smaller operator, or adjust '\n 'the maximum spy size with the argument \"max_size\"')\n\n from matplotlib import pyplot as plt\n plt.figure()\n normalized = np.array((self.to_numpy(subspaces=subspaces) != 0).toarray(), dtype = np.float)\n transformed = np.log(normalized + 1E-9)\n plt.imshow(transformed, cmap='Greys')\n plt.show()\n\n ### unary and binary operations\n\n def __add__(self, x):\n if not isinstance(x, Operator):\n x = x*identity()\n return self._op_add(x)\n\n def __radd__(self,x):\n if not isinstance(x, Operator):\n x = x*identity()\n return x + self\n\n def __sub__(self, x):\n return self + -x\n\n def __neg__(self):\n return -1*self\n\n def __mul__(self, x):\n if isinstance(x, Operator):\n return self._op_mul(x)\n elif isinstance(x, State):\n return self._vec_mul(x)\n else:\n return self._num_mul(x)\n\n def __rmul__(self, x):\n if isinstance(x, State):\n return TypeError('Left vector-matrix multiplication not currently '\n 'supported.')\n else:\n return self._num_mul(x)\n\n def __eq__(self, x):\n if isinstance(x, Operator):\n self.reduce_msc()\n x.reduce_msc()\n return np.array_equal(self.msc, x.msc)\n else:\n raise TypeError('Equality not supported for types %s and %s'\n % (str(type(self)), str(type(x))))\n\n def _op_add(self, o):\n rtn = self.copy()\n rtn.msc = msc_tools.msc_sum([self.msc, o.msc])\n rtn.tex = self.tex + ' + ' + o.tex\n rtn.string = self.string + ' + ' + o.string\n rtn.brackets = '()'\n return rtn\n\n def _op_mul(self, o):\n rtn = self.copy()\n rtn.msc = msc_tools.msc_product([self.msc, o.msc])\n rtn.string = self.with_brackets('string') + '*' + o.with_brackets('string')\n rtn.tex = self.with_brackets('tex') + o.with_brackets('tex')\n rtn.brackets = ''\n return rtn\n\n def dot(self, x, result = None):\n r'''\n Compute the matrix-vector product :math:`\\vec{y} = A\\vec{x}`\n\n Parameters\n ----------\n x : dynamite.states.State\n The input state x.\n\n result : dynamite.states.State, optional\n A state in which to store the result. If omitted, a new State object\n is created.\n\n Returns\n -------\n dynamite.states.State\n The result\n '''\n right_subspace = x.subspace\n right_match = [(left, right) for left, right in self.get_subspace_list()\n if right == right_subspace]\n if not right_match:\n raise ValueError('No operator subspace found that matches input vector subspace. '\n 'Try adding the subspace with the Operator.add_subspace method.')\n\n if result is None:\n if len(right_match) != 1:\n raise ValueError('Ambiguous subspace for result vector. Pass a state '\n 'with the desired subspace as the \"result\" option to '\n 'Operator.dot.')\n left_subspace = right_match[0][0]\n result = State(L=left_subspace.L,\n subspace=left_subspace)\n else:\n left_subspace = result.subspace\n\n if (left_subspace, right_subspace) not in right_match:\n raise ValueError('Subspaces of matrix and result vector do not match.')\n\n self.get_mat(subspaces=(left_subspace, right_subspace)).mult(x.vec, result.vec)\n return result\n\n def _vec_mul(self, x):\n return self.dot(x)\n\n def scale(self, x):\n '''\n Scale an operator by a numerical value without making a copy. 
This is more\n efficient than just doing x*Operator.\n\n Parameters\n ----------\n x : numeric type\n The coefficient to scale by\n '''\n try:\n self.msc['coeffs'] *= x\n except (ValueError,TypeError):\n raise ValueError('Error attempting to multiply operator by type \"%s\"' % str(type(x)))\n\n self.string = '{:.3f}*'.format(x) + self.with_brackets('string')\n self.tex = '{:.3f}*'.format(x) + self.with_brackets('tex')\n self.brackets = ''\n return self\n\n def _num_mul(self, x):\n rtn = self.copy()\n rtn.scale(x)\n return rtn\n\ndef load_from_file(filename):\n '''\n Load the operator in file ``filename`` and return the corresponding object.\n\n Parameters\n ----------\n filename : str\n The path of the file to load.\n\n Returns\n -------\n dynamite.operators.Load\n The operator as a dynamite object.\n '''\n with open(filename, 'rb') as f:\n bytestring = f.read()\n op = from_bytes(bytestring)\n return op\n\ndef from_bytes(data):\n \"\"\"\n Load operator from a byte string generated with the :meth:`Operator.serialize`\n method.\n\n Parameters\n ----------\n data : bytes\n The byte string containing the serialized object.\n\n Returns\n -------\n Operator\n The operator.\n \"\"\"\n o = Operator()\n msc = msc_tools.deserialize(data)\n o.msc = msc\n o.string = '[operator from bytes]'\n o.tex = r'\\left[\\text{operator from bytes}\\right]'\n return o\n\ndef op_sum(terms, nshow = 3):\n r\"\"\"\n A sum of several operators. This object can be used in a couple ways.\n All of the following return the exact same object,\n :math:`\\sigma^x_0 + \\sigma^y_0`\\:\n\n .. code:: python\n\n sigmax() + sigmay()\n op_sum([sigmax(), sigmay()])\n op_sum(s() for s in [sigmax, sigmay])\n\n Parameters\n ----------\n terms : list\n A list of operators to sum\n\n nshow : int, optional\n The number of terms to show in the string representations before adding\n an ellipsis.\n \"\"\"\n\n o = Operator()\n msc_terms = []\n strings = []\n texs = []\n\n iterterms = iter(terms)\n\n done = False\n for n,t in enumerate(iterterms):\n msc_terms.append(t.msc)\n strings.append(t.string)\n texs.append(t.tex)\n if n >= nshow:\n break\n else:\n done = True\n\n if not done:\n strings[-1] = '...'\n texs[-1] = r'\\cdots'\n msc_terms.append(msc_tools.msc_sum(t.msc for t in iterterms))\n\n o.msc = msc_tools.msc_sum(msc_terms)\n o.string = ' + '.join(strings)\n o.tex = ' + '.join(texs)\n o.brackets = '()'\n return o\n\ndef op_product(terms):\n \"\"\"\n A product of several operators. Called in same way as :meth:`op_sum`.\n For example:\n\n .. code:: python\n\n >>> sigmax() * sigmay() == op_product([sigmax(), sigmay()])\n True\n\n Parameters\n ----------\n terms : list\n A list of operators to multiply\n \"\"\"\n\n # from a practical standpoint, there doesn't seem to ever be a use case\n # for taking the product of a huge number of terms. 
So we assume the number\n # of terms is O(1) in this implementation.\n\n msc_terms = []\n strings = []\n texs = []\n for t in terms:\n msc_terms.append(t.msc)\n strings.append(t.with_brackets(which='string'))\n texs.append(t.with_brackets(which='tex'))\n\n if msc_terms:\n o = Operator()\n o.msc = msc_tools.msc_product(msc_terms)\n o.string = '*'.join(strings)\n o.tex = ''.join(texs)\n o.brackets = ''\n else:\n o = identity()\n\n return o\n\ndef index_sum(op, size = None, start = 0, boundary = 'open'):\n \"\"\"\n Duplicate the operator onto adjacent sites in the spin chain, and sum the resulting\n operators.\n In most cases, ``op`` should have support on site 0 (and possibly others).\n\n See the examples for more information.\n\n Parameters\n ----------\n op : Operator\n The operator to translate along the spin chain.\n\n size : int, optional\n The size of the support of the resulting operator. For open boundary conditions,\n the number of terms in the sum may be smaller than this. If not provided, defaults\n to the value of :attr:`Operator.L`.\n\n start : int, optional\n The site for the first operator in the sum.\n\n boundary : str, optional\n Whether to use 'open' or 'closed' boundary conditions. When ``op`` has support\n on more than one site, this determines whether the last few terms of the sum should\n wrap around to the beginning of the spin chain.\n \"\"\"\n\n if size is None:\n if op.L is None:\n raise ValueError('Must specify index_sum size with either the \"size\" argument '\n 'or by setting Operator.L (possibly through config.L).')\n else:\n size = op.L\n\n if boundary == 'open':\n stop = start + size - op.max_spin_idx\n if stop <= start:\n raise ValueError(\"requested size %d for sum operator's support smaller than \"\n \"summand's support %d; impossible to satisfy\" % \\\n (size, op.max_spin_idx))\n wrap_idx = None\n\n elif boundary == 'closed':\n stop = start + size\n wrap_idx = stop\n if start != 0:\n raise ValueError('cannot set start != 0 for closed boundary conditions.')\n\n else:\n raise ValueError(\"invalid value for argument 'boundary' (can be 'open' or 'closed')\")\n\n rtn = Operator()\n rtn.msc = msc_tools.msc_sum(op.get_shifted_msc(i, wrap_idx) for i in range(start, stop))\n\n rtn.string = 'index_sum(' + op.string + ', sites %d - %d' % (start, stop-1)\n if boundary == 'closed':\n rtn.string += ', wrapped)'\n else:\n rtn.string += ')'\n\n # add i to the indices for TeX representation\n # TODO: use different letters if we have sum of sums\n sub_tex = op.with_brackets(which = 'tex')\n sub_tex = sub_tex.replace('{IDX', '{IDXi+').replace('{IDXi+0','{IDXi')\n\n rtn.tex = r'\\sum_{i=%d}^{%d}' % (start, stop-1) + sub_tex\n rtn.brackets = '[]'\n\n return rtn\n\ndef index_product(op, size = None, start = 0):\n \"\"\"\n Duplicate the operator onto adjacent sites in the spin chain, and multiply the\n resulting operators together.\n In most cases, ``op`` should have support on site 0 (and possibly others).\n\n Parameters\n ----------\n op : Operator\n The operator to translate along the spin chain.\n\n size : int, optional\n The size of the support of the resulting operator. 
If not provided, defaults\n to the value of :attr:`Operator.L`.\n\n start : int, optional\n The site for the first operator in the sum.\n \"\"\"\n\n if size is None:\n if op.L is None:\n raise ValueError('Must specify index_sum size with either the \"size\" argument '\n 'or by setting Operator.L (possibly through config.L).')\n else:\n size = op.L\n\n if size == 0:\n return identity()\n\n stop = start + size - op.max_spin_idx\n\n rtn = Operator()\n rtn.msc = msc_tools.msc_product(op.get_shifted_msc(i, wrap_idx = None) for i in range(start, stop))\n\n rtn.string = 'index_product(' + op.string + ', sites %d - %d)' % (start, stop-1)\n\n # add i to the indices for TeX representation\n # TODO: use different letters if we have sum of sums\n sub_tex = op.with_brackets(which = 'tex')\n sub_tex = sub_tex.replace('{IDX', '{IDXi+').replace('{IDXi+0','{IDXi')\n rtn.tex = r'\\prod_{i=%d}^{%d}' % (start, stop-1) + sub_tex\n rtn.brackets = '[]'\n\n return rtn\n\ndef sigmax(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_x` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(1<<i, 0, 1)]\n o.tex = r'\\sigma^x_{IDX'+str(i)+'}'\n o.string = 'σx'+str(i).join('[]')\n return o\n\ndef sigmay(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_y` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(1<<i, 1<<i, 1j)]\n o.tex = r'\\sigma^y_{IDX'+str(i)+'}'\n o.string = 'σy'+str(i).join('[]')\n return o\n\ndef sigmaz(i=0):\n r\"\"\"\n The Pauli :math:`\\sigma_z` operator on site :math:`i`.\n \"\"\"\n o = Operator()\n o.msc = [(0, 1<<i, 1)]\n o.tex = r'\\sigma^z_{IDX'+str(i)+'}'\n o.string = 'σz'+str(i).join('[]')\n return o\n\ndef sigma_plus(i=0):\n r\"\"\"\n The :math:`\\sigma_+ = \\sigma_x + i \\sigma_y` operator.\n\n .. note::\n\n :math:`\\sigma_+ = \\left( \\begin{array}{cc} 0 & 2 \\\\ 0 & 0 \\\\ \\end{array} \\right)`,\n so :math:`S_+ = \\left( \\begin{array}{cc} 0 & 1 \\\\ 0 & 0 \\\\ \\end{array} \\right) = \\frac{1}{2} \\sigma_+`\n \"\"\"\n o = sigmax(i) + 1j*sigmay(i)\n o.tex = r'\\sigma^+_{IDX'+str(i)+'}'\n o.string = 'σ+'+str(i).join('[]')\n return o\n\ndef sigma_minus(i=0):\n r\"\"\"\n The :math:`\\sigma_- = \\sigma_x - i \\sigma_y` operator.\n\n .. 
note::\n\n :math:`\\sigma_- = \\left( \\begin{array}{cc} 0 & 0 \\\\ 2 & 0 \\\\ \\end{array} \\right)`,\n so :math:`S_- = \\left( \\begin{array}{cc} 0 & 0 \\\\ 1 & 0 \\\\ \\end{array} \\right) = \\frac{1}{2} \\sigma_-`\n \"\"\"\n o = sigmax(i) - 1j*sigmay(i)\n o.tex = r'\\sigma^-_{IDX'+str(i)+'}'\n o.string = 'σ-'+str(i).join('[]')\n return o\n\ndef identity():\n \"\"\"\n The identity operator.\n \"\"\"\n o = Operator()\n o.msc = [(0, 0, 1)]\n # TODO: do a fancy double-lined 1?\n o.tex = '1'\n o.string = '1'\n return o\n\ndef zero():\n \"\"\"\n The zero operator---equivalent to a matrix of all zeros.\n \"\"\"\n o = Operator()\n o.msc = []\n o.tex = '0'\n o.string = '0'\n return o\n", "\nimport argparse\nimport numpy as np\n\nimport mpi_test_runner as mtr\n\nclass DynamiteTestCase(mtr.MPITestCase):\n\n def check_vec_equal(self, a, b, eps=None):\n '''\n Compare two PETSc vectors, checking that they are equal.\n '''\n # compare via dot product\n nrm = (a.vec-b.vec).norm()\n\n # compare the local portions of the vectors\n istart, iend = a.vec.getOwnershipRange()\n\n if istart == iend:\n return\n\n a = a.vec[istart:iend]\n b = b.vec[istart:iend]\n\n # this is the amount of machine rounding error we can accumulate\n if eps is None:\n eps = np.finfo(a.dtype).eps\n\n diff = np.abs(a-b)\n max_idx = np.argmax(diff)\n far_idxs = np.nonzero(~np.isclose(a, b, rtol=0, atol=eps))[0]\n self.assertTrue(far_idxs.size == 0,\n msg = '''\n{nfar} values do not match.\nindices: {idxs}\ndiff norm: {nrm}\nlargest diff:\na: {a_real}+i{a_imag}\nb: {b_real}+i{b_imag}\ndiff: {diff}\nat {max_idx}'''.format(\n nrm = nrm,\n idxs = far_idxs,\n nfar = far_idxs.size,\n a_real = a[max_idx].real,\n a_imag = a[max_idx].imag,\n b_real = b[max_idx].real,\n b_imag = b[max_idx].imag,\n diff = np.abs(a[max_idx]-b[max_idx]),\n max_idx = max_idx,\n))\n\n# add these attributes to the test case\n# checks = [\n# ('Equal', operator.eq),\n# ('Less', operator.lt),\n# ('ArrayEqual', np.array_equal),\n# ('True', bool)\n# ]\n#\n# for name, fn in checks:\n# def tmp(*args, msg):\n# result =\n# setattr(MPITestCase, 'mpiAssert'+name, tmp)\n\n\ndef parse_command_line(cmd_argv=None):\n\n parser = argparse.ArgumentParser(description='Run dynamite integration tests.')\n\n parser.add_argument('name', nargs='?', default=None,\n help='Glob expression to specify specific test cases')\n\n parser.add_argument('-f', '--failfast', action='store_true',\n help='Stop the tests on first failure')\n\n parser.add_argument('-v', '--verbose', choices=[0, 1, 2], default=1, type=int,\n help='Level of detail to show')\n\n parser.add_argument('-L', type=int, default=10,\n help='Spin chain length at which to run tests')\n\n parser.add_argument('--gpu', action='store_true',\n help='Run the tests on a GPU')\n\n parser.add_argument('--shell', action='store_true',\n help='Run the tests using shell matrices')\n\n parser.add_argument('--slepc_args', type=lambda s: s.strip().split(' '),\n help='Arguments to pass to SLEPc initialization')\n\n return parser.parse_args(cmd_argv)\n\ndef main(slepc_args=None):\n from dynamite import config\n args = parse_command_line()\n\n config.L = args.L\n config.shell = args.shell\n\n if slepc_args is None:\n slepc_args = []\n\n if args.slepc_args is not None:\n slepc_args += args.slepc_args\n\n config.initialize(slepc_args, gpu=args.gpu)\n\n mtr.main(name=args.name, failfast=args.failfast, verbose=args.verbose)\n" ]
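The single-site builders above encode each Pauli operator as one (mask, sign, coefficient) triple. A dense-matrix sketch of that encoding, under my reading of the convention (basis state s maps to s ^ mask with amplitude coeff * (-1)**popcount(s & sign); the authoritative rules live in msc_tools):

import numpy as np

def msc_to_dense(msc, L):
    dim = 2 ** L
    mat = np.zeros((dim, dim), dtype=complex)
    for mask, sign, coeff in msc:
        for s in range(dim):
            mat[s ^ mask, s] += coeff * (-1) ** bin(s & sign).count("1")
    return mat

sx = msc_to_dense([(1, 0, 1)], 1)    # sigmax: flip the bit
sy = msc_to_dense([(1, 1, 1j)], 1)   # sigmay: flip the bit, sign on the bit
sz = msc_to_dense([(0, 1, 1)], 1)    # sigmaz: diagonal sign only
print(np.allclose(sx @ sy, 1j * sz))   # True: the Pauli algebra comes out right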
[ [ "numpy.log", "matplotlib.pyplot.imshow", "numpy.array_equal", "numpy.unique", "numpy.ascontiguousarray", "numpy.ndarray", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.isclose", "numpy.argmax", "numpy.abs", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Virodroid/galaxy-cluster
[ "11c0b365ed94e1f141b55e905f93abcbf39b3657" ]
[ "clustering/aggOnFeatures.py" ]
[ "from sklearn.cluster import AgglomerativeClustering\nimport pandas as pd\nimport numpy as np\nfrom zoobot import label_metadata, schemas\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support\nfrom scipy.optimize import linear_sum_assignment as linear_assignment\nimport time\n\ndef findChoice(frac):\n choice = np.zeros_like(frac)\n choice[np.arange(len(frac)), frac.argmax(1)] = 1\n return choice\n\ndef getQuestionClasses(auto_f, volunteers, question, seed):\n qcol_name = question.text+'_total-votes'\n fcol_names = [(cols.text+'_fraction') for cols in question.answers]\n anscol_names = [cols.text for cols in question.answers]\n valid_feats = []\n \n valid_vol = volunteers.query('`{}`/`smooth-or-featured_total-votes` >= 0.5'.format(qcol_name))\n valid_idx = valid_vol.index.tolist()\n vol_results = valid_vol[fcol_names].values\n \n auto_values = auto_f.values\n \n for i in valid_idx:\n valid_feats.append(auto_values[i])\n \n rounded_vol_results = findChoice(np.asarray(vol_results))\n support = len(rounded_vol_results)\n \n pred_results = AgglomerativeClustering(n_clusters=len(fcol_names)).fit_predict(valid_feats)\n \n vol_classes = np.argmax(rounded_vol_results, axis=1)\n \n return valid_idx, support, anscol_names, np.array(pred_results), np.array(vol_classes)\n\ndef _make_cost_m(cm):\n s = np.max(cm)\n return (- cm + s)\n\ndef labelMap(vol, pred):\n cm = confusion_matrix(vol, pred)\n indexes = linear_assignment(_make_cost_m(cm))\n indexes = np.asarray(indexes)\n return indexes[1]\n \ndef convertLabels(lmap, pred):\n conv_preds = []\n for i in range(len(pred)):\n conv_preds.append(lmap[pred[i]])\n return np.array(conv_preds)\n\nauto_features = pd.read_csv(\"/users/ezraf/galaxyDECaLS/autoencoder/extracted_features.csv\")\nauto_features = auto_features.drop('file_loc',axis=1)\ndecals_test = pd.read_csv('/users/ezraf/galaxyDECaLS/Ilifu_data/decals_ilifu_test.csv')\nschema = schemas.Schema(label_metadata.decals_pairs, label_metadata.get_gz2_and_decals_dependencies(label_metadata.decals_pairs))\n\ntotal_report = {}\nseeds = [6589,4598,2489,9434,7984,1238,6468,5165,3246,8646]\ntotal_time = {}\nfor question in label_metadata.decals_pairs:\n total_report[question] = {\n 'precision': 0,\n 'recall': 0,\n 'f1': 0,\n 'support': 0\n }\nfor question in label_metadata.decals_pairs:\n total_time[question] = {}\n print('Starting Clustering for ',question)\n start = time.time()\n idxs, support, anscols, valid_preds, valid_vol = getQuestionClasses(auto_features, decals_test, schema.get_question(question), None)\n lmap = labelMap(valid_vol, valid_preds)\n conv_preds = convertLabels(lmap, valid_preds)\n question_report = precision_recall_fscore_support(y_pred=conv_preds, y_true=valid_vol, average='weighted')\n total_report[question]['precision'] += question_report[0]\n total_report[question]['recall'] += question_report[1]\n total_report[question]['f1'] += question_report[2]\n end = time.time()\n total_report[question]['support'] = support\n total_time[question]['total'] = end - start\n print('Question: ',question,' Completed 1 time')\n print('--------------------------------------------------------------')\n\nreport_df = pd.DataFrame.from_dict(total_report, orient='index')\ntime_df = pd.DataFrame.from_dict(total_time, orient='index')\n\nreport_df.to_csv(\"/users/ezraf/clusterResults/agg_accuracy.csv\")\ntime_df.to_csv(\"/users/ezraf/clusterResults/agg_time.csv\")" ]
[ [ "pandas.read_csv", "numpy.asarray", "sklearn.metrics.confusion_matrix", "numpy.max", "sklearn.metrics.precision_recall_fscore_support", "numpy.argmax", "numpy.zeros_like", "pandas.DataFrame.from_dict", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]