Columns: repo_name (string, lengths 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
cankocagil/TT-SRN
[ "83eb03a9393442e6b09aa736862b3a2d5bdcf5b6" ]
[ "models/spn.py" ]
[ "import torch\r\nfrom torch import nn, einsum\r\nimport torch.nn.functional as F\r\nimport math\r\nfrom einops import rearrange, repeat\r\nfrom einops.layers.torch import Rearrange\r\nfrom siren.init import siren_uniform_\r\n\r\ndef sine_init(x):\r\n siren_uniform_(x, mode='fan_in', c=6)\r\n\r\n\r\nclass Sine(nn.Module):\r\n def __init__(self, w0 = 1.):\r\n super().__init__()\r\n self.w0 = w0\r\n def forward(self, x):\r\n return torch.sin(self.w0 * x)\r\n\r\n\r\nclass Siren(nn.Module):\r\n def __init__(self, dim_in, dim_out, w0 = 1., c = 6., is_first = False, use_bias = True, activation = None):\r\n super().__init__()\r\n self.dim_in = dim_in\r\n self.is_first = is_first\r\n\r\n weight = torch.zeros(dim_out, dim_in)\r\n bias = torch.zeros(dim_out) if use_bias else None\r\n self.init_(weight, bias, c = c, w0 = w0)\r\n\r\n self.weight = nn.Parameter(weight)\r\n self.bias = nn.Parameter(bias) if use_bias else None\r\n self.activation = Sine(w0) if activation is None else activation\r\n\r\n def init_(self, weight, bias, c, w0):\r\n dim = self.dim_in\r\n\r\n w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)\r\n weight.uniform_(-w_std, w_std)\r\n\r\n if bias is not None:\r\n bias.uniform_(-w_std, w_std)\r\n\r\n def forward(self, x):\r\n out = F.linear(x, self.weight, self.bias)\r\n out = self.activation(out)\r\n return out\r\n\r\n\r\nclass SinLayerClass(nn.Module):\r\n def __init__(self, dim, hidden_dim, num_heads, dropout = 0.2):\r\n super().__init__()\r\n internal_state_dim = int(hidden_dim//2)\r\n \r\n\r\n self.net = nn.Sequential(\r\n Siren(dim, hidden_dim),\r\n nn.Dropout(dropout),\r\n nn.Linear(hidden_dim, internal_state_dim),\r\n nn.GELU(),\r\n nn.Linear(internal_state_dim, num_heads)\r\n )\r\n def forward(self, x):\r\n return self.net(x)\r\n\r\n\r\n\r\n\r\nclass SinLayer(nn.Module):\r\n def __init__(self, dim, hidden_dim, num_heads, dropout = 0.2):\r\n super().__init__()\r\n internal_state_dim = int(hidden_dim//2)\r\n internal_state_dim2 = int(internal_state_dim//2)\r\n\r\n self.net = nn.Sequential(\r\n Siren(dim, hidden_dim),\r\n nn.Dropout(dropout),\r\n Siren(hidden_dim, internal_state_dim),\r\n nn.Dropout(dropout),\r\n nn.Linear(internal_state_dim, internal_state_dim2),\r\n nn.GELU(),\r\n nn.Linear(internal_state_dim2, num_heads)\r\n )\r\n def forward(self, x):\r\n return self.net(x)\r\n\r\n\r\nclass MLP(nn.Module):\r\n def __init__(self, dim, hidden_dim, num_heads, dropout = 0.):\r\n super().__init__()\r\n internal_state_dim = int(hidden_dim//2)\r\n\r\n self.net = nn.Sequential(\r\n nn.Linear(dim, hidden_dim),\r\n nn.GELU(),\r\n nn.Dropout(dropout),\r\n nn.Linear(hidden_dim, internal_state_dim),\r\n nn.GELU(),\r\n nn.Dropout(dropout),\r\n nn.Linear(internal_state_dim, num_heads),\r\n )\r\n def forward(self, x):\r\n return self.net(x) " ]
[ [ "torch.nn.Linear", "torch.nn.functional.linear", "torch.nn.Parameter", "torch.nn.GELU", "torch.sin", "torch.zeros", "torch.nn.Dropout" ] ]
gf712/shogun
[ "ca2afb8f092288455701539aa58952dbf6743378" ]
[ "examples/undocumented/python/converter_factoranalysis.py" ]
[ "#!/usr/bin/env python\ndata = '../data/fm_train_real.dat'\nparameter_list = [[data]]\n\ndef converter_factoranalysis(data_fname):\n\ttry:\n\t\timport numpy\n\t\tfrom shogun import FactorAnalysis, EuclideanDistance, CSVFile\n\n\t\tfeatures = sg.create_features(CSVFile(data_fname))\n\n\t\tconverter = FactorAnalysis()\n\t\tconverter.set_target_dim(2)\n\t\tembedding = converter.transform(features)\n\n\t\tX = embedding.get_feature_matrix()\n\t\tcovdet = numpy.linalg.det(numpy.dot(X,X.T))\n\n\t\treturn covdet > 0\n\texcept ImportError:\n\t\tprint('No Eigen3 available')\n\nif __name__=='__main__':\n\tprint('Factor Analysis')\n\tconverter_factoranalysis(*parameter_list[0])\n" ]
[ [ "numpy.dot" ] ]
ondrejba/discrete_abstractions
[ "444def53ae2ca6c8a5b5b453448f7c4bbaba07e1" ]
[ "runners/bisim/lehnert_gridworld/LehnertGridworldExactPartition.py" ]
[ "import copy as cp\nimport numpy as np\nfrom envs.lehnert_gridworld import LehnertGridworld\nfrom model.LehnertGridworldModel import LehnertGridworldModel\nfrom runners.runner import Runner\nimport config_constants as cc\nimport vis_utils\nfrom model.cluster import Cluster\nfrom utils.dataset import ArrayDataset\nimport constants\n\n\nclass LehnertGridworldExactPartitionRunner(Runner):\n\n def __init__(self, runner_config, model_config):\n\n super(LehnertGridworldExactPartitionRunner, self).__init__()\n\n self.model_config = model_config\n\n self.saver = runner_config[cc.SAVER]\n self.logger = runner_config[cc.LOGGER]\n self.load_model_path = runner_config[cc.LOAD_MODEL_PATH]\n self.round_to = runner_config[cc.ROUND_TO]\n self.hard_t = runner_config[cc.HARD_T]\n\n def setup(self):\n\n # prepare_dataset_ uses prepare_model_\n self.prepare_model_()\n self.prepare_dataset_()\n\n def main_training_loop(self):\n\n self.partition_iteration_()\n\n def evaluate_and_visualize(self):\n\n #if self.saver.has_dir():\n # self.visualize_partition_()\n\n self.evaluate_purities_all_states_()\n\n def visualize_partition_(self):\n\n for idx, block in enumerate(self.partition):\n\n block_list = list(block)\n\n block_states = self.all_states[block_list]\n\n vis_utils.plot_5x5_grid(block_states, \"block_{:d}\".format(idx), self.saver)\n\n def evaluate_purities_all_states_(self):\n\n dataset = ArrayDataset({\n constants.STATE_LABELS: self.all_state_labels\n })\n\n cluster = Cluster(None, dataset)\n cluster.num_clusters = len(self.partition)\n\n probs = np.zeros((len(self.all_states), len(self.partition)))\n\n for block_idx, block in enumerate(self.partition):\n for state_idx in block:\n probs[state_idx, block_idx] = 1.0\n\n cluster_map = cluster.assign_label_to_each_cluster_soft(probs)\n\n purities, sizes, mean_purity = cluster.evaluate_purity_soft(probs)\n mean_i_purity, i_scores, i_totals = cluster.evaluate_inverse_purity_soft(probs, cluster_map)\n\n self.logger.info(\"state mixture soft purity: {:.2f}%\".format(mean_purity * 100))\n self.logger.info(\"state mixture inverse soft purity: {:.2f}%\".format(mean_i_purity * 100))\n\n self.saver.save_array_as_txt([mean_purity, mean_i_purity, len(self.partition)], \"cluster_purity\")\n\n def partition_iteration_(self):\n\n self.partition = {frozenset(self.all_states_indices)}\n new_partition = self.partition_improvement_()\n\n while new_partition != self.partition:\n\n self.logger.info(\"new partition: {:d} blocks\".format(len(new_partition)))\n self.partition = new_partition\n new_partition = self.partition_improvement_()\n\n self.logger.info(\"final partition: {:d} blocks\".format(len(new_partition)))\n\n def partition_improvement_(self):\n\n new_partition = cp.deepcopy(self.partition)\n\n for ref_block in self.partition:\n\n flag = True\n\n while flag:\n\n flag = False\n\n for block in new_partition:\n\n tmp_new_partition = self.split_(block, ref_block, new_partition)\n\n if tmp_new_partition != new_partition:\n new_partition = tmp_new_partition\n flag = True\n break\n\n return new_partition\n\n def split_(self, block, ref_block, partition):\n\n partition = cp.deepcopy(partition)\n partition.remove(block)\n\n new_blocks = {}\n\n for state in block:\n\n reward = self.all_states_rewards[state]\n next_state = self.all_states_transitions[state]\n\n block_states = self.all_states[list(ref_block)]\n mask = np.sum(block_states, axis=0)\n\n next_state = next_state * mask[np.newaxis, :, :]\n probs = np.sum(next_state, axis=(1, 2))\n\n key = tuple(np.concatenate([reward, 
probs], axis=0))\n\n if key not in new_blocks.keys():\n new_blocks[key] = []\n\n new_blocks[key].append(state)\n\n for new_block in new_blocks.values():\n partition.add(frozenset(new_block))\n\n return partition\n\n def prepare_dataset_(self):\n\n env = LehnertGridworld()\n self.all_states, self.all_state_labels = env.get_all_states()\n\n self.all_states = np.array(self.all_states)\n self.all_state_labels = np.array(self.all_state_labels)\n\n self.all_states_indices = list(range(len(self.all_states)))\n\n self.all_states_rewards = []\n self.all_states_transitions = []\n\n for action in [LehnertGridworld.A_UP, LehnertGridworld.A_DOWN, LehnertGridworld.A_LEFT,\n LehnertGridworld.A_RIGHT]:\n\n reward, next_state = self.model.predict(self.all_states, np.array([action] * len(self.all_states)))\n\n if self.round_to is not None:\n reward = np.round(reward, decimals=self.round_to)\n\n if self.hard_t:\n next_state = next_state.reshape((next_state.shape[0], next_state.shape[1] * next_state.shape[2]))\n pos = np.argmax(next_state, axis=1)\n next_state = np.zeros((next_state.shape[0], next_state.shape[1]), dtype=np.float32)\n next_state[range(len(pos)), pos] = 1.0\n next_state = next_state.reshape((next_state.shape[0], 30, 3))\n\n self.all_states_rewards.append(reward)\n self.all_states_transitions.append(next_state)\n\n # final shape: |S|x|A| and |S|x|A|x30x3\n self.all_states_rewards = np.array(self.all_states_rewards).transpose((1, 0))\n self.all_states_transitions = np.array(self.all_states_transitions).transpose((1, 0, 2, 3))\n\n def prepare_model_(self):\n\n self.model = LehnertGridworldModel(self.model_config)\n self.model.build()\n self.model.start_session()\n self.model.load(self.load_model_path)\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.argmax", "numpy.array", "numpy.concatenate", "numpy.round" ] ]
MaxGhenis/taxcalc-helpers
[ "85d2739d1c96882424cb51ef1806c9e51f88c085" ]
[ "microdf/tests/test_inequality.py" ]
[ "import microdf as mdf\n\nimport pandas as pd\n\n\ndef test_top_pct():\n x = list(range(1, 11)) # 1 to 10. Sum = 10 * 11 / 2 = 55.\n df = pd.DataFrame({\"x\": x})\n ms = mdf.MicroSeries(x)\n RES = 10 / 55\n assert mdf.top_10_pct_share(df, \"x\") == RES\n assert ms.top_10_pct_share() == RES\n x = list(range(1, 4))\n df = pd.DataFrame({\"x\": x, \"w\": x})\n ms = mdf.MicroSeries(x, weights=x)\n # This is equivalent to [1, 2, 2, 3, 3, 3]\n # Sum = 14, top half is 9.\n RES = 9 / 14\n assert mdf.top_50_pct_share(df, \"x\", \"w\") == RES\n assert ms.top_50_pct_share() == RES\n" ]
[ [ "pandas.DataFrame" ] ]
vincentlui/unsupervised-goal-conditioned-rl
[ "4f2e6938e072cb52f8ee779a939fe7bf6a980d45" ]
[ "rlkit/torch/sac/diayn/diayn_cont.py" ]
[ "from collections import OrderedDict\n\nimport math\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch import nn as nn\nimport torch.nn.functional as F\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchTrainer\n\n\nclass DIAYNContTrainer(TorchTrainer):\n def __init__(\n self,\n env,\n policy,\n qf1,\n qf2,\n target_qf1,\n target_qf2,\n df,\n\n discount=0.99,\n reward_scale=1.0,\n\n policy_lr=1e-3,\n qf_lr=1e-3,\n df_lr=1e-3,\n optimizer_class=optim.Adam,\n\n soft_target_tau=1e-2,\n target_update_period=1,\n plotter=None,\n render_eval_paths=False,\n\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n ):\n super().__init__()\n self.env = env\n self.policy = policy\n self.qf1 = qf1\n self.qf2 = qf2\n self.target_qf1 = target_qf1\n self.target_qf2 = target_qf2\n self.df = df\n self.soft_target_tau = soft_target_tau\n self.target_update_period = target_update_period\n\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas\n self.log_alpha = ptu.zeros(1, requires_grad=True)\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha],\n lr=policy_lr,\n )\n\n self.plotter = plotter\n self.render_eval_paths = render_eval_paths\n\n self.qf_criterion = nn.MSELoss()\n self.vf_criterion = nn.MSELoss()\n self.df_criterion = nn.CrossEntropyLoss()\n\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(),\n lr=qf_lr,\n )\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(),\n lr=qf_lr,\n )\n self.df_optimizer = optimizer_class(\n self.df.parameters(),\n lr=df_lr,\n )\n\n self.discount = discount\n self.reward_scale = reward_scale\n self.eval_statistics = OrderedDict()\n self._n_train_steps_total = 0\n self._need_to_update_eval_statistics = True\n\n def train_from_torch(self, batch):\n rewards = batch['rewards']\n terminals = batch['terminals']\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n skills = batch['skills']\n\n \"\"\"\n DF Loss and Intrinsic Reward\n \"\"\"\n\n df_input = torch.cat([obs], dim=1)\n df_distribution = self.df(df_input)\n log_likelihood = df_distribution.log_prob(skills)\n rewards = log_likelihood.reshape(-1, 1)\n df_loss = -log_likelihood.mean()\n # z_hat = torch.argmax(skills, dim=1)\n # d_pred = self.df(next_obs)\n # d_pred_log_softmax = F.log_softmax(d_pred, 1)\n # _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)\n # rewards = d_pred_log_softmax[torch.arange(d_pred.shape[0]), z_hat] - math.log(1/self.policy.skill_dim)\n # rewards = rewards.reshape(-1, 1)\n # df_loss = self.df_criterion(d_pred, z_hat)\n\n \"\"\"\n Policy and Alpha Loss\n \"\"\"\n new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(\n skills, reparameterize=True, return_log_prob=True,\n )\n obs_skills = torch.cat((obs, skills), dim=1)\n if self.use_automatic_entropy_tuning:\n alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n alpha = self.log_alpha.exp()\n else:\n alpha_loss = 0\n alpha = .1\n\n q_new_actions = torch.min(\n self.qf1(obs_skills, 
new_obs_actions),\n self.qf2(obs_skills, new_obs_actions),\n )\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n\n \"\"\"\n QF Loss\n \"\"\"\n q1_pred = self.qf1(obs_skills, actions)\n q2_pred = self.qf2(obs_skills, actions)\n # Make sure policy accounts for squashing functions like tanh correctly!\n new_next_actions, _, _, new_log_pi, *_ = self.policy(\n skills, reparameterize=True, return_log_prob=True,\n )\n next_obs_skills = torch.cat((next_obs, skills), dim=1)\n target_q_values = torch.min(\n self.target_qf1(next_obs_skills, new_next_actions),\n self.target_qf2(next_obs_skills, new_next_actions),\n ) - alpha * new_log_pi\n\n q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values\n qf1_loss = self.qf_criterion(q1_pred, q_target.detach())\n qf2_loss = self.qf_criterion(q2_pred, q_target.detach())\n\n \"\"\"\n Update networks\n \"\"\"\n self.df_optimizer.zero_grad()\n df_loss.backward()\n self.df_optimizer.step()\n\n self.qf1_optimizer.zero_grad()\n qf1_loss.backward()\n self.qf1_optimizer.step()\n\n self.qf2_optimizer.zero_grad()\n qf2_loss.backward()\n self.qf2_optimizer.step()\n\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n\n \"\"\"\n Soft Updates\n \"\"\"\n if self._n_train_steps_total % self.target_update_period == 0:\n ptu.soft_update_from_to(\n self.qf1, self.target_qf1, self.soft_target_tau\n )\n ptu.soft_update_from_to(\n self.qf2, self.target_qf2, self.soft_target_tau\n )\n\n \"\"\"\n Save some statistics for eval\n \"\"\"\n # df_accuracy = torch.sum(torch.eq(z_hat, pred_z.reshape(1, list(pred_z.size())[0])[0])).float()/list(pred_z.size())[0]\n\n if self._need_to_update_eval_statistics:\n self._need_to_update_eval_statistics = False\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n policy_loss = (log_pi - q_new_actions).mean()\n\n self.eval_statistics['Intrinsic Rewards'] = np.mean(ptu.get_numpy(rewards))\n self.eval_statistics['DF Loss'] = np.mean(ptu.get_numpy(df_loss))\n # self.eval_statistics['DF Accuracy'] = np.mean(ptu.get_numpy(df_accuracy))\n self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))\n self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))\n self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q1 Predictions',\n ptu.get_numpy(q1_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q2 Predictions',\n ptu.get_numpy(q2_pred),\n ))\n # self.eval_statistics.update(create_stats_ordered_dict(\n # 'D Predictions',\n # ptu.get_numpy(pred_z),\n # ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Targets',\n ptu.get_numpy(q_target),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Log Pis',\n ptu.get_numpy(log_pi),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy mu',\n ptu.get_numpy(policy_mean),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy log std',\n ptu.get_numpy(policy_log_std),\n ))\n if self.use_automatic_entropy_tuning:\n self.eval_statistics['Alpha'] = alpha.item()\n self.eval_statistics['Alpha Loss'] = alpha_loss.item()\n self._n_train_steps_total += 1\n\n def get_diagnostics(self):\n return self.eval_statistics\n\n def end_epoch(self, epoch):\n self._need_to_update_eval_statistics = True\n\n @property\n def networks(self):\n return [\n self.policy,\n self.qf1,\n self.qf2,\n self.target_qf1,\n 
self.target_qf2,\n self.df\n ]\n\n def get_snapshot(self):\n return dict(\n policy=self.policy,\n qf1=self.qf1,\n qf2=self.qf2,\n target_qf1=self.target_qf1,\n target_qf2=self.target_qf2,\n df=self.df\n )\n\n" ]
[ [ "torch.cat", "torch.nn.MSELoss", "torch.nn.CrossEntropyLoss", "numpy.prod" ] ]
coszero/My-Solutions-to-CS224n
[ "98c58c42b55f6a6b3dd984baa8a994ea42f24952" ]
[ "a2/word2vec.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport random\n\nfrom utils.gradcheck import gradcheck_naive\nfrom utils.utils import normalizeRows, softmax\n\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid function for the input here.\n Arguments:\n x -- A scalar or numpy array.\n Return:\n s -- sigmoid(x)\n \"\"\"\n\n ### YOUR CODE HERE\n\n s=1/(1+np.exp(-x))\n\n ### END YOUR CODE\n\n return s\n\n\ndef naiveSoftmaxLossAndGradient(\n centerWordVec,\n outsideWordIdx,\n outsideVectors,\n dataset\n):\n \"\"\" Naive Softmax loss & gradient function for word2vec models\n\n Implement the naive softmax loss and gradients between a center word's \n embedding and an outside word's embedding. This will be the building block\n for our word2vec models.\n\n Arguments:\n centerWordVec -- numpy ndarray, center word's embedding\n (v_c in the pdf handout)\n outsideWordIdx -- integer, the index of the outside word\n (o of u_o in the pdf handout)\n outsideVectors -- outside vectors (rows of matrix) for all words in vocab\n (U in the pdf handout)\n dataset -- needed for negative sampling, unused here.\n\n Return:\n loss -- naive softmax loss\n gradCenterVec -- the gradient with respect to the center word vector\n (dJ / dv_c in the pdf handout)\n gradOutsideVecs -- the gradient with respect to all the outside word vectors\n (dJ / dU)\n \"\"\"\n\n ### YOUR CODE HERE\n\n ### Please use the provided softmax function (imported earlier in this file)\n ### This numerically stable implementation helps you avoid issues pertaining\n ### to integer overflow. \n \n \n #dot product similarity\n dot_product=outsideVectors @ centerWordVec\n s_dot=softmax(dot_product)\n loss=-np.log(s_dot[outsideWordIdx])\n \n gradCenterVec=-outsideVectors[outsideWordIdx] \\\n +np.sum(outsideVectors*s_dot[:,np.newaxis],axis=0)\n \n gradOutsideVecs=np.zeros(outsideVectors.shape)\n gradOutsideVecs=s_dot[:,np.newaxis]*centerWordVec\n gradOutsideVecs[outsideWordIdx]-=centerWordVec\n\n\n ### END YOUR CODE\n\n return loss, gradCenterVec, gradOutsideVecs\n\n\ndef getNegativeSamples(outsideWordIdx, dataset, K):\n \"\"\" Samples K indexes which are not the outsideWordIdx \"\"\"\n\n negSampleWordIndices = [None] * K\n for k in range(K):\n newidx = dataset.sampleTokenIdx()\n while newidx == outsideWordIdx:\n newidx = dataset.sampleTokenIdx()\n negSampleWordIndices[k] = newidx\n return negSampleWordIndices\n\n\ndef negSamplingLossAndGradient(\n centerWordVec,\n outsideWordIdx,\n outsideVectors,\n dataset,\n K=10\n):\n \"\"\" Negative sampling loss function for word2vec models\n\n Implement the negative sampling loss and gradients for a centerWordVec\n and a outsideWordIdx word vector as a building block for word2vec\n models. K is the number of negative samples to take.\n\n Note: The same word may be negatively sampled multiple times. For\n example if an outside word is sampled twice, you shall have to\n double count the gradient with respect to this word. Thrice if\n it was sampled three times, and so forth.\n\n Arguments/Return Specifications: same as naiveSoftmaxLossAndGradient\n \"\"\"\n\n # Negative sampling of words is done for you. 
Do not modify this if you\n # wish to match the autograder and receive points!\n negSampleWordIndices = getNegativeSamples(outsideWordIdx, dataset, K)\n indices = [outsideWordIdx] + negSampleWordIndices\n\n ### YOUR CODE HERE\n\n ### Please use your implementation of sigmoid in here.\n dot_product=outsideVectors[indices] @ centerWordVec\n probTrueScore=sigmoid(dot_product[0])\n probNegScore=sigmoid(-dot_product[1:])\n loss=-np.log(probTrueScore)-np.sum(np.log(probNegScore))\n \n gradCenterVec=-outsideVectors[outsideWordIdx]*(1-probTrueScore)\n gradCenterVec+=np.sum((1-probNegScore)[:,np.newaxis]*outsideVectors[negSampleWordIndices],axis=0)\n \n gradOutsideVecs=np.zeros(outsideVectors.shape)\n gradOutsideVecs[outsideWordIdx]=-centerWordVec*(1-probTrueScore)\n gradOutsideVecs[negSampleWordIndices]=(1-probNegScore)[:,np.newaxis]*centerWordVec\n indcount=np.bincount(indices)\n for i in np.unique(indices):\n gradOutsideVecs[i]*=indcount[i]\n\n\n ### END YOUR CODE\n\n return loss, gradCenterVec, gradOutsideVecs\n\n\ndef skipgram(currentCenterWord, windowSize, outsideWords, word2Ind,\n centerWordVectors, outsideVectors, dataset,\n word2vecLossAndGradient=naiveSoftmaxLossAndGradient):\n \"\"\" Skip-gram model in word2vec\n\n Implement the skip-gram model in this function.\n\n Arguments:\n currentCenterWord -- a string of the current center word\n windowSize -- integer, context window size\n outsideWords -- list of no more than 2*windowSize strings, the outside words\n word2Ind -- a dictionary that maps words to their indices in\n the word vector list\n centerWordVectors -- center word vectors (as rows) for all words in vocab\n (V in pdf handout)\n outsideVectors -- outside word vectors (as rows) for all words in vocab\n (U in pdf handout)\n word2vecLossAndGradient -- the loss and gradient function for\n a prediction vector given the outsideWordIdx\n word vectors, could be one of the two\n loss functions you implemented above.\n\n Return:\n loss -- the loss function value for the skip-gram model\n (J in the pdf handout)\n gradCenterVecs -- the gradient with respect to the center word vectors\n (dJ / dV in the pdf handout)\n gradOutsideVectors -- the gradient with respect to the outside word vectors\n (dJ / dU in the pdf handout)\n \"\"\"\n\n loss = 0.0\n gradCenterVecs = np.zeros(centerWordVectors.shape)\n gradOutsideVectors = np.zeros(outsideVectors.shape)\n\n ### YOUR CODE HERE\n \n centerWordInd=word2Ind.get(currentCenterWord)\n centerwordvec= centerWordVectors[centerWordInd,:]\n outWordInd=[word2Ind.get(x) for x in outsideWords]\n for ind in outWordInd:\n ploss,pgcv,pgov=word2vecLossAndGradient(centerWordVec=centerwordvec,\n outsideWordIdx=ind,\n outsideVectors=outsideVectors,\n dataset=dataset)\n loss+=ploss\n gradCenterVecs[centerWordInd,:]+=pgcv\n gradOutsideVectors+=pgov\n\n ### END YOUR CODE\n\n return loss, gradCenterVecs, gradOutsideVectors\n\n#############################################\n# Testing functions below. DO NOT MODIFY! 
#\n#############################################\n\ndef word2vec_sgd_wrapper(word2vecModel, word2Ind, wordVectors, dataset, \n windowSize,\n word2vecLossAndGradient=naiveSoftmaxLossAndGradient):\n batchsize = 50\n loss = 0.0\n grad = np.zeros(wordVectors.shape)\n N = wordVectors.shape[0]\n centerWordVectors = wordVectors[:int(N/2),:]\n outsideVectors = wordVectors[int(N/2):,:]\n for i in range(batchsize):\n windowSize1 = random.randint(1, windowSize)\n centerWord, context = dataset.getRandomContext(windowSize1)\n\n c, gin, gout = word2vecModel(\n centerWord, windowSize1, context, word2Ind, centerWordVectors,\n outsideVectors, dataset, word2vecLossAndGradient\n )\n loss += c / batchsize\n grad[:int(N/2), :] += gin / batchsize\n grad[int(N/2):, :] += gout / batchsize\n\n return loss, grad\n\n\ndef test_word2vec():\n \"\"\" Test the two word2vec implementations, before running on Stanford Sentiment Treebank \"\"\"\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in range(2*C)]\n dataset.sampleTokenIdx = dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n\n print(\"==== Gradient check for skip-gram with naiveSoftmaxLossAndGradient ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, naiveSoftmaxLossAndGradient),\n dummy_vectors, \"naiveSoftmaxLossAndGradient Gradient\")\n\n print(\"==== Gradient check for skip-gram with negSamplingLossAndGradient ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingLossAndGradient),\n dummy_vectors, \"negSamplingLossAndGradient Gradient\")\n\n print(\"\\n=== Results ===\")\n print (\"Skip-Gram with naiveSoftmaxLossAndGradient\")\n\n print (\"Your Result:\")\n print(\"Loss: {}\\nGradient wrt Center Vectors (dJ/dV):\\n {}\\nGradient wrt Outside Vectors (dJ/dU):\\n {}\\n\".format(\n *skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset) \n )\n )\n\n print (\"Expected Result: Value should approximate these:\")\n print(\"\"\"Loss: 11.16610900153398\nGradient wrt Center Vectors (dJ/dV):\n [[ 0. 0. 0. ]\n [ 0. 0. 0. ]\n [-1.26947339 -1.36873189 2.45158957]\n [ 0. 0. 0. ]\n [ 0. 0. 0. ]]\nGradient wrt Outside Vectors (dJ/dU):\n [[-0.41045956 0.18834851 1.43272264]\n [ 0.38202831 -0.17530219 -1.33348241]\n [ 0.07009355 -0.03216399 -0.24466386]\n [ 0.09472154 -0.04346509 -0.33062865]\n [-0.13638384 0.06258276 0.47605228]]\n \"\"\")\n\n print (\"Skip-Gram with negSamplingLossAndGradient\") \n print (\"Your Result:\")\n print(\"Loss: {}\\nGradient wrt Center Vectors (dJ/dV):\\n {}\\n Gradient wrt Outside Vectors (dJ/dU):\\n {}\\n\".format(\n *skipgram(\"c\", 1, [\"a\", \"b\"], dummy_tokens, dummy_vectors[:5,:],\n dummy_vectors[5:,:], dataset, negSamplingLossAndGradient)\n )\n )\n print (\"Expected Result: Value should approximate these:\")\n print(\"\"\"Loss: 16.15119285363322\nGradient wrt Center Vectors (dJ/dV):\n [[ 0. 0. 0. ]\n [ 0. 0. 0. ]\n [-4.54650789 -1.85942252 0.76397441]\n [ 0. 0. 0. ]\n [ 0. 0. 0. 
]]\n Gradient wrt Outside Vectors (dJ/dU):\n [[-0.69148188 0.31730185 2.41364029]\n [-0.22716495 0.10423969 0.79292674]\n [-0.45528438 0.20891737 1.58918512]\n [-0.31602611 0.14501561 1.10309954]\n [-0.80620296 0.36994417 2.81407799]]\n \"\"\")\n\nif __name__ == \"__main__\":\n test_word2vec()\n" ]
[ [ "numpy.sum", "numpy.bincount", "numpy.zeros", "numpy.random.seed", "numpy.random.randn", "numpy.exp", "numpy.log", "numpy.unique" ] ]
Peter9192/ESMValCore
[ "febd96a39480cc837afbf4e1f5b0ef61571af76a" ]
[ "esmvalcore/preprocessor/_multimodel.py" ]
[ "\"\"\"multimodel statistics.\n\nFunctions for multi-model operations\nsupports a multitude of multimodel statistics\ncomputations; the only requisite is the ingested\ncubes have (TIME-LAT-LON) or (TIME-PLEV-LAT-LON)\ndimensions; and obviously consistent units.\n\nIt operates on different (time) spans:\n- full: computes stats on full dataset time;\n- overlap: computes common time overlap between datasets;\n\n\"\"\"\n\nimport logging\nfrom datetime import datetime\nfrom functools import reduce\n\nimport cf_units\nimport iris\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_time_offset(time_unit):\n \"\"\"Return a datetime object equivalent to tunit.\"\"\"\n # tunit e.g. 'day since 1950-01-01 00:00:00.0000000 UTC'\n cfunit = cf_units.Unit(time_unit, calendar=cf_units.CALENDAR_STANDARD)\n time_offset = cfunit.num2date(0)\n return time_offset\n\n\ndef _plev_fix(dataset, pl_idx):\n \"\"\"Extract valid plev data.\n\n this function takes care of situations\n in which certain plevs are completely\n masked due to unavailable interpolation\n boundaries.\n \"\"\"\n if np.ma.is_masked(dataset):\n # keep only the valid plevs\n if not np.all(dataset.mask[pl_idx]):\n statj = np.ma.array(dataset[pl_idx], mask=dataset.mask[pl_idx])\n else:\n logger.debug('All vals in plev are masked, ignoring.')\n statj = None\n else:\n mask = np.zeros_like(dataset[pl_idx], bool)\n statj = np.ma.array(dataset[pl_idx], mask=mask)\n\n return statj\n\n\ndef _compute_statistic(datas, statistic_name):\n \"\"\"Compute multimodel statistic.\"\"\"\n datas = np.ma.array(datas)\n statistic = datas[0]\n\n if statistic_name == 'median':\n statistic_function = np.ma.median\n elif statistic_name == 'mean':\n statistic_function = np.ma.mean\n else:\n raise NotImplementedError\n\n # no plevs\n if len(datas[0].shape) < 3:\n # get all NOT fully masked data - u_data\n # datas is per time point\n # so we can safely NOT compute stats for single points\n if datas.ndim == 1:\n u_datas = [data for data in datas]\n else:\n u_datas = [data for data in datas if not np.all(data.mask)]\n if len(u_datas) > 1:\n statistic = statistic_function(datas, axis=0)\n else:\n statistic.mask = True\n return statistic\n\n # plevs\n for j in range(statistic.shape[0]):\n plev_check = []\n for cdata in datas:\n fixed_data = _plev_fix(cdata, j)\n if fixed_data is not None:\n plev_check.append(fixed_data)\n\n # check for nr datasets\n if len(plev_check) > 1:\n plev_check = np.ma.array(plev_check)\n statistic[j] = statistic_function(plev_check, axis=0)\n else:\n statistic.mask[j] = True\n\n return statistic\n\n\ndef _put_in_cube(template_cube, cube_data, statistic, t_axis):\n \"\"\"Quick cube building and saving.\"\"\"\n if t_axis is None:\n times = template_cube.coord('time')\n else:\n times = iris.coords.DimCoord(\n t_axis,\n standard_name='time',\n units=template_cube.coord('time').units)\n lats = template_cube.coord('latitude')\n lons = template_cube.coord('longitude')\n\n # no plevs\n if len(template_cube.shape) == 3:\n cspec = [(times, 0), (lats, 1), (lons, 2)]\n # plevs\n elif len(template_cube.shape) == 4:\n plev = template_cube.coord('air_pressure')\n cspec = [(times, 0), (plev, 1), (lats, 2), (lons, 3)]\n elif len(template_cube.shape) == 1:\n cspec = [\n (times, 0),\n ]\n elif len(template_cube.shape) == 2:\n # If you're going to hardwire air_pressure into this,\n # might as well have depth here too.\n plev = template_cube.coord('depth')\n cspec = [\n (times, 0),\n (plev, 1),\n ]\n\n # correct dspec if necessary\n fixed_dspec = 
np.ma.fix_invalid(cube_data, copy=False, fill_value=1e+20)\n # put in cube\n stats_cube = iris.cube.Cube(\n fixed_dspec, dim_coords_and_dims=cspec, long_name=statistic)\n coord_names = [coord.name() for coord in template_cube.coords()]\n if 'air_pressure' in coord_names:\n if len(template_cube.shape) == 3:\n stats_cube.add_aux_coord(template_cube.coord('air_pressure'))\n\n stats_cube.var_name = template_cube.var_name\n stats_cube.long_name = template_cube.long_name\n stats_cube.standard_name = template_cube.standard_name\n stats_cube.units = template_cube.units\n return stats_cube\n\n\ndef _datetime_to_int_days(cube):\n \"\"\"Return list of int(days) converted from cube datetime cells.\"\"\"\n time_cells = [cell.point for cell in cube.coord('time').cells()]\n time_unit = cube.coord('time').units.name\n time_offset = _get_time_offset(time_unit)\n\n # extract date info\n real_dates = []\n for date_obj in time_cells:\n # real_date resets the actual data point day\n # to the 1st of the month so that there are no\n # wrong overlap indices\n # NOTE: this workaround is good only\n # for monthly data\n real_date = datetime(date_obj.year, date_obj.month, 1, 0, 0, 0)\n real_dates.append(real_date)\n\n days = [(date_obj - time_offset).days for date_obj in real_dates]\n return days\n\n\ndef _get_overlap(cubes):\n \"\"\"\n Get discrete time overlaps.\n\n This method gets the bounds of coord time\n from the cube and assembles a continuous time\n axis with smallest unit 1; then it finds the\n overlaps by doing a 1-dim intersect;\n takes the floor of first date and\n ceil of last date.\n \"\"\"\n all_times = []\n for cube in cubes:\n span = _datetime_to_int_days(cube)\n start, stop = span[0], span[-1]\n all_times.append([start, stop])\n bounds = [range(b[0], b[-1] + 1) for b in all_times]\n time_pts = reduce(np.intersect1d, bounds)\n if len(time_pts) > 1:\n time_bounds_list = [time_pts[0], time_pts[-1]]\n return time_bounds_list\n\n\ndef _slice_cube(cube, t_1, t_2):\n \"\"\"\n Efficient slicer.\n\n Simple cube data slicer on indices\n of common time-data elements.\n \"\"\"\n time_pts = [t for t in cube.coord('time').points]\n converted_t = _datetime_to_int_days(cube)\n idxs = sorted([\n time_pts.index(ii) for ii, jj in zip(time_pts, converted_t)\n if t_1 <= jj <= t_2\n ])\n return [idxs[0], idxs[-1]]\n\n\ndef _monthly_t(cubes):\n \"\"\"Rearrange time points for monthly data.\"\"\"\n # get original cubes tpoints\n days = {day for cube in cubes for day in _datetime_to_int_days(cube)}\n return sorted(days)\n\n\ndef _full_time_slice(cubes, ndat, indices, ndatarr, t_idx):\n \"\"\"Construct a contiguous collection over time.\"\"\"\n for idx_cube, cube in enumerate(cubes):\n # reset mask\n ndat.mask = True\n ndat[indices[idx_cube]] = cube.data\n if np.ma.is_masked(cube.data):\n ndat.mask[indices[idx_cube]] = cube.data.mask\n else:\n ndat.mask[indices[idx_cube]] = False\n ndatarr[idx_cube] = ndat[t_idx]\n\n # return time slice\n return ndatarr\n\n\ndef _assemble_overlap_data(cubes, interval, statistic):\n \"\"\"Get statistical data in iris cubes for OVERLAP.\"\"\"\n start, stop = interval\n sl_1, sl_2 = _slice_cube(cubes[0], start, stop)\n stats_dats = np.ma.zeros(cubes[0].data[sl_1:sl_2 + 1].shape)\n\n # keep this outside the following loop\n # this speeds up the code by a factor of 15\n indices = [_slice_cube(cube, start, stop) for cube in cubes]\n\n for i in range(stats_dats.shape[0]):\n time_data = [\n cube.data[indx[0]:indx[1] + 1][i]\n for cube, indx in zip(cubes, indices)\n ]\n stats_dats[i] = 
_compute_statistic(time_data, statistic)\n stats_cube = _put_in_cube(\n cubes[0][sl_1:sl_2 + 1], stats_dats, statistic, t_axis=None)\n return stats_cube\n\n\ndef _assemble_full_data(cubes, statistic):\n \"\"\"Get statistical data in iris cubes for FULL.\"\"\"\n # all times, new MONTHLY data time axis\n time_axis = [float(fl) for fl in _monthly_t(cubes)]\n\n # new big time-slice array shape\n new_shape = [len(time_axis)] + list(cubes[0].shape[1:])\n\n # assemble an array to hold all time data\n # for all cubes; shape is (ncubes,(plev), lat, lon)\n new_arr = np.ma.empty([len(cubes)] + list(new_shape[1:]))\n\n # data array for stats computation\n stats_dats = np.ma.zeros(new_shape)\n\n # assemble indices list to chop new_arr on\n indices_list = []\n\n # empty data array to hold time slices\n empty_arr = np.ma.empty(new_shape)\n\n # loop through cubes and populate empty_arr with points\n for cube in cubes:\n time_redone = _datetime_to_int_days(cube)\n oidx = [time_axis.index(s) for s in time_redone]\n indices_list.append(oidx)\n for i in range(new_shape[0]):\n # hold time slices only\n new_datas_array = _full_time_slice(cubes, empty_arr, indices_list,\n new_arr, i)\n # list to hold time slices\n time_data = []\n for j in range(len(cubes)):\n time_data.append(new_datas_array[j])\n stats_dats[i] = _compute_statistic(time_data, statistic)\n stats_cube = _put_in_cube(cubes[0], stats_dats, statistic, time_axis)\n return stats_cube\n\n\ndef multi_model_statistics(products, span, output_products, statistics):\n \"\"\"\n Compute multi-model statistics.\n\n Multimodel statistics computed along the time axis. Can be\n computed across a common overlap in time (set span: overlap)\n or across the full length in time of each model (set span: full).\n Restrictive computation is also available by excluding any set of\n models that the user will not want to include in the statistics\n (set exclude: [excluded models list]).\n\n Restrictions needed by the input data:\n - model datasets must have consistent shapes,\n - higher dimensional data is not supported (i.e. dims higher than four:\n time, vertical axis, two horizontal axes).\n\n Parameters\n ----------\n products: list\n list of data products to be used in multimodel stat computation;\n cube attribute of product is the data cube for computing the stats.\n span: str\n overlap or full; if overlap stats are computed on common time-span;\n if full stats are computed on full time spans.\n output_products: dict\n dictionary of output products.\n statistics: list\n statistical measures to be computed (mean and/or median).\n Returns\n -------\n list\n list of data products containing the multimodel stats computed.\n Raises\n ------\n ValueError\n If span is neither overlap nor full.\n\n \"\"\"\n logger.debug('Multimodel statistics: computing: %s', statistics)\n if len(products) < 2:\n logger.info(\"Single dataset in list: will not compute statistics.\")\n return products\n\n cubes = [cube for product in products for cube in product.cubes]\n # check if we have any time overlap\n interval = _get_overlap(cubes)\n if interval is None:\n logger.info(\"Time overlap between cubes is none or a single point.\"\n \" Check datasets: will not compute statistics.\")\n return products\n\n if span == 'overlap':\n logger.debug(\"Using common time overlap between \"\n \"datasets to compute statistics.\")\n elif span == 'full':\n logger.debug(\"Using full time spans to compute statistics.\")\n else:\n raise ValueError(\n \"Unexpected value for span {}, choose from 'overlap', 'full'\"\n 
.format(span))\n\n statistic_products = set()\n for statistic in statistics:\n # Compute statistic\n if span == 'overlap':\n statistic_cube = _assemble_overlap_data(cubes, interval, statistic)\n elif span == 'full':\n statistic_cube = _assemble_full_data(cubes, statistic)\n statistic_cube.data = np.ma.array(\n statistic_cube.data, dtype=np.dtype('float32'))\n\n # Add to output product and log provenance\n statistic_product = output_products[statistic]\n statistic_product.cubes = [statistic_cube]\n for product in products:\n statistic_product.wasderivedfrom(product)\n logger.info(\"Generated %s\", statistic_product)\n statistic_products.add(statistic_product)\n\n products |= statistic_products\n\n return products\n" ]
[ [ "numpy.zeros_like", "numpy.ma.is_masked", "numpy.dtype", "numpy.ma.zeros", "numpy.ma.array", "numpy.ma.fix_invalid", "numpy.all", "numpy.ma.empty" ] ]
TaiCZ/deepchem
[ "66de10d4f862e0077a82fd7460eea2b11b9472aa" ]
[ "deepchem/metrics/score_function.py" ]
[ "\"\"\"Evaluation metrics.\"\"\"\n\nimport numpy as np\nimport scipy.stats\nfrom sklearn.metrics import matthews_corrcoef # noqa\nfrom sklearn.metrics import recall_score # noqa\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn.metrics import r2_score # noqa\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import precision_score # noqa\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score # noqa\nfrom sklearn.metrics import accuracy_score # noqa\nfrom sklearn.metrics import balanced_accuracy_score # noqa\nfrom sklearn.metrics import top_k_accuracy_score # noqa\n\n# kappa_score is an alias for `sklearn.metrics.cohen_kappa_score`\nkappa_score = cohen_kappa_score\n\n\ndef pearsonr(y: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Computes Pearson correlation coefficient.\n\n Parameters\n ----------\n y: np.ndarray\n ground truth array\n y_pred: np.ndarray\n predicted array\n\n Returns\n -------\n float\n The Pearson correlation coefficient.\n \"\"\"\n return scipy.stats.pearsonr(y, y_pred)[0]\n\n\ndef pearson_r2_score(y: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Computes Pearson R^2 (square of Pearson correlation).\n\n Parameters\n ----------\n y: np.ndarray\n ground truth array\n y_pred: np.ndarray\n predicted array\n\n Returns\n -------\n float\n The Pearson-R^2 score.\n \"\"\"\n return scipy.stats.pearsonr(y, y_pred)[0]**2\n\n\ndef jaccard_index(y: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Computes Jaccard Index which is the Intersection Over Union metric\n which is commonly used in image segmentation tasks.\n\n DEPRECATED: WILL BE REMOVED IN A FUTURE VERSION OF DEEEPCHEM. USE `jaccard_score` instead.\n\n Parameters\n ----------\n y: np.ndarray\n ground truth array\n y_pred: np.ndarray\n predicted array\n\n Returns\n -------\n score: float\n The jaccard index. A number between 0 and 1.\n \"\"\"\n return jaccard_score(y, y_pred)\n\n\ndef pixel_error(y: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"An error metric in case y, y_pred are images.\n\n Defined as 1 - the maximal F-score of pixel similarity, or squared\n Euclidean distance between the original and the result labels.\n\n Parameters\n ----------\n y: np.ndarray\n ground truth array\n y_pred: np.ndarray\n predicted array\n\n Returns\n -------\n score: float\n The pixel-error. A number between 0 and 1.\n \"\"\"\n return 1 - f1_score(y, y_pred)\n\n\ndef prc_auc_score(y: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Compute area under precision-recall curve\n\n Parameters\n ----------\n y: np.ndarray\n A numpy array of shape `(N, n_classes)` or `(N,)` with true labels\n y_pred: np.ndarray\n Of shape `(N, n_classes)` with class probabilities.\n\n Returns\n -------\n float\n The area under the precision-recall curve. 
A number between 0 and 1.\n \"\"\"\n precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])\n return auc(recall, precision)\n\n\ndef rms_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Computes RMS error.\"\"\"\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\n\ndef mae_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Computes MAE.\"\"\"\n return mean_absolute_error(y_true, y_pred)\n\n\ndef bedroc_score(y_true: np.ndarray, y_pred: np.ndarray, alpha: float = 20.0):\n \"\"\"Compute BEDROC metric.\n\n BEDROC metric implemented according to Truchon and Bayly that modifies\n the ROC score by allowing for a factor of early recognition.\n Please confirm details from [1]_.\n\n Parameters\n ----------\n y_true: np.ndarray\n Binary class labels. 1 for positive class, 0 otherwise\n y_pred: np.ndarray\n Predicted labels\n alpha: float, default 20.0\n Early recognition parameter\n\n Returns\n -------\n float\n Value in [0, 1] that indicates the degree of early recognition\n\n Notes\n -----\n This function requires RDKit to be installed.\n\n References\n ----------\n .. [1] Truchon et al. \"Evaluating virtual screening methods: good and bad metrics\n for the “early recognition” problem.\" Journal of chemical information and modeling\n 47.2 (2007): 488-508.\n \"\"\"\n try:\n from rdkit.ML.Scoring.Scoring import CalcBEDROC\n except ModuleNotFoundError:\n raise ImportError(\"This function requires RDKit to be installed.\")\n\n # validation\n assert len(y_true) == len(y_pred), 'Number of examples do not match'\n assert np.array_equal(\n np.unique(y_true).astype(int),\n [0, 1]), ('Class labels must be binary: %s' % np.unique(y_true))\n\n yt = np.asarray(y_true)\n yp = np.asarray(y_pred)\n\n yt = yt.flatten()\n yp = yp[:, 1].flatten() # Index 1 because one_hot predictions\n\n scores = list(zip(yt, yp))\n scores = sorted(scores, key=lambda pair: pair[1], reverse=True)\n\n return CalcBEDROC(scores, 0, alpha)\n\n\ndef concordance_index(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n \"\"\"Compute Concordance index.\n\n Statistical metric indicates the quality of the predicted ranking.\n Please confirm details from [1]_.\n\n Parameters\n ----------\n y_true: np.ndarray\n continuous value\n y_pred: np.ndarray\n Predicted value\n\n Returns\n -------\n float\n score between [0,1]\n\n References\n ----------\n .. [1] Steck, Harald, et al. \"On ranking in survival analysis:\n Bounds on the concordance index.\" Advances in neural information processing systems (2008): 1209-1216.\n \"\"\"\n\n idx = np.argsort(y_true)\n y_true = y_true[idx]\n y_pred = y_pred[idx]\n\n pairs = 0\n correct_pairs = 0.0\n\n for i in range(len(y_true)):\n true_a = y_true[i]\n pred_a = y_pred[i]\n\n for j in range(i + 1, len(y_true)):\n true_b = y_true[j]\n pred_b = y_pred[j]\n if true_a != true_b:\n pairs += 1\n if pred_a == pred_b:\n correct_pairs += 0.5\n elif pred_a < pred_b:\n correct_pairs += true_a < true_b\n else:\n correct_pairs += true_a > true_b\n\n assert pairs > 0, 'No pairs for comparison'\n\n return correct_pairs / pairs\n" ]
[ [ "sklearn.metrics.mean_squared_error", "sklearn.metrics.mean_absolute_error", "sklearn.metrics.auc", "numpy.argsort", "numpy.asarray", "sklearn.metrics.f1_score", "sklearn.metrics.precision_recall_curve", "numpy.unique", "sklearn.metrics.jaccard_score" ] ]
BioroboticsLab/deeppipeline
[ "7e60e410b3a1f4d8a65924033f13dbf0ecdd3e94" ]
[ "pipeline/scripts/bb_pipeline_api.py" ]
[ "#!/usr/bin/env python3\n\"\"\"This script provides a RESTful remote endpoint to the detection pipeline.\nAn image is sent to the server, which sends back the requested results.\n\"\"\"\n\nimport inspect\nimport io\nimport json\nfrom tempfile import NamedTemporaryFile\nfrom urllib import parse\n\nimport cachetools\nimport msgpack\nimport numpy as np\nfrom flask import Flask, abort, request\nfrom scipy.misc import imread, imsave\n\nfrom pipeline import Pipeline, objects\nfrom pipeline.pipeline import get_auto_config\n\napp = Flask(__name__)\n\ndefault_output = [objects.LocalizerPositions, objects.Saliencies, objects.IDs]\n\npipeline_cache = cachetools.LRUCache(maxsize=4)\nno_localizer_pipeline_cache = cachetools.LRUCache(maxsize=4)\n\n\ndef init_pipeline(output, no_localizer):\n \"\"\"Helper function to initialize a new pipeline\n that generates the desired output.\n\n Args:\n output (list): pipeline objects that the pipeline will\n generate as the result\n no_localizer (boolean): whether or not the localizer should be\n skipped to decode a single tag in the\n center of a 100x100 image\n\n Returns:\n pipeline object\n \"\"\"\n\n if no_localizer:\n pipeline = Pipeline(\n [objects.Regions, objects.LocalizerPositions], output, **get_auto_config()\n )\n else:\n pipeline = Pipeline([objects.Image], output, **get_auto_config())\n return pipeline\n\n\ndef get_cached_pipeline(output, no_localizer):\n \"\"\"Helper function to get a pipeline that generates the desired output.\n Pipelines are stored in a 'least recently used'-cache of size 4.\n If a pipeline with the specified output is already present in the\n cache, it will be used, otherwise a new one will be created\n and stored in the cache.\n\n Args:\n output (list): pipeline objects that the pipeline will\n generate as the result\n no_localizer (boolean): determines if a pipeline should be\n returned that skips the localizer to\n decode a single tag in the center of a\n 100x100 image\n\n Returns:\n pipeline object\n \"\"\"\n if not (output):\n output_key = frozenset(default_output)\n else:\n output_key = frozenset(output)\n if no_localizer:\n cache = no_localizer_pipeline_cache\n else:\n cache = pipeline_cache\n if output_key in cache:\n print(\"Pipeline is cached.\")\n return cache[output_key]\n else:\n print(\"Pipeline is not cached, initializing new pipeline...\")\n pipeline = init_pipeline(output, no_localizer)\n print(\"...done. 
Adding to cache.\")\n cache[output_key] = pipeline\n return pipeline\n\n\ndef png_encode(instance):\n \"\"\"Helper function to convert a numpy array to a PNG image.\n\n Args:\n instance (ndarray): numpy array containing\n the image to be converted\n\n Returns:\n bytes: bytes containing the PNG-encoded image.\n \"\"\"\n if isinstance(instance, np.ndarray):\n b = io.BytesIO()\n imsave(b, instance, \"png\")\n return b.getvalue()\n return instance\n\n\ndef process_image(pipeline, image, png, no_localizer):\n \"\"\"Helper function to execute a pipeline and get the results.\n\n Args:\n pipeline: pipeline to be executed\n image (ndarray): image as input to the pipeline\n png (list of str): names of the outputs that will be converted to PNG\n no_localizer (boolean): whether or not the localizer should be\n skipped to decode a single tag in the\n center of a 100x100 image\n\n Returns:\n msgpack: dictionary (serialized as a msgpack) containing\n the results of the pipeline with the object names as keys\n \"\"\"\n if no_localizer:\n positions = np.zeros((1, 2))\n regions = image[np.newaxis, np.newaxis, :, :]\n pipeline_results = pipeline([regions, positions])\n else:\n pipeline_results = pipeline([image])\n results_dict = {}\n for (k, v) in pipeline_results.items():\n results_dict[k.__name__] = png_encode(v) if (k.__name__ in png) else v.tolist()\n return msgpack.packb(results_dict)\n\n\[email protected](\"/decode/<mode>\", methods=[\"POST\"])\ndef api_message(mode):\n \"\"\"This function handles the `/decode` URL call.\n The next URL segment determines the decoding mode, `/single` for the\n decoding of a single tag in the center of a 100x100 image and\n `/automatic` for the localization of tags in an image.\n\n An image is appended as data to the request and the result is returned.\n\n The desired output objects can be specified by assigning a JSON list\n to the optional `output` URL parameter (remember to percent-encode\n the string first). If omitted, the default output will be returned\n ('LocalizerPositions', 'Saliencies', 'IDs').\n\n Additionally, any output that should be encoded as PNG (image output,\n e.g. 'CrownOverlay') can be assigned to the optional `png` URL parameter,\n also as a JSON list.\n\n\n Example:\n\n .. code::\n\n import requests\n import json\n from urllib import parse\n import io\n import msgpack\n from scipy.misc import imread\n\n with open('/local/image/file.png', 'rb') as image_file:\n headers = {'Content-type': 'application/octet-stream'}\n output_json = parse.quote(json.dumps(\n ['LocalizerPositions', 'IDs', 'CrownOverlay']))\n png_json = parse.quote(json.dumps(\n ['CrownOverlay']))\n url_params = {'output': output_json, 'png': png_json}\n url = 'http://localhost:10000/decode/automatic'\n result = requests.post(\n url,\n params=url_params,\n data=image_file.read(),\n headers=headers)\n\n The serialized response will be stored in `result`.\n The results can be loaded like this:\n\n .. 
code::\n\n # Deserialize results\n result_unpacked = msgpack.unpackb(result.content)\n ids_list = result_unpacked[b'IDs']\n crownoverlay_image = imread(io.BytesIO(result_unpacked[b'CrownOverlay']))\n\n Note:\n The keys in the result dict are binary, since msgpack does not\n support string keys.\n \"\"\"\n if mode not in [\"single\", \"automatic\"]:\n abort(404)\n if request.headers[\"Content-Type\"] != \"application/octet-stream\":\n abort(415)\n\n no_localizer = mode == \"single\"\n\n print(\"\\nRetrieving process request\")\n print(\"Decoding mode: {}\".format(\"single\" if no_localizer else \"automatic\"))\n url_output_param = request.args.get(\"output\")\n if url_output_param is None:\n print(\"No output specified, using defaults\")\n pipeline = get_cached_pipeline(default_output, no_localizer)\n else:\n output_strings = json.loads(parse.unquote(url_output_param))\n if not output_strings:\n print(\"No output specified, using defaults\")\n pipeline = get_cached_pipeline(default_output, no_localizer)\n else:\n output_objects = []\n for o in output_strings:\n if o in [\n \"PipelineObject\",\n \"PipelineObjectDescription\",\n \"NumpyArrayDescription\",\n \"FilenameDescription\",\n ]:\n print(f\"Illegal pipeline output specified: {o}\")\n return f\"Illegal pipeline output specified: {o}\"\n else:\n c = objects.__dict__.get(o)\n if (c is None) or (not inspect.isclass(c)):\n print(f\"Invalid pipeline output specified: {o}\")\n return f\"Invalid pipeline output specified: {o}\"\n else:\n output_objects.append(c)\n print(\"Specified output: {}\".format([o.__name__ for o in output_objects]))\n output = frozenset(output_objects)\n pipeline = get_cached_pipeline(output, no_localizer)\n png_please = request.args.get(\"png\")\n if png_please is None:\n png = []\n else:\n png = json.loads(parse.unquote(png_please))\n if png:\n print(\"Specified png-encoded output: {}\".format([p for p in png]))\n try:\n with NamedTemporaryFile(delete=True) as f:\n print(\"Loading image...\")\n f.write(request.data)\n image = imread(f)\n if no_localizer and (image.shape != (100, 100)):\n print(f\"Input image has wrong dimensions: {image.shape}\")\n return f\"Input image has wrong dimensions: {image.shape}\"\n print(\"Processing image...\")\n return process_image(pipeline, image, png, no_localizer)\n except Exception as err:\n print(f\"Exception: {err}\")\n return f\"Exception: {err}\"\n\n\ndef main(): # pragma: no cover\n app.run(host=\"0.0.0.0\", port=10000)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main()\n" ]
[ [ "scipy.misc.imsave", "scipy.misc.imread", "numpy.zeros" ] ]
datitran/image-super-resolution
[ "dfa0f2cc429a3f4767a66c7842b7ef663a478875" ]
[ "src/utils/generator.py" ]
[ "import os\nimport numpy as np\nimport keras.backend as K\nfrom imageio import imread\nfrom keras.utils import Sequence\n\n\nclass Generator(Sequence):\n \"\"\"Keras Sequence object to train a model on larger-than-memory data.\n Creates list of LR input and HR label files (images) locations.\n Lists are filtered by extension and sorted by name.\n\n During training takes:\n >> 1 LR image and the corresponding HR image\n\n Returns:\n >> 2 batches of batch_size, 1 for each LR and HR\n >> each batch contains batch_size patches of size patch_size\n >>>> extracted from the one image\n >> each patch in batch is augmented with a random combination of\n >>>> 1 90degree rotation and 1 horizontal/vertical flip\n\n self.index is of the size of the original HR dataset\n (g.e.800 images -> self.len = 800)\n\n the index passed to __getitem__ is of a single image\n \"\"\"\n\n def __init__(\n self,\n input_folder,\n label_folder,\n batch_size,\n mode='train',\n patch_size=None,\n scale=None,\n n_validation_samples=None,\n ):\n self.mode = mode # shuffle when in train mode\n self.scale = scale # scaling factor LR to HR\n self.batch_size = batch_size # batch size\n self.extensions = ['.png', '.jpeg', '.jpg'] # admissible extension\n self.folder = {'HR': label_folder, 'LR': input_folder} # image folders\n # size of patches to extract from LR images\n self.patch_size = {'LR': patch_size, 'HR': patch_size * self.scale}\n\n self.img_list = {} # list of file names\n for res in ['HR', 'LR']:\n file_names = os.listdir(self.folder[res])\n file_names = [file for file in file_names if any(file.lower().endswith(ext) for ext in self.extensions)]\n\n if self.mode is 'valid':\n self.img_list[res] = np.sort(file_names)[0:n_validation_samples]\n else:\n self.img_list[res] = np.sort(file_names)\n\n # order of asserts is important for testing\n assert self.img_list['HR'].shape[0] == self.img_list['LR'].shape[0], 'UnevenDatasets'\n assert self.matching_datasets(), 'Input/LabelsMismatch'\n\n self.indices = np.arange(self.img_list['HR'].shape[0]) # indexes list\n\n def matching_datasets(self):\n # LR_name.png = HR_name+x+scale.png\n LR_name_root = [x.split('.')[0].split('x')[0] for x in self.img_list['LR']]\n HR_name_root = [x.split('.')[0] for x in self.img_list['HR']]\n return np.all(HR_name_root == LR_name_root)\n\n def __len__(self):\n # compute number of batches to yield\n return len(self.img_list['HR'])\n\n def on_epoch_end(self):\n # Shuffles indexes after each epoch if in training mode\n if self.mode == 'train':\n np.random.shuffle(self.indices)\n\n def _crop_imgs(self, imgs, idx=0):\n \"\"\"Get top left corners coordinates in LR space, multiply by scale to\n get HR coordinates.\n During training the corners are randomly chosen.\n During validation the corners are chosen randomly according to\n a specific seed.\n Square crops of size patch_size are taken from the selected\n top left corners.\n \"\"\"\n top_left = {'x': {}, 'y': {}}\n for i, axis in enumerate(['x', 'y']):\n if self.mode == 'train':\n top_left[axis]['LR'] = np.random.randint(\n 0, imgs['LR'].shape[i] - self.patch_size['LR'] + 1, self.batch_size\n )\n if self.mode == 'valid':\n not_random = np.random.RandomState(idx)\n top_left[axis]['LR'] = not_random.randint(\n 0, imgs['LR'].shape[i] - self.patch_size['LR'] + 1, self.batch_size\n )\n top_left[axis]['HR'] = top_left[axis]['LR'] * self.scale\n\n crops = {}\n for res in ['LR', 'HR']:\n slices = [\n [slice(x, x + self.patch_size[res]), slice(y, y + self.patch_size[res])]\n for x, y in 
zip(top_left['x'][res], top_left['y'][res])\n ]\n crops[res] = np.array([imgs[res][s[0], s[1], slice(None)] for s in slices])\n return crops\n\n def get_batches(self, idx):\n \"\"\"Fetch a batch of images LR and HR.\n Takes #batch_size random patches from 1 LR and 1 HR image.\n Returns the patches as a batch.\n \"\"\"\n imgs = {}\n for res in ['LR', 'HR']:\n imgs[res] = imread(os.path.join(self.folder[res], self.img_list[res][idx])) / 255.0\n\n return self._crop_imgs(imgs, idx)\n\n def _apply_transform(self, img, transform_selection):\n \"\"\"Rotates and flips input image according to transform_selection.\"\"\"\n rotate = {\n 0: lambda x: x,\n 1: lambda x: np.rot90(x, k=1, axes=(1, 0)), # rotate right\n 2: lambda x: np.rot90(x, k=1, axes=(0, 1)),\n } # rotate left\n\n flip = {\n 0: lambda x: x,\n 1: lambda x: np.flip(x, 0), # flip along horizontal axis\n 2: lambda x: np.flip(x, 1),\n } # flip along vertical axis\n\n rot_direction = transform_selection[0]\n flip_axis = transform_selection[1]\n\n img = rotate[rot_direction](img)\n img = flip[flip_axis](img)\n\n return img\n\n def _transform_batch(self, batch, transforms):\n \"\"\"Transforms each individual image of the batch independently.\"\"\"\n\n t_batch = np.array([self._apply_transform(img, transforms[i]) for i, img in enumerate(batch)])\n return t_batch\n\n def __getitem__(self, idx):\n # idx is batch index\n idx = self.indices[idx]\n batches = self.get_batches(idx)\n if self.mode == 'train':\n # Select the random transformations to apply\n transforms = np.random.randint(0, 3, (self.batch_size, 2))\n batches['LR'] = self._transform_batch(batches['LR'], transforms)\n batches['HR'] = self._transform_batch(batches['HR'], transforms)\n\n return batches['LR'], batches['HR']\n" ]
[ [ "numpy.random.shuffle", "numpy.arange", "numpy.random.RandomState", "numpy.all", "numpy.rot90", "numpy.flip", "numpy.sort", "numpy.random.randint" ] ]
ale8193/reinforcement-learning-openai-gym
[ "0b918701ab190f22c5973bd798961b389d43a2d1" ]
[ "algorithms/q_learning.py" ]
[ "import numpy as np\nimport sys\nimport math\n\nfrom utils.plotting import EpisodeStats\nfrom algorithms.discretization_functions import default_discretization\n\n\nclass QLearning:\n\n def __init__(self,\n env,\n buckets=(1, ),\n discount_factor=1.0,\n min_alpha=0.1,\n min_epsilon=0.1,\n discretize_fn=default_discretization,\n ada_divisor=25,\n env_render=False,\n env_wrapper=False\n ):\n \"\"\"\n Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy\n while following an epsilon-greedy policy\n\n :param env: OpenAI environment.\n :param buckets: Tuple containing the bins for discretize the continuous features. Default None.\n :param discount_factor: Gamma discount factor. Float between [0, 1].\n :param alpha: Temporal-Difference (TD) learning rate. Float between (0, 1].\n :param discretize_fn: Function used to discretize the state when necessary. Default no trasformation done.\n :param epsilon: Chance to sample a random action. Float between (0, 1].\n :param env_render: Wheter render the env or not. Boolean default False.\n \"\"\"\n self.env = env\n self.buckets = buckets\n self.discount_factor = discount_factor\n self.min_alpha = min_alpha\n self.min_epsilon = min_epsilon\n self.discretize_fn = discretize_fn\n self.ada_divisor = ada_divisor\n self.env_render = env_render\n self.env_wrapper = env_wrapper\n # TODO check the value of the parameters\n\n def __init_action_value_function(self):\n \"\"\"\n Init the action-value function Q\n \"\"\"\n return np.zeros(self.buckets + (self.env.action_space.n, ))\n\n def process_state(self, state):\n \"\"\"\n Method to process the state when necessary using the discretize_fn provided to the agent.\n\n :param state: State to be processed.\n :return: The state processed.\n \"\"\"\n return self.discretize_fn(state, self.env, self.buckets)\n\n def get_epsilon(self, t):\n return max(self.min_epsilon, min(1, 1.0 - math.log10((t + 1) / self.ada_divisor)))\n\n def get_alpha(self, t):\n return max(self.min_alpha, min(1.0, 1.0 - math.log10((t + 1) / self.ada_divisor)))\n\n def learn(self, num_episodes):\n \"\"\"\n Q-learning function approximation: Q(s, a) += alpha * (reward(s,a) + gamma * max(Q(s',a) - Q(s,a))\n\n :param num_episodes:\n :return: A tuple (Q, stats).\n Q is the optimal action-value function founded, a dictionary mapping state -> action values.\n stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n \"\"\"\n\n # The final action-value function.\n # A nested dictionary that maps state -> (action -> action-value).\n Q = self.__init_action_value_function()\n\n # Keeps track of useful statistics\n stats = EpisodeStats(episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes),\n episode_epsilon=np.zeros(num_episodes),\n episode_alpha=np.zeros(num_episodes))\n\n # The policy we're following\n policy = make_epsilon_greedy_policy(Q, self.env.action_space.n)\n\n for i_episode in range(num_episodes):\n # Print out which episode we're on, useful for debugging.\n if (i_episode + 1) % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode + 1, num_episodes), end=\"\")\n sys.stdout.flush()\n\n # Get the first random state\n state = self.process_state(self.env.reset())\n\n # Time step t\n t = 0\n\n # Compute the new epsilon and alpha\n epsilon = self.get_epsilon(i_episode)\n alpha = self.get_alpha(i_episode)\n\n while True:\n\n # Render the env if requested\n if self.env_render:\n self.env.render()\n\n # Take a step\n action_probs = policy(state, epsilon)\n action = 
np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_state, reward, done, _ = self.env.step(action)\n next_state = self.process_state(next_state)\n\n # Update statistics\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n stats.episode_epsilon[i_episode] = epsilon\n stats.episode_alpha[i_episode] = alpha\n\n # TD Update\n best_next_action = np.argmax(Q[next_state])\n td_target = reward + self.discount_factor * Q[next_state][best_next_action]\n td_delta = td_target - Q[state][action]\n Q[state][action] += alpha * td_delta\n\n if done:\n break\n\n state = next_state\n t += 1\n\n return Q, stats\n\n\ndef make_epsilon_greedy_policy(Q, num_actions):\n \"\"\"\n Creates an epsilon-greedy policy based on a given Q-function and epsilon.\n\n :param Q: A dictionary that maps from state -> action-values.\n Each value is a numpy array of length num_actions (see below)\n :param num_actions: Number of actions in the environment.\n :return: A function that takes the state as an argument and returns\n the probabilities for each action in the form of a numpy array of length num_actions.\n \"\"\"\n\n def policy_fn(state, epsilon):\n A = np.ones(num_actions, dtype=float) * epsilon / num_actions\n best_action = np.argmax(Q[state])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn\n\n\n\n" ]
[ [ "numpy.ones", "numpy.argmax", "numpy.zeros" ] ]
Hieronymus98/Distributed-Batteryless-Microphone
[ "9924582a46d23cf580f9b8acc998f650cb022bc6" ]
[ "scripts/plottingScripts/sysAvailabilityTimeline.py" ]
[ "from inspect import currentframe, getframeinfo\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nplt.style.use('seaborn-ticks')\n\n# To disable debugging set it to False\n# To print all debugging info set the second entry to 0\n# To print a specific message set its id\nDEBUG = [False, 0]\n\ndef print_debug_info(label,msg, id):\n if DEBUG[0]:\n if DEBUG[1] == 0 or DEBUG[1] == id:\n print(label)\n print(msg)\n\ndef load_cis_data(path):\n data=[]\n with open(path) as f:\n for cis in f:\n print_debug_info(\"+++ cis len\", len(cis), getframeinfo(currentframe()).lineno)\n data.append(json.loads(cis))\n return data\n\ndef cleaned_data(data):\n cleaned_cis=[]\n for cis in data:\n cleaned_cis.append(np.array(cis[0]) * 100 ) # * 100 for plotting \n return cleaned_cis\n\ndef get_labels(data):\n return __get_column(data,0)\n\ndef get_unlabelled_data(data):\n return __get_column(data,1)\n\ndef __get_column(data,idx):\n col=[]\n for row in data:\n col.append(row[idx])\n return col\n\n\ndef color_box(box, color):\n for _, line_list in box.items():\n for line in line_list:\n line.set_color(color)\n\n\ndef main():\n fontSize=16\n ontime_path = '../processed_data/new/availabilityTimeline470_sleep_interval5.json'\n \n # Data Layout in a file\n ## ['label', [[1,2],[1,3,4,5,],[]...]]\n ## ['label', [[4l,2],[1],[9,3,4]...]]\n\n # Get the raw data of cis of 8 nodes\n ontime_data = load_cis_data(ontime_path)\n print_debug_info('+++ on-time raw data', ontime_data, getframeinfo(currentframe()).lineno)\n \n # getting the labels from the data\n ontime_label = get_labels(ontime_data)\n print_debug_info('+++ ontime label', ontime_label, getframeinfo(currentframe()).lineno)\n\n # Get the data without the labels\n unlabelled_ontime_data = get_unlabelled_data(ontime_data)\n print_debug_info('+++ unlabeled ontime data', unlabelled_ontime_data,getframeinfo(currentframe()).lineno )\n\n clean_data = cleaned_data(unlabelled_ontime_data)\n # print (clean_data, file=open(\"debug.txt\", 'w'))\n # exit()\n fig = plt.figure(figsize=(8,3.6))\n\n # for idx, cis in enumerate(clean_data):\n colors = [\"#d73027\", \"#fee090\", \"#4575b4\"]\n label = [ '900 lux', '500 lux','800 lux']\n patterns=['-+', '-o', '->', '-x', '-s', '-*']\n for i in range(len(clean_data)):\n # print(np.mean(clean_data[i]),\"&\", np.std(clean_data[i]))\n plt.plot(clean_data[i], patterns[i], label=label[i], color=colors[i])\n # box = plt.boxplot(clean_data, showfliers=False)\n # color_box(box, \"#000000\")\n \n plt.gca().grid(True, axis='y') \n plt.ylabel(\"Availability (%)\", fontsize=fontSize)\n plt.xlabel(\"Seconds\", fontsize=fontSize)\n # plt.xticks(np.arange(5)+1,(300,500,800,1000,1400),fontsize=fontSize-2)\n plt.yticks(fontsize=fontSize-2)\n plt.xticks(fontsize=fontSize-2)\n plt.legend(fontsize=fontSize, loc=\"lower right\")\n plt.tight_layout()\n plt.savefig('../../paper/figures/sysAvailabilityTimeline_470_sleep_5seconds_2.eps')\n plt.show()\n \nif __name__==\"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
MillionIntegrals/ddpg-tensorflow
[ "7a61b14ed47766e8cdf70121570e5a0a5a241f02" ]
[ "ddpg/action_noise.py" ]
[ "import numpy as np\n\n\nclass OrnsteinUhlenbeckNoiseProcess:\n \"\"\"\n Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py\n \"\"\"\n def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt\n self.x0 = x0\n self.x_prev = None\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def __repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n\n\nclass NormalActionNoise:\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def __call__(self):\n return np.random.normal(self.mu, self.sigma)\n\n def __repr__(self):\n return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n" ]
[ [ "numpy.random.normal", "numpy.zeros_like", "numpy.sqrt" ] ]
Ze-Yang/SDR
[ "623bcb6b4fd6775c10ffd6ed72a59c23a0b1fd8a" ]
[ "net/resnet_atrous.py" ]
[ "import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom net.sync_batchnorm import SynchronizedBatchNorm2d\n\nbn_mom = 0.0003\nmodel_urls = {\n 'resnet18': 'http://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'http://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'http://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'http://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'http://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\ndef conv3x3(in_planes, out_planes, stride=1, atrous=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1*atrous, dilation=atrous, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride, atrous)\n #self.bn1 = nn.BatchNorm2d(planes)\n self.bn1 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n #self.bn2 = nn.BatchNorm2d(planes)\n self.bn2 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n #self.bn1 = nn.BatchNorm2d(planes)\n self.bn1 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1*atrous, dilation=atrous, bias=False)\n #self.bn2 = nn.BatchNorm2d(planes)\n self.bn2 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n # self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet_Atrous(nn.Module):\n\n def __init__(self, block, layers, atrous=None, os=16):\n super(ResNet_Atrous, self).__init__()\n stride_list = None\n if os == 8:\n stride_list = [2,1,1]\n elif os == 16:\n stride_list = [2,2,1]\n else:\n raise ValueError('resnet_atrous.py: output stride=%d is not supported.'%os) \n \n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n# self.conv1 = nn.Sequential(\n# nn.Conv2d(3,64,kernel_size=3, stride=2, padding=1),\n# nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1),\n# nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1),\n# )\n self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n 
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        self.layer1 = self._make_layer(block, 64, 64, layers[0])\n        self.layer2 = self._make_layer(block, 256, 128, layers[1], stride=stride_list[0])\n        self.layer3 = self._make_layer(block, 512, 256, layers[2], stride=stride_list[1], atrous=16//os)\n        self.layer4 = self._make_layer(block, 1024, 512, layers[3], stride=stride_list[2], atrous=[item*16//os for item in atrous])\n        #self.layer5 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])\n        #self.layer6 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])\n        #self.layer7 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])\n        self.layers = []\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n            elif isinstance(m, SynchronizedBatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n\n    def get_layers(self):\n        return self.layers\n\n    def _make_layer(self, block, inplanes, planes, blocks, stride=1, atrous=None):\n        downsample = None\n        if atrous is None:\n            atrous = [1]*blocks\n        elif isinstance(atrous, int):\n            atrous = [atrous]*blocks\n        if stride != 1 or inplanes != planes*block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                SynchronizedBatchNorm2d(planes * block.expansion, momentum=bn_mom),\n            )\n\n        layers = []\n        layers.append(block(inplanes, planes, stride=stride, atrous=atrous[0], downsample=downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(planes*block.expansion, planes, stride=1, atrous=atrous[i]))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        self.layers = []\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n        self.layers.append(x)\n        x = self.layer1(x)\n        self.layers.append(x)\n        x = self.layer2(x)\n        self.layers.append(x)\n        x = self.layer3(x)\n        self.layers.append(x)\n        x = self.layer4(x)\n        #x = self.layer5(x)\n        #x = self.layer6(x)\n        #x = self.layer7(x)\n        self.layers.append(x)\n\n        return x\n\ndef resnet50_atrous(pretrained=True, os=16, **kwargs):\n    \"\"\"Constructs an atrous ResNet-50 model.\"\"\"\n    model = ResNet_Atrous(Bottleneck, [3, 4, 6, 3], atrous=[1,2,1], os=os, **kwargs)\n    if pretrained:\n        old_dict = model_zoo.load_url(model_urls['resnet50'])\n        model_dict = model.state_dict()\n        old_dict = {k: v for k,v in old_dict.items() if (k in model_dict)}\n        model_dict.update(old_dict)\n        model.load_state_dict(model_dict) \n    return model\n\n\ndef resnet101_atrous(pretrained=True, os=16, **kwargs):\n    \"\"\"Constructs an atrous ResNet-101 model.\"\"\"\n    model = ResNet_Atrous(Bottleneck, [3, 4, 23, 3], atrous=[2,2,2], os=os, **kwargs)\n    if pretrained:\n        old_dict = model_zoo.load_url(model_urls['resnet101'])\n        model_dict = model.state_dict()\n        old_dict = {k: v for k,v in old_dict.items() if (k in model_dict)}\n        model_dict.update(old_dict)\n        model.load_state_dict(model_dict) \n    return model\n\n\ndef resnet152_atrous(pretrained=True, os=16, **kwargs):\n    \"\"\"Constructs an atrous ResNet-152 model.\"\"\"\n    model = ResNet_Atrous(Bottleneck, [3, 8, 36, 3], atrous=[2,2,2], os=os, **kwargs)\n    if pretrained:\n        old_dict = model_zoo.load_url(model_urls['resnet152'])\n        model_dict = model.state_dict()\n        old_dict = {k: v for k,v in old_dict.items() if (k in model_dict)}\n        model_dict.update(old_dict)\n        model.load_state_dict(model_dict) \n    return model\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.init.kaiming_normal_", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.nn.ReLU" ] ]
thaiph99/facenet
[ "8d9c8acdae076383e3354b293a1be47ab1f96a63" ]
[ "src/train_tripletloss.py" ]
[ "\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\nimport sys\nimport tensorflow.compat.v1 as tf\nimport numpy as np\nimport importlib\nimport itertools\nimport argparse\nimport facenet\nimport lfw\n\nfrom tensorflow.python.ops import data_flow_ops\n\nfrom six.moves import xrange # @UnresolvedImport\n\n\ndef main(args):\n\n network = importlib.import_module(args.model_def)\n\n subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')\n log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)\n if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist\n os.makedirs(log_dir)\n model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)\n if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist\n os.makedirs(model_dir)\n\n # Write arguments to a text file\n facenet.write_arguments_to_file(\n args, os.path.join(log_dir, 'arguments.txt'))\n\n # Store some git revision info in a text file in the log directory\n src_path, _ = os.path.split(os.path.realpath(__file__))\n facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))\n\n np.random.seed(seed=args.seed)\n train_set = facenet.get_dataset(args.data_dir)\n\n print('Model directory: %s' % model_dir)\n print('Log directory: %s' % log_dir)\n if args.pretrained_model:\n print('Pre-trained model: %s' %\n os.path.expanduser(args.pretrained_model))\n\n if args.lfw_dir:\n print('LFW directory: %s' % args.lfw_dir)\n # Read the file containing the pairs used for testing\n pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))\n # Get the paths for the corresponding images\n lfw_paths, actual_issame = lfw.get_paths(\n os.path.expanduser(args.lfw_dir), pairs)\n\n with tf.Graph().as_default():\n tf.set_random_seed(args.seed)\n global_step = tf.Variable(0, trainable=False)\n\n # Placeholder for the learning rate\n learning_rate_placeholder = tf.placeholder(\n tf.float32, name='learning_rate')\n\n batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')\n\n phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')\n\n image_paths_placeholder = tf.placeholder(\n tf.string, shape=(None, 3), name='image_paths')\n labels_placeholder = tf.placeholder(\n tf.int64, shape=(None, 3), name='labels')\n\n input_queue = data_flow_ops.FIFOQueue(capacity=100000,\n dtypes=[tf.string, tf.int64],\n shapes=[(3,), (3,)],\n shared_name=None, name=None)\n enqueue_op = input_queue.enqueue_many(\n [image_paths_placeholder, labels_placeholder])\n\n nrof_preprocess_threads = 4\n images_and_labels = []\n for _ in range(nrof_preprocess_threads):\n filenames, label = input_queue.dequeue()\n images = []\n for filename in tf.unstack(filenames):\n file_contents = tf.read_file(filename)\n image = tf.image.decode_image(file_contents, channels=3)\n image = tf.to_float(image)\n if args.random_crop:\n image = tf.random_crop(\n image, [args.image_size, args.image_size, 3])\n else:\n image = tf.image.resize_image_with_crop_or_pad(\n image, args.image_size, args.image_size)\n if args.random_flip:\n image = tf.image.random_flip_left_right(image)\n\n #pylint: disable=no-member\n image.set_shape((args.image_size, args.image_size, 3))\n images.append(tf.image.per_image_standardization(image))\n images_and_labels.append([images, label])\n\n image_batch, labels_batch = tf.train.batch_join(\n images_and_labels, batch_size=batch_size_placeholder,\n 
shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,\n capacity=4 * nrof_preprocess_threads * args.batch_size,\n allow_smaller_final_batch=True)\n image_batch = tf.identity(image_batch, 'image_batch')\n image_batch = tf.identity(image_batch, 'input')\n labels_batch = tf.identity(labels_batch, 'label_batch')\n\n # Build the inference graph\n prelogits, _ = network.inference(image_batch, args.keep_probability,\n phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,\n weight_decay=args.weight_decay)\n\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')\n # Split embeddings into anchor, positive and negative and calculate triplet loss\n anchor, positive, negative = tf.unstack(tf.reshape(\n embeddings, [-1, 3, args.embedding_size]), 3, 1)\n triplet_loss = facenet.triplet_loss(\n anchor, positive, negative, args.alpha)\n\n learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,\n args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)\n tf.summary.scalar('learning_rate', learning_rate)\n\n # Calculate the total losses\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(\n [triplet_loss] + regularization_losses, name='total_loss')\n\n # Build a Graph that trains the model with one batch of examples and updates the model parameters\n train_op = facenet.train(total_loss, global_step, args.optimizer,\n learning_rate, args.moving_average_decay, tf.global_variables())\n\n # Create a saver\n saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n # Start running operations on the Graph.\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Initialize variables\n sess.run(tf.global_variables_initializer(),\n feed_dict={phase_train_placeholder: True})\n sess.run(tf.local_variables_initializer(),\n feed_dict={phase_train_placeholder: True})\n\n summary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(coord=coord, sess=sess)\n\n with sess.as_default():\n\n if args.pretrained_model:\n print('Restoring pretrained model: %s' % args.pretrained_model)\n saver.restore(sess, os.path.expanduser(args.pretrained_model))\n\n # Training and validation loop\n epoch = 0\n while epoch < args.max_nrof_epochs:\n step = sess.run(global_step, feed_dict=None)\n epoch = step // args.epoch_size\n # Train for one epoch\n train(args, sess, train_set, epoch, image_paths_placeholder, labels_placeholder, labels_batch,\n batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue, global_step,\n embeddings, total_loss, train_op, summary_op, summary_writer, args.learning_rate_schedule_file,\n args.embedding_size, anchor, positive, negative, triplet_loss)\n\n # Save variables and the metagraph if it doesn't exist already\n save_variables_and_metagraph(\n sess, saver, summary_writer, model_dir, subdir, step)\n\n # Evaluate on LFW\n if args.lfw_dir:\n evaluate(sess, lfw_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder,\n batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, actual_issame, args.batch_size,\n args.lfw_nrof_folds, log_dir, step, 
summary_writer, args.embedding_size)\n\n return model_dir\n\n\ndef train(args, sess, dataset, epoch, image_paths_placeholder, labels_placeholder, labels_batch,\n batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue, global_step,\n embeddings, loss, train_op, summary_op, summary_writer, learning_rate_schedule_file,\n embedding_size, anchor, positive, negative, triplet_loss):\n batch_number = 0\n\n if args.learning_rate > 0.0:\n lr = args.learning_rate\n else:\n lr = facenet.get_learning_rate_from_file(\n learning_rate_schedule_file, epoch)\n while batch_number < args.epoch_size:\n # Sample people randomly from the dataset\n image_paths, num_per_class = sample_people(\n dataset, args.people_per_batch, args.images_per_person)\n\n print('Running forward pass on sampled images: ', end='')\n start_time = time.time()\n nrof_examples = args.people_per_batch * args.images_per_person\n labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))\n image_paths_array = np.reshape(\n np.expand_dims(np.array(image_paths), 1), (-1, 3))\n sess.run(enqueue_op, {\n image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n emb_array = np.zeros((nrof_examples, embedding_size))\n nrof_batches = int(np.ceil(nrof_examples / args.batch_size))\n for i in range(nrof_batches):\n batch_size = min(nrof_examples-i*args.batch_size, args.batch_size)\n emb, lab = sess.run([embeddings, labels_batch], feed_dict={batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr, phase_train_placeholder: True})\n emb_array[lab, :] = emb\n print('%.3f' % (time.time()-start_time))\n\n # Select triplets based on the embeddings\n print('Selecting suitable triplets for training')\n triplets, nrof_random_negs, nrof_triplets = select_triplets(emb_array, num_per_class,\n image_paths, args.people_per_batch, args.alpha)\n selection_time = time.time() - start_time\n print('(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds' %\n (nrof_random_negs, nrof_triplets, selection_time))\n\n # Perform training on the selected triplets\n nrof_batches = int(np.ceil(nrof_triplets*3/args.batch_size))\n triplet_paths = list(itertools.chain(*triplets))\n labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))\n triplet_paths_array = np.reshape(\n np.expand_dims(np.array(triplet_paths), 1), (-1, 3))\n sess.run(enqueue_op, {\n image_paths_placeholder: triplet_paths_array, labels_placeholder: labels_array})\n nrof_examples = len(triplet_paths)\n train_time = 0\n i = 0\n emb_array = np.zeros((nrof_examples, embedding_size))\n loss_array = np.zeros((nrof_triplets,))\n summary = tf.Summary()\n step = 0\n while i < nrof_batches:\n start_time = time.time()\n batch_size = min(nrof_examples-i*args.batch_size, args.batch_size)\n feed_dict = {batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr, phase_train_placeholder: True}\n err, _, step, emb, lab = sess.run(\n [loss, train_op, global_step, embeddings, labels_batch], feed_dict=feed_dict)\n emb_array[lab, :] = emb\n loss_array[i] = err\n duration = time.time() - start_time\n print('Epoch: [%d][%d/%d]\\tTime %.3f\\tLoss %2.3f' %\n (epoch, batch_number+1, args.epoch_size, duration, err))\n batch_number += 1\n i += 1\n train_time += duration\n summary.value.add(tag='loss', simple_value=err)\n\n # Add validation loss and accuracy to summary\n summary.value.add(tag='time/selection', simple_value=selection_time)\n summary_writer.add_summary(summary, step)\n return step\n\n\ndef select_triplets(embeddings, 
nrof_images_per_class, image_paths, people_per_batch, alpha):\n    \"\"\" Select the triplets for training\n    \"\"\"\n    trip_idx = 0\n    emb_start_idx = 0\n    num_trips = 0\n    triplets = []\n\n\n    for i in xrange(people_per_batch):\n        nrof_images = int(nrof_images_per_class[i])\n        for j in xrange(1, nrof_images):\n            a_idx = emb_start_idx + j - 1\n            neg_dists_sqr = np.sum(\n                np.square(embeddings[a_idx] - embeddings), 1)\n            # For every possible positive pair.\n            for pair in xrange(j, nrof_images):\n                p_idx = emb_start_idx + pair\n                pos_dist_sqr = np.sum(\n                    np.square(embeddings[a_idx]-embeddings[p_idx]))\n                neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.nan\n                # all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0]  # FaceNet selection\n                # VGG Face selection\n                all_neg = np.where(neg_dists_sqr-pos_dist_sqr < alpha)[0]\n                nrof_random_negs = all_neg.shape[0]\n                if nrof_random_negs > 0:\n                    rnd_idx = np.random.randint(nrof_random_negs)\n                    n_idx = all_neg[rnd_idx]\n                    triplets.append(\n                        (image_paths[a_idx], image_paths[p_idx], image_paths[n_idx]))\n                    # print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' %\n                    # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n                    trip_idx += 1\n\n                num_trips += 1\n\n        emb_start_idx += nrof_images\n\n    np.random.shuffle(triplets)\n    return triplets, num_trips, len(triplets)\n\n\ndef sample_people(dataset, people_per_batch, images_per_person):\n    nrof_images = people_per_batch * images_per_person\n\n    # Sample classes from the dataset\n    nrof_classes = len(dataset)\n    class_indices = np.arange(nrof_classes)\n    np.random.shuffle(class_indices)\n\n    i = 0\n    image_paths = []\n    num_per_class = []\n    sampled_class_indices = []\n    # Sample images from these classes until we have enough\n    while len(image_paths) < nrof_images:\n        print(image_paths)\n        class_index = class_indices[i]\n        nrof_images_in_class = len(dataset[class_index])\n        image_indices = np.arange(nrof_images_in_class)\n        np.random.shuffle(image_indices)\n        nrof_images_from_class = min(\n            nrof_images_in_class, images_per_person, nrof_images-len(image_paths))\n        idx = image_indices[0:nrof_images_from_class]\n        image_paths_for_class = [\n            dataset[class_index].image_paths[j] for j in idx]\n        sampled_class_indices += [class_index]*nrof_images_from_class\n        image_paths += image_paths_for_class\n        num_per_class.append(nrof_images_from_class)\n        i += 1\n        if i == 7:\n            break\n\n    return image_paths, num_per_class\n\n\ndef evaluate(sess, image_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder,\n             batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, actual_issame, batch_size,\n             nrof_folds, log_dir, step, summary_writer, embedding_size):\n    start_time = time.time()\n    # Run forward pass to calculate embeddings\n    print('Running forward pass on LFW images: ', end='')\n\n    nrof_images = len(actual_issame)*2\n    assert(len(image_paths) == nrof_images)\n    labels_array = np.reshape(np.arange(nrof_images), (-1, 3))\n    image_paths_array = np.reshape(\n        np.expand_dims(np.array(image_paths), 1), (-1, 3))\n    sess.run(enqueue_op, {\n        image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n    emb_array = np.zeros((nrof_images, embedding_size))\n    nrof_batches = int(np.ceil(nrof_images / batch_size))\n    label_check_array = np.zeros((nrof_images,))\n    for i in xrange(nrof_batches):\n        batch_size = min(nrof_images-i*batch_size, batch_size)\n        emb, lab = 
sess.run([embeddings, labels_batch], feed_dict={batch_size_placeholder: batch_size,\n learning_rate_placeholder: 0.0, phase_train_placeholder: False})\n emb_array[lab, :] = emb\n label_check_array[lab] = 1\n print('%.3f' % (time.time()-start_time))\n\n assert(np.all(label_check_array == 1))\n\n _, _, accuracy, val, val_std, far = lfw.evaluate(\n emb_array, actual_issame, nrof_folds=nrof_folds)\n\n print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))\n print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))\n lfw_time = time.time() - start_time\n # Add validation loss and accuracy to summary\n summary = tf.Summary()\n #pylint: disable=maybe-no-member\n summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))\n summary.value.add(tag='lfw/val_rate', simple_value=val)\n summary.value.add(tag='time/lfw', simple_value=lfw_time)\n summary_writer.add_summary(summary, step)\n with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:\n f.write('%d\\t%.5f\\t%.5f\\n' % (step, np.mean(accuracy), val))\n\n\ndef save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):\n # Save the model checkpoint\n print('Saving variables')\n start_time = time.time()\n checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)\n saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)\n save_time_variables = time.time() - start_time\n print('Variables saved in %.2f seconds' % save_time_variables)\n metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)\n save_time_metagraph = 0\n if not os.path.exists(metagraph_filename):\n print('Saving metagraph')\n start_time = time.time()\n saver.export_meta_graph(metagraph_filename)\n save_time_metagraph = time.time() - start_time\n print('Metagraph saved in %.2f seconds' % save_time_metagraph)\n summary = tf.Summary()\n #pylint: disable=maybe-no-member\n summary.value.add(tag='time/save_variables',\n simple_value=save_time_variables)\n summary.value.add(tag='time/save_metagraph',\n simple_value=save_time_metagraph)\n summary_writer.add_summary(summary, step)\n\n\ndef get_learning_rate_from_file(filename, epoch):\n with open(filename, 'r') as f:\n for line in f.readlines():\n line = line.split('#', 1)[0]\n if line:\n par = line.strip().split(':')\n e = int(par[0])\n lr = float(par[1])\n if e <= epoch:\n learning_rate = lr\n else:\n return learning_rate\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--logs_base_dir', type=str,\n help='Directory where to write event logs.', default='~/logs/facenet')\n parser.add_argument('--models_base_dir', type=str,\n help='Directory where to write trained models and checkpoints.', default='~/models/facenet')\n parser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)\n parser.add_argument('--pretrained_model', type=str,\n help='Load a pretrained model before training starts.')\n parser.add_argument('--data_dir', type=str,\n help='Path to the data directory containing aligned face patches.',\n default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')\n parser.add_argument('--model_def', type=str,\n help='Model definition. 
Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')\n parser.add_argument('--max_nrof_epochs', type=int,\n help='Number of epochs to run.', default=500)\n parser.add_argument('--batch_size', type=int,\n help='Number of images to process in a batch.', default=90)\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=160)\n parser.add_argument('--people_per_batch', type=int,\n help='Number of people per batch.', default=7) # 45\n parser.add_argument('--images_per_person', type=int,\n help='Number of images per person.', default=6) # 40\n parser.add_argument('--epoch_size', type=int,\n help='Number of batches per epoch.', default=1000)\n parser.add_argument('--alpha', type=float,\n help='Positive to negative triplet distance margin.', default=0.2)\n parser.add_argument('--embedding_size', type=int,\n help='Dimensionality of the embedding.', default=128)\n parser.add_argument('--random_crop',\n help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +\n 'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')\n parser.add_argument('--random_flip',\n help='Performs random horizontal flipping of training images.', action='store_true')\n parser.add_argument('--keep_probability', type=float,\n help='Keep probability of dropout for the fully connected layer(s).', default=1.0)\n parser.add_argument('--weight_decay', type=float,\n help='L2 weight regularization.', default=0.0)\n parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],\n help='The optimization algorithm to use', default='ADAGRAD')\n parser.add_argument('--learning_rate', type=float,\n help='Initial learning rate. If set to a negative value a learning rate ' +\n 'schedule can be specified in the file \"learning_rate_schedule.txt\"', default=0.1)\n parser.add_argument('--learning_rate_decay_epochs', type=int,\n help='Number of epochs between learning rate decay.', default=100)\n parser.add_argument('--learning_rate_decay_factor', type=float,\n help='Learning rate decay factor.', default=1.0)\n parser.add_argument('--moving_average_decay', type=float,\n help='Exponential decay for tracking of training parameters.', default=0.9999)\n parser.add_argument('--seed', type=int,\n help='Random seed.', default=666)\n parser.add_argument('--learning_rate_schedule_file', type=str,\n help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='data/learning_rate_schedule.txt')\n\n # Parameters for validation on LFW\n parser.add_argument('--lfw_pairs', type=str,\n help='The file containing the pairs to use for validation.', default='data/pairs.txt')\n parser.add_argument('--lfw_dir', type=str,\n help='Path to the data directory containing aligned face patches.', default='')\n parser.add_argument('--lfw_nrof_folds', type=int,\n help='Number of folds to use for cross validation. Mainly used for testing.', default=10)\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n" ]
[ [ "tensorflow.compat.v1.train.batch_join", "tensorflow.compat.v1.Graph", "numpy.random.seed", "tensorflow.compat.v1.train.start_queue_runners", "tensorflow.compat.v1.image.resize_image_with_crop_or_pad", "tensorflow.compat.v1.image.per_image_standardization", "tensorflow.compat.v1.train.Coordinator", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.Variable", "tensorflow.python.ops.data_flow_ops.FIFOQueue", "tensorflow.compat.v1.random_crop", "tensorflow.compat.v1.nn.l2_normalize", "tensorflow.compat.v1.train.exponential_decay", "tensorflow.compat.v1.read_file", "numpy.where", "tensorflow.compat.v1.summary.FileWriter", "tensorflow.compat.v1.add_n", "tensorflow.compat.v1.reshape", "numpy.mean", "tensorflow.compat.v1.GPUOptions", "numpy.zeros", "numpy.ceil", "tensorflow.compat.v1.local_variables_initializer", "numpy.arange", "tensorflow.compat.v1.to_float", "numpy.all", "tensorflow.compat.v1.global_variables_initializer", "numpy.std", "numpy.square", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.image.random_flip_left_right", "tensorflow.compat.v1.image.decode_image", "numpy.random.shuffle", "tensorflow.compat.v1.summary.merge_all", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.Summary", "tensorflow.compat.v1.set_random_seed", "numpy.array", "numpy.random.randint", "tensorflow.compat.v1.get_collection" ] ]
uoguelph-mlrg/LDG
[ "203695748fb6d12cef40a801e634fbdab5e23692" ]
[ "dyrep.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom encoder import *\nfrom utils import *\n\n\nclass DyRep(nn.Module):\n def __init__(self,\n node_embeddings,\n # n_event_types,\n N_nodes,\n A_initial=None,\n N_surv_samples=5,\n n_hidden=32,\n bilinear=False,\n bilinear_enc=False,\n sparse=False,\n n_rel=1,\n encoder=None,\n node_degree_global=None,\n rnd=None,\n sym=False,\n model='gcn',\n soft_attn=False,\n freq=False,\n verbose=False,\n device='cuda'):\n super(DyRep, self).__init__()\n\n self.opt = True\n self.exp = True\n self.rnd = rnd\n self.bilinear = bilinear\n self.bilinear_enc = bilinear_enc\n self.n_hidden = n_hidden\n self.sparse = sparse\n self.encoder = encoder\n self.device = device\n self.model = model\n self.N_surv_samples = N_surv_samples\n self.latent_graph = encoder is not None\n self.generate = self.latent_graph\n self.soft_attn = soft_attn\n self.freq = freq\n self.verbose = verbose\n\n if self.verbose:\n print('using {} attention'.format('soft' if self.soft_attn else 'hard').upper())\n\n self.node_degree_global = node_degree_global\n\n self.N_nodes = A_initial.shape[0]\n if A_initial is not None and len(A_initial.shape) == 2:\n A_initial = A_initial[:, :, None]\n\n if self.latent_graph:\n self.n_assoc_types = n_rel\n else:\n self.n_assoc_types = 1\n\n # self.n_relations = self.n_assoc_types + len(EVENT_TYPES) # 3 communication event types + association event\n\n self.initialize(node_embeddings, A_initial)\n\n n_in = 0\n\n if self.model != 'dyrep':\n self.W_h = nn.ModuleList([nn.Linear(in_features=n_hidden, out_features=n_hidden) for _ in range(3)]) # to have a similar number of trainable params as in DyRep\n if self.model == 'gat':\n self.K_heads = 4\n self.layers = 3\n # self.W_alpha = nn.Linear(in_features=n_hidden, out_features=n_hidden)\n self.alpha = nn.ModuleList([nn.ModuleList([\n nn.Linear(in_features=(n_hidden // self.K_heads) * 2, out_features=1) for head in range(self.K_heads)])\n for layer in range(self.layers)])\n self.W_h = nn.ModuleList([nn.ModuleList([\n nn.Linear(in_features=n_hidden, out_features=n_hidden // self.K_heads) for head in range(self.K_heads)])\n for layer in range(self.layers)]) # to have a similar number of trainable params as in DyRep\n else:\n self.W_h = nn.Linear(in_features=n_hidden + n_in, out_features=n_hidden)\n\n self.W_struct = nn.Linear(n_hidden * self.n_assoc_types, n_hidden)\n self.W_rec = nn.Linear(n_hidden + n_in, n_hidden)\n self.W_t = nn.Linear(4, n_hidden) # 4 because we want separate parameters for days, hours, minutes, seconds; otherwise (if we just use seconds) it can be a huge number confusing the network\n\n # Initialize parameters of the intensity rate (edge) prediction functions\n # See https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py\n n_types = 2 # associative and communicative\n d1 = self.n_hidden + (0)\n d2 = self.n_hidden + (0)\n if self.bilinear:\n self.omega = nn.ModuleList([nn.Bilinear(d1, d1, 1), nn.Bilinear(d2, d2, 1)])\n else:\n # d = 2 * self.n_hidden + n_in\n d1 += self.n_hidden\n d2 += self.n_hidden\n self.omega = nn.ModuleList([nn.Linear(d1, 1), nn.Linear(d2, 1)])\n\n self.psi = nn.Parameter(0.5 * torch.ones(n_types))\n\n # print('omega', self.omega)\n\n self.train_enc = False\n if encoder is not None:\n if encoder.lower() == 'mlp':\n self.encoder = MLPEncoder(n_in=self.n_hidden, n_hid=self.n_hidden,\n n_out=self.n_assoc_types + int(sparse), bilinear=bilinear_enc, n_stages=2,\n sym=sym, bnorm=True)\n self.train_enc = True\n elif 
encoder.lower() == 'mlp1':\n self.encoder = MLPEncoder(n_in=self.n_hidden, n_hid=self.n_hidden,\n n_out=self.n_assoc_types + int(sparse), bilinear=bilinear_enc, n_stages=1,\n sym=sym, bnorm=True)\n self.train_enc = True\n elif encoder.lower() == 'linear':\n self.encoder = LinearEncoder(n_in=self.n_hidden,\n n_out=self.n_assoc_types + int(sparse))\n self.train_enc = True\n elif encoder.lower() == 'rand':\n self.encoder = None\n else:\n raise NotImplementedError(encoder)\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.Bilinear):\n # print('before Xavier', m.weight.data.shape, m.weight.data.min(), m.weight.data.max())\n nn.init.xavier_normal_(m.weight.data)\n # print('after Xavier', m.weight.data.shape, m.weight.data.min(), m.weight.data.max())\n\n def generate_S_from_A(self):\n if self.model == 'dyrep':\n S = self.A.new(self.N_nodes, self.N_nodes, self.n_assoc_types).fill_(0)\n for rel in range(self.n_assoc_types):\n D = torch.sum(self.A[:, :, rel], dim=1).float()\n for v in torch.nonzero(D):\n u = torch.nonzero(self.A[v, :, rel].squeeze())\n S[v, u, rel] = 1. / D[v]\n self.S = S\n # Check that values in each row of S add up to 1\n for rel in range(self.n_assoc_types):\n S = self.S[:, :, rel]\n assert torch.sum(S[self.A[:, :, rel] == 0]) < 1e-5, torch.sum(S[self.A[:, :, rel] == 0]) # check that S_uv is zero when A_uv is zero\n elif self.model == 'gcn':\n A_hat = self.A.view(self.N_nodes, self.N_nodes) #.view(self.N_nodes, self.N_nodes) + torch.eye(self.N_nodes).to(self.device)\n assert torch.all(A_hat[np.diag_indices(self.N_nodes)] == 1), A_hat[np.diag_indices(self.N_nodes)]\n D_hat = (torch.sum(A_hat, 0) + 1e-5) ** (-0.5)\n self.S = D_hat.view(self.N_nodes, 1) * A_hat * D_hat.view(1, self.N_nodes)\n else:\n # S is computed for each batch on the fly\n assert self.model == 'gat', self.model\n\n\n def initialize(self, node_embeddings, A_initial, keepS=False):\n if self.verbose:\n print('initialize model''s node embeddings and adjacency matrices for %d nodes' % self.N_nodes)\n # Initial embeddings\n if node_embeddings is not None:\n z = np.pad(node_embeddings, ((0, 0), (0, self.n_hidden - node_embeddings.shape[1])), 'constant')\n z = torch.from_numpy(z).float().to(self.device)\n\n if A_initial is None or self.latent_graph:\n\n if self.verbose:\n print('initial random prediction of A')\n A = torch.zeros(self.N_nodes, self.N_nodes, self.n_assoc_types + int(self.sparse), device=self.device)\n\n for i in range(self.N_nodes):\n for j in range(i + 1, self.N_nodes):\n if self.sparse:\n if self.n_assoc_types == 1:\n pvals = [0.95, 0.05]\n elif self.n_assoc_types == 2:\n pvals = [0.9, 0.05, 0.05]\n elif self.n_assoc_types == 3:\n pvals = [0.91, 0.03, 0.03, 0.03]\n elif self.n_assoc_types == 4:\n pvals = [0.9, 0.025, 0.025, 0.025, 0.025]\n else:\n raise NotImplementedError(self.n_assoc_types)\n ind = np.nonzero(np.random.multinomial(1, pvals))[0][0]\n else:\n ind = np.random.randint(0, self.n_assoc_types, size=1)\n A[i, j, ind] = 1\n A[j, i, ind] = 1\n assert torch.sum(torch.isnan(A)) == 0, (torch.sum(torch.isnan(A)), A)\n if self.sparse:\n A = A[:, :, 1:]\n\n else:\n if self.verbose:\n print('A_initial', A_initial.shape)\n A = torch.from_numpy(A_initial).float().to(self.device)\n if len(A.shape) == 2:\n A = A.unsqueeze(2)\n\n # make these variables part of the model\n # if self.model == 'dyrep':\n self.register_buffer('z', z)\n # else:\n # self.z = nn.Embedding(z.shape[0], z.shape[1]).to(self.device)\n # self.z.weight.data 
= z.data\n\n self.register_buffer('A', A)\n\n if self.model != 'dyrep':\n self.A = self.A.view(self.N_nodes, self.N_nodes)\n self.A[np.diag_indices(self.N_nodes)] = 1 # add self-loops\n\n if not keepS:\n self.generate_S_from_A()\n\n self.Lambda_dict = torch.zeros(5000, device=self.device)\n self.time_keys = []\n\n self.t_p = 0 # global counter of iterations\n\n def check_S(self):\n for rel in range(self.n_assoc_types):\n rows = torch.nonzero(torch.sum(self.A[:, :, rel], dim=1).float())\n # check that the sum in all rows equal 1\n assert torch.all(torch.abs(torch.sum(self.S[:, :, rel], dim=1)[rows] - 1) < 1e-1), torch.abs(torch.sum(self.S[:, :, rel], dim=1)[rows] - 1)\n\n def g_fn(self, z_cat, k, edge_type=None, z2=None):\n if self.bilinear:\n B = 1 if len(z_cat.shape) == 1 else z_cat.shape[0]\n if z2 is not None:\n z_cat = z_cat.view(B, self.n_hidden)\n z2 = z2.view(B, self.n_hidden)\n else:\n raise NotImplementedError('')\n g = z_cat.new(len(z_cat), 1).fill_(0)\n idx = k <= 0\n if torch.sum(idx) > 0:\n if edge_type is not None:\n z_cat1 = torch.cat((z_cat[idx], edge_type.view(B, -1)[idx, :self.n_assoc_types]), dim=1)\n z21 = torch.cat((z2[idx], edge_type.view(B, -1)[idx, :self.n_assoc_types]), dim=1)\n else:\n z_cat1 = z_cat[idx]\n z21 = z2[idx]\n g[idx] = self.omega[0](z_cat1, z21)\n idx = k > 0\n if torch.sum(idx) > 0:\n if edge_type is not None:\n z_cat1 = torch.cat((z_cat[idx], edge_type.view(B, -1)[idx, self.n_assoc_types:]), dim=1)\n z21 = torch.cat((z2[idx], edge_type.view(B, -1)[idx, self.n_assoc_types:]), dim=1)\n else:\n z_cat1 = z_cat[idx]\n z21 = z2[idx]\n g[idx] = self.omega[1](z_cat1, z21)\n else:\n if z2 is not None:\n z_cat = torch.cat((z_cat, z2), dim=1)\n else:\n raise NotImplementedError('')\n g = z_cat.new(len(z_cat), 1).fill_(0)\n idx = k <= 0\n if torch.sum(idx) > 0:\n if edge_type is not None:\n z_cat1 = torch.cat((z_cat[idx], edge_type[idx, :self.n_assoc_types]), dim=1)\n else:\n z_cat1 = z_cat[idx]\n g[idx] = self.omega[0](z_cat1)\n idx = k > 0\n if torch.sum(idx) > 0:\n if edge_type is not None:\n z_cat1 = torch.cat((z_cat[idx], edge_type[idx, self.n_assoc_types:]), dim=1)\n else:\n z_cat1 = z_cat[idx]\n g[idx] = self.omega[1](z_cat1)\n\n g = g.flatten()\n return g\n\n\n def intensity_rate_lambda(self, z_u, z_v, k):\n z_u = z_u.view(-1, self.n_hidden).contiguous()\n z_v = z_v.view(-1, self.n_hidden).contiguous()\n edge_type = None\n g = 0.5 * (self.g_fn(z_u, (k > 0).long(), edge_type=edge_type, z2=z_v) +\n self.g_fn(z_v, (k > 0).long(), edge_type=edge_type, z2=z_u)) # make it symmetric, because most events are symmetric\n\n psi = self.psi[(k > 0).long()]\n g_psi = torch.clamp(g / (psi + 1e-7), -75, 75) # to prevent overflow\n Lambda = psi * (torch.log(1 + torch.exp(-g_psi)) + g_psi)\n return Lambda\n\n def update_node_embed(self, prev_embed, node1, node2, time_delta_uv, k):\n # self.z contains all node embeddings of previous time \\bar{t}\n # self.S also corresponds to previous time stamp, because it's not updated yet based on this event\n\n node_embed = prev_embed\n\n # compute embeddings for all nodes using the GCN layer, but will be using only nodes u, v\n # it's just not convenient to compute embeddings only for nodes u and v\n # fix that in the future to save computation time\n\n node_degree = {} # we need degrees to update S\n z_new = prev_embed.clone() # to allow in place changes while keeping gradients\n h_u_struct = prev_embed.new(2, self.n_hidden, self.n_assoc_types).fill_(0)\n for c, (v, u, delta_t) in enumerate(zip([node1, node2], [node2, node1], 
time_delta_uv)): # i is the other node involved in the event\n node_degree[u] = np.zeros(self.n_assoc_types)\n for rel in range(self.n_assoc_types):\n if self.latent_graph:\n Neighb_u = self.S[u, :, rel] > 1e-7\n else:\n Neighb_u = self.A[u, :, rel] > 0 # when update embedding for node v, we need neighbors of u and vice versa!\n N_neighb = torch.sum(Neighb_u).item() # number of neighbors for node u\n node_degree[u][rel] = N_neighb\n if N_neighb > 0: # node has no neighbors\n h_prev_i = self.W_h(node_embed[Neighb_u]).view(N_neighb, self.n_hidden)\n # attention over neighbors\n q_ui = torch.exp(self.S[u, Neighb_u, rel]).view(N_neighb, 1)\n q_ui = q_ui / (torch.sum(q_ui) + 1e-7)\n h_u_struct[c, :, rel] = torch.max(torch.sigmoid(q_ui * h_prev_i), dim=0)[0].view(1, self.n_hidden)\n\n h1 = self.W_struct(h_u_struct.view(2, self.n_hidden * self.n_assoc_types))\n\n h2 = self.W_rec(node_embed[[node1, node2], :].view(2, -1))\n h3 = self.W_t(time_delta_uv.float()).view(2, self.n_hidden)\n\n z_new[[node1, node2], :] = torch.sigmoid(h1 + h2 + h3)\n\n\n return node_degree, z_new\n\n def update_S_A(self, u, v, k, node_degree, lambda_uv_t):\n\n if self.latent_graph:\n raise ValueError('invalid mode')\n\n if k <= 0 and not self.latent_graph: # Association event\n # do not update in case of latent graph\n self.A[u, v, np.abs(k)] = self.A[v, u, np.abs(k)] = 1 # 0 for CloseFriends, k = -1 for the second relation, so it's abs(k) matrix in self.A\n A = self.A\n indices = torch.arange(self.N_nodes, device=self.device)\n for rel in range(self.n_assoc_types):\n if k > 0 and A[u, v, rel] == 0: # Communication event, no Association exists\n continue # do not update S and A\n else:\n for j, i in zip([u, v], [v, u]):\n # i is the \"other node involved in the event\"\n try:\n degree = node_degree[j]\n except:\n print(list(node_degree.keys()))\n raise\n y = self.S[j, :, rel]\n # assert torch.sum(torch.isnan(y)) == 0, ('b', j, degree[rel], node_degree_global[rel][j.item()], y)\n b = 0 if degree[rel] == 0 else 1. / (float(degree[rel]) + 1e-7)\n if k > 0 and A[u, v, rel] > 0: # Communication event, Association exists\n y[i] = b + lambda_uv_t\n elif k <= 0 and A[u, v, rel] > 0: # Association event\n if self.node_degree_global[rel][j] == 0:\n b_prime = 0\n else:\n b_prime = 1. 
/ (float(self.node_degree_global[rel][j]) + 1e-7)\n x = b_prime - b\n y[i] = b + lambda_uv_t\n w = (y != 0) & (indices != int(i))\n y[w] = y[w] - x\n y /= (torch.sum(y) + 1e-7) # normalize\n self.S[j, :, rel] = y\n return\n\n def cond_density(self, time_bar, time_cur, u, v):\n N = self.N_nodes\n s = self.Lambda_dict.new(2, N).fill_(0)\n Lambda_sum = torch.cumsum(self.Lambda_dict.flip(0), 0).flip(0) / len(self.Lambda_dict)\n time_keys_min = self.time_keys[0]\n time_keys_max = self.time_keys[-1]\n\n indices = []\n l_indices = []\n t_bar_min = torch.min(time_bar[[u, v]]).item()\n if t_bar_min < time_keys_min:\n start_ind_min = 0\n elif t_bar_min > time_keys_max:\n # it means t_bar will always be larger, so there is no history for these nodes\n return s\n else:\n start_ind_min = self.time_keys.index(int(t_bar_min))\n\n max_pairs = torch.max(torch.cat((time_bar[[u, v]].view(1, 2).expand(N, -1).t().contiguous().view(2 * N, 1),\n time_bar.repeat(2, 1)), dim=1), dim=1)[0].view(2, N).long().data.cpu().numpy() # 2,N\n\n # compute cond density for all pairs of u and some i, then of v and some i\n c1, c2 = 0, 0\n for c, j in enumerate([u, v]): # range(i + 1, N):\n for i in range(N):\n if i == j:\n continue\n # most recent timestamp of either u or v\n t_bar = max_pairs[c, i]\n c2 += 1\n\n if t_bar < time_keys_min:\n start_ind = 0 # it means t_bar is beyond the history we kept, so use maximum period saved\n elif t_bar > time_keys_max:\n continue # it means t_bar is current event, so there is no history for this pair of nodes\n else:\n # t_bar is somewhere in between time_keys_min and time_keys_min\n start_ind = self.time_keys.index(t_bar, start_ind_min)\n\n indices.append((c, i))\n l_indices.append(start_ind)\n\n indices = np.array(indices)\n l_indices = np.array(l_indices)\n s[indices[:, 0], indices[:, 1]] = Lambda_sum[l_indices]\n\n return s\n\n def edges2matrix(self, x, idx, N):\n edges = x.new(N * N, x.shape[1]).fill_(0)\n edges[idx] = x\n edges = edges.view(N, N, -1)\n return edges\n\n def generate_S(self, prev_embed, u, v, train_enc=False):\n N = self.N_nodes\n\n edges = torch.Tensor([[u, v]]).long()\n\n if not train_enc:\n # do not keep any gradients\n with torch.no_grad():\n logits, idx = self.encoder(prev_embed, edges=edges)\n logits = logits.detach() # not backpropgenerate_S\n else:\n logits, idx = self.encoder(prev_embed, edges=edges)\n\n N = 2\n logits = logits.view(1, N * N, self.n_assoc_types + int(self.sparse)) # N,N,N_assoc # nn.functional.sigmoid\n\n if self.training or train_enc or self.soft_attn:\n hard = False\n else:\n hard = True # hard at test time\n\n edges = gumbel_softmax(logits, tau=0.5, hard=hard)\n\n if train_enc:\n prob = my_softmax(logits, -1)\n if self.sparse:\n if self.n_assoc_types == 1:\n log_prior = torch.FloatTensor(np.log(np.array([0.95, 0.05]))).to(self.device)\n # log_prior = torch.FloatTensor(np.log(np.array([0.9, 0.1]))).to(device)\n elif self.n_assoc_types == 2:\n log_prior = torch.FloatTensor(np.log(np.array([0.9, 0.05, 0.05]))).to(self.device)\n # log_prior = torch.FloatTensor(np.log(np.array([0.8, 0.1, 0.1]))).to(device)\n elif self.n_assoc_types == 3:\n log_prior = torch.FloatTensor(np.log(np.array([0.91, 0.03, 0.03, 0.03]))).to(self.device)\n # log_prior = torch.FloatTensor(np.log(np.array([0.7, 0.1, 0.1, 0.1]))).to(device)\n elif self.n_assoc_types == 4:\n log_prior = torch.FloatTensor(np.log(np.array([0.9, 0.025, 0.025, 0.025, 0.025]))).to(self.device)\n else:\n raise NotImplementedError(self.n_assoc_types)\n log_prior = torch.unsqueeze(log_prior, 0)\n 
log_prior = torch.unsqueeze(log_prior, 0)\n loss_kl = kl_categorical(prob, log_prior, N)\n else:\n loss_kl = kl_categorical_uniform(prob, N, self.n_assoc_types) # we want all edge types to have uniform probs\n if torch.isnan(loss_kl):\n print(loss_kl, self.S.min(), self.S.max())\n print(prob)\n raise ValueError()\n reg = [loss_kl]\n else:\n reg = []\n\n device = edges.get_device() if edges.is_cuda else 'cpu'\n I_neg = 1 - torch.eye(N, device=device).unsqueeze(2)\n edges = edges.view(N, N, -1) * I_neg\n logits = nn.functional.softmax(logits, dim=-1).view(N, N, -1).detach()\n logits = logits * I_neg\n if self.sparse:\n edges = edges[:, :, 1:]\n logits = logits[:, :, 1:]\n\n return edges, logits, reg\n\n def forward(self, data):\n u, v, time_delta_uv, event_types, time_bar, time_cur = data[:6]\n\n B = len(u)\n assert len(event_types) == B, (len(event_types), B)\n N = self.N_nodes\n\n A_pred, Surv = None, None\n if not self.training:\n A_pred = self.A.new(B, N, N).fill_(0)\n if self.exp:\n Surv = self.A.new(B, N, N).fill_(0) # survival term\n\n if self.opt:\n embeddings1, embeddings2, node_degrees = [], [], []\n embeddings_non1, embeddings_non2 = [], []\n else:\n lambda_uv_t, lambda_uv_t_non_events = [], []\n\n assert torch.min(time_delta_uv) >= 0, ('events must be in chronological order', torch.min(time_delta_uv))\n\n time_mn = torch.from_numpy(np.array([0, 0, 0, 0])).float().to(self.device).view(1, 1, 4)\n time_sd = torch.from_numpy(np.array([50, 7, 15, 15])).float().to(self.device).view(1, 1, 4)\n time_delta_uv = (time_delta_uv - time_mn) / time_sd\n\n reg = []\n\n S_batch = []\n if self.latent_graph:\n if self.encoder is not None and self.t_p == 0:\n if self.verbose:\n print('!!!generate S!!!')\n self.S = self.S / (torch.sum(self.S, dim=1, keepdim=True) + 1e-7)\n self.logits = self.S\n self.A = self.S\n S_batch = [self.S.data.cpu().numpy()]\n\n\n z_all = []\n\n u_all = u.data.cpu().numpy()\n v_all = v.data.cpu().numpy()\n\n update_attn = not self.latent_graph # always update if not latent\n\n\n if self.model == 'gcn':\n for layer in range(len(self.W_h)):\n self.z = torch.mm(self.S, self.W_h[layer](self.z)) # update node embeddings. 
We want these updates to be predictive of the future\n if layer < len(self.W_h) - 1:\n self.z = F.relu(self.z)\n # self.z = self.W_h(self.z)\n # print(self.z.min().item(), self.z.max().item())\n if self.bilinear:\n self.z = 0.5 * torch.tanh(self.z) # to prevent overflow and nans\n # self.z.data.clamp_(-1, 1)\n\n elif self.model == 'gat':\n\n assert torch.all(self.A[np.diag_indices(self.N_nodes)] == 1), self.A[np.diag_indices(self.N_nodes)]\n\n rows, cols = torch.nonzero(self.A).split([1, 1], dim=1)\n\n for layer in range(len(self.W_h)):\n z_cat = []\n for head in range(self.K_heads):\n z_prime = self.W_h[layer][head](self.z)\n # print(layer, z_prime.shape)\n h = torch.cat((z_prime[rows].view(len(rows), -1), z_prime[cols].view(len(cols), -1)), dim=1)\n\n self.S = torch.zeros(self.N_nodes, self.N_nodes).to(self.device)\n self.S[rows, cols] = F.leaky_relu(self.alpha[layer][head](h).view(-1, 1), negative_slope=0.2)\n\n for r in range(self.N_nodes):\n neighbors = torch.nonzero(self.A[r]).view(-1)\n self.S[r, neighbors] = F.softmax(self.S[r, neighbors] + 1, dim=0) # +1 for numerical stability; explicit dim for the 1-D slice\n # print(r, self.S[r, c].sum(), self.S[r, c])\n\n # Alternative to softmax\n # A_hat = self.A.view(self.N_nodes, self.N_nodes) + torch.eye(self.N_nodes).to(self.device)\n # D_hat = (torch.sum(A_hat, 0) + 1e-5) ** (-0.5)\n # self.S = D_hat.view(self.N_nodes, 1) * A_hat * D_hat.view(1, self.N_nodes)\n\n z_head = torch.mm(self.S, z_prime)\n if layer < len(self.W_h) - 1:\n z_head = F.relu(z_head)\n z_cat.append(z_head)\n self.z = torch.cat(z_cat, dim=1)\n # if self.bilinear:\n # self.z.data.clamp_(-2, 2)\n self.z = 0.5 * torch.tanh(self.z) # to prevent overflow and nans\n # self.z = torch.sigmoid(self.z)\n\n elif self.model != 'dyrep':\n raise NotImplementedError(self.model)\n\n for it, k in enumerate(event_types):\n # k = 0: association event (rare)\n # k = 1,2,3: communication event (frequent)\n\n u_it, v_it = u_all[it], v_all[it]\n z_prev = self.z if it == 0 else z_all[it - 1]\n\n # 1. Compute intensity rate lambda based on node embeddings at previous time step (Eq. 1)\n if self.opt:\n # store node embeddings, compute lambda and S,A later based on the entire batch\n embeddings1.append(z_prev[u_it])\n embeddings2.append(z_prev[v_it])\n else:\n # accumulate intensity rates of events for this batch based on embeddings from the previous time step\n lambda_uv_t.append(self.intensity_rate_lambda(z_prev[u_it], z_prev[v_it], torch.zeros(1).long() + k))\n\n\n # 2. Update node embeddings\n if self.model == 'dyrep':\n node_degree, z_new = self.update_node_embed(z_prev, u_it, v_it, time_delta_uv[it], k) # / 3600.) # hours\n if self.opt:\n node_degrees.append(node_degree)\n\n\n # 3. 
Update S and A\n if not self.opt and update_attn:\n # we can update S and A based on current pair of nodes even during test time,\n # because S, A are not used in further steps for this iteration\n self.update_S_A(u_it, v_it, k.item(), node_degree, lambda_uv_t[it]) #\n\n # update most recent degrees of nodes used to update S\n if not self.latent_graph:\n assert self.node_degree_global is not None\n for j in [u_it, v_it]:\n for rel in range(self.n_assoc_types):\n self.node_degree_global[rel][j] = node_degree[j][rel]\n else:\n if k <= 0: # Association event\n self.A[u, v] = self.A[v, u] = 1\n self.generate_S_from_A()\n\n z_new = self.z\n\n # Non events loss\n # this is not important for test time, but we still compute these losses for debugging purposes\n # get random nodes except for u_it, v_it\n uv_others = self.rnd.choice(np.delete(np.arange(N), [u_it, v_it]),\n size=self.N_surv_samples * 2, replace=False)\n # assert len(np.unique(uv_others)) == len(uv_others), ('nodes must be unique', uv_others)\n for q in range(self.N_surv_samples):\n assert u_it != uv_others[q], (u_it, uv_others[q])\n assert v_it != uv_others[self.N_surv_samples + q], (v_it, uv_others[self.N_surv_samples + q])\n if self.opt:\n embeddings_non1.extend([z_prev[u_it], z_prev[uv_others[self.N_surv_samples + q]]])\n embeddings_non2.extend([z_prev[uv_others[q]], z_prev[v_it]])\n else:\n for k_ in range(2):\n lambda_uv_t_non_events.append(\n self.intensity_rate_lambda(z_prev[u_it],\n z_prev[uv_others[q]], torch.zeros(1).long() + k_))\n lambda_uv_t_non_events.append(\n self.intensity_rate_lambda(z_prev[uv_others[self.N_surv_samples + q]],\n z_prev[v_it],\n torch.zeros(1).long() + k_))\n\n\n # compute conditional density for all possible pairs\n # here it's important NOT to use any information that the event between nodes u,v has happened\n # so, we use node embeddings of the previous time step: z_prev\n if self.exp or not self.training:\n with torch.no_grad():\n z_cat = torch.cat((z_prev[u_it].detach().unsqueeze(0).expand(N, -1),\n z_prev[v_it].detach().unsqueeze(0).expand(N, -1)), dim=0)\n Lambda = self.intensity_rate_lambda(z_cat, z_prev.detach().repeat(2, 1),\n torch.zeros(len(z_cat)).long() + k).detach()\n if not self.training:\n A_pred[it, u_it, :] = Lambda[:N]\n A_pred[it, v_it, :] = Lambda[N:]\n\n assert torch.sum(torch.isnan(A_pred[it])) == 0, (it, torch.sum(torch.isnan(A_pred[it])))\n if self.exp:\n # Compute the survival term (See page 3 in the paper)\n # we only need to compute the term for rows u_it and v_it in our matrix s to save time\n # because we will compute rank only for nodes u_it and v_it\n s1 = self.cond_density(time_bar[it], time_cur[it], u_it, v_it)\n Surv[it, [u_it, v_it], :] = s1\n\n if self.exp:\n time_key = int(time_cur[it].item())\n idx = np.delete(np.arange(N), [u_it, v_it]) # nonevents for node u\n idx = np.concatenate((idx, idx + N)) # concat with nonevents for node v\n\n if len(self.time_keys) >= len(self.Lambda_dict):\n # shift in time (remove the oldest record)\n time_keys = np.array(self.time_keys)\n time_keys[:-1] = time_keys[1:]\n self.time_keys = list(time_keys[:-1]) # remove last\n self.Lambda_dict[:-1] = self.Lambda_dict.clone()[1:]\n self.Lambda_dict[-1] = 0\n\n self.Lambda_dict[len(self.time_keys)] = Lambda[idx].sum().detach() # total intensity of non events for the current time step\n self.time_keys.append(time_key)\n\n # Once we made predictions for the training and test sample, we can update node embeddings\n z_all.append(z_new)\n # update S\n if self.generate:\n if self.encoder is 
not None:\n S_tmp, logits_tmp, reg2 = self.generate_S(z_new, u_it, v_it, train_enc=self.training and self.train_enc)\n if self.training:\n reg = reg + reg2\n\n self.S = self.S.clone()\n\n self.S[u_it, v_it] = S_tmp[0, 1]\n self.S[v_it, u_it] = S_tmp[1, 0]\n\n self.S = self.S / (torch.sum(self.S, dim=1, keepdim=True) + 1e-7)\n self.logits[u_it, v_it] = logits_tmp[0, 1]\n self.logits[v_it, u_it] = logits_tmp[1, 0]\n self.A = self.S\n S_batch.append(self.S.data.cpu().numpy())\n\n\n self.t_p += 1\n\n self.z = z_new # update node embeddings\n\n # Batch update\n if self.opt:\n lambda_uv_t = self.intensity_rate_lambda(torch.stack(embeddings1, dim=0),\n torch.stack(embeddings2, dim=0), event_types)\n non_events = len(embeddings_non1)\n n_types = 2\n lambda_uv_t_non_events = torch.zeros(non_events * n_types, device=self.device)\n embeddings_non1 = torch.stack(embeddings_non1, dim=0)\n embeddings_non2 = torch.stack(embeddings_non2, dim=0)\n idx = None\n empty_t = torch.zeros(non_events, dtype=torch.long)\n types_lst = torch.arange(n_types)\n for k in types_lst:\n if idx is None:\n idx = np.arange(non_events)\n else:\n idx += non_events\n lambda_uv_t_non_events[idx] = self.intensity_rate_lambda(embeddings_non1, embeddings_non2, empty_t + k)\n\n if update_attn and self.model == 'dyrep':\n # update only once per batch\n for it, k in enumerate(event_types):\n u_it, v_it = u_all[it], v_all[it]\n self.update_S_A(u_it, v_it, k.item(), node_degrees[it], lambda_uv_t[it].item())\n\n\n else:\n lambda_uv_t = torch.cat(lambda_uv_t)\n lambda_uv_t_non_events = torch.cat(lambda_uv_t_non_events)\n\n if len(S_batch) > 0:\n S_batch = np.stack(S_batch)\n\n if len(reg) > 1:\n reg = [torch.stack(reg).mean()]\n\n return lambda_uv_t, lambda_uv_t_non_events / self.N_surv_samples, [A_pred, Surv], S_batch, reg" ]
[ [ "torch.min", "torch.stack", "torch.nonzero", "torch.nn.functional.softmax", "torch.no_grad", "torch.mm", "numpy.stack", "torch.eye", "torch.cat", "torch.nn.init.xavier_normal_", "numpy.abs", "torch.arange", "torch.nn.Bilinear", "torch.tanh", "torch.from_numpy", "torch.sigmoid", "numpy.diag_indices", "numpy.random.multinomial", "torch.Tensor", "torch.unsqueeze", "torch.ones", "numpy.zeros", "numpy.arange", "torch.isnan", "numpy.pad", "numpy.array", "torch.sum", "torch.nn.Linear", "torch.exp", "torch.nn.functional.relu", "torch.zeros", "numpy.concatenate", "numpy.random.randint", "torch.clamp" ] ]
takumihonda/AIP_realtime
[ "03d6cf3e1405c21b73bf2923cb6914aeb6f3634d" ]
[ "realtime_20200825/plot_monit.py" ]
[ "import numpy as np\nimport sys\nimport os\nfrom netCDF4 import Dataset\n\nfrom datetime import datetime, timedelta\n\n\nquick = True\nquick = False\n\n#theight = 3000.0\ntheight = 2000.0\n\n# UTC\n \nstime = datetime( 2020, 8, 24, 7, 0 )\netime = datetime( 2020, 9, 7, 0, 0 )\n\n# Figure x range\nstime_ = datetime( 2020, 8, 25, 0, 0 )\netime_ = datetime( 2020, 9, 7, 0, 0 )\n\ntmin = 0\ntmax = 61 # max time dimension includes FT=0\ntskip = 20\n \nitmax = int( ( etime - stime ).total_seconds() / 30 ) + 1\n\ntlevs = np.arange( tmin, tmax, tskip, dtype=np.int32 )\ntlev_max = np.shape( tlevs )[0]\n \n\n\ndbz = 15.0\ndbz = 30.0\n\n###########\n\n\ndef read_scores():\n\n odir = \"ts_npz/realtime_score\"\n \n fn_ts_t = os.path.join( odir, \"TS_thrs{0:.1f}dbz_z{1:.1f}_tskip{2:0=3}_s{3:}_e{4:}.npz\".format( dbz, theight, tskip, stime.strftime('%H%M%S_%Y%m%d'), etime.strftime('%H%M%S_%Y%m%d') ) )\n \n try:\n print( \"Read \", fn_ts_t )\n data = np.load( fn_ts_t, allow_pickle=True )\n return( data['ts_l'], data['bs_l'], data['itimes_l'] )\n except:\n print( \"Read all files\")\n\n ts_l = np.zeros( ( itmax, tlev_max, ) )\n bs_l = np.zeros( ( itmax, tlev_max, ) )\n \n ts_l[:] = np.nan\n bs_l[:] = np.nan\n \n \n itimes_l = []\n \n time = stime\n cnt = 0\n while (time <= etime):\n# print( \"Initial time:\", time )\n \n itimes_l.append( time + timedelta(hours=9))\n \n fn_ts = \"TS_thrs{0:.1f}dbz_z{1:.1f}_tskip{2:0=3}_i{3:}.npz\".format( dbz, theight, tskip, time.strftime('%H%M%S_%Y%m%d') )\n \n try:\n data = np.load( os.path.join(odir,fn_ts) )\n ts_l[cnt,:] = data['ts_l']\n bs_l[cnt,:] = data['bs_l']\n except:\n print( \"failed to read\", time )\n \n time += timedelta(seconds=30)\n cnt += 1\n\n np.savez( fn_ts_t, ts_l=ts_l, bs_l=bs_l, itimes_l=itimes_l )\n return( ts_l, bs_l, itimes_l )\n\n\nts_l, bs_l, itimes_l = read_scores()\n\n# get lead times for nan\npath = \"/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825\"\nofn = os.path.join( path, \"result_leadtime/leadtime.npz\" )\ndata = np.load( ofn, allow_pickle=True )\nlt_l = data['lt_l']\nftime_l = data['ftime_l']\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nymin1 = 0.0\nymax1 = 1.0\nymin2 = 0.0\nymax2 = 5.0\n\nprint( tlevs.shape)\nprint( tlevs )\n\nfig, (ax1,ax2) = plt.subplots( 2, 1, figsize=( 16,8 ) )\nfig.subplots_adjust(left=0.05, bottom=0.07, right=0.98, top=0.95,\n hspace=0.3 )\n\ncc_l = [ \"k\", \"r\", \"b\", \"g\" ]\nprint( ts_l.shape )\n\n\nlw = 0.3\nalp = 0.5\nfor i, ft in enumerate( tlevs ):\n ft_ = int( ft * 30 / 60 ) # min\n\n itimes_l_ = itimes_l + timedelta( minutes=ft_ )\n \n\n ax1.plot( itimes_l_, ts_l[:,i], color=cc_l[i], label=\"FT={0:.0f}min\".format(ft_), \n lw=lw )\n ax2.plot( itimes_l_, bs_l[:,i], color=cc_l[i], lw=lw )\n\nax1.vlines( x=ftime_l[ np.isnan( lt_l ) ], ymin=ymin1, ymax=ymax1, color='gray', \n ls='dashed', lw=0.01, alpha=alp )\nax2.vlines( x=ftime_l[ np.isnan( lt_l ) ], ymin=ymin2, ymax=ymax2, color='gray', \n ls='dashed', lw=0.01, alpha=alp )\n\n\nleg = ax1.legend( loc='lower left', fontsize=12, framealpha=1.0,\n )\n\nfor line, text in zip(leg.get_lines(), leg.get_texts()):\n text.set_color(line.get_color())\n\n#for i, text in enumerate( leg.get_texts() ):\n# ax1.setp( text, color=cc_l[i] )\n\nbbox = { 'facecolor':'w', 'alpha':0.95, 'pad':3 }\n\ndmin = 120\ndmin = 360\ndmin = 720\n\ntit_l = [ \"Threat score\", \"Bias score\" ]\nax_l = [ ax1, ax2 ]\nfor i, ax in enumerate( ax_l ):\n ax.xaxis.set_major_formatter( mdates.DateFormatter('%H:%M\\n%m/%d') )\n 
ax.xaxis.set_major_locator( mdates.MinuteLocator(interval=dmin) )\n ax.tick_params( axis='x', labelsize=8 )\n\n #ax.set_xlim( itimes_l[0], itimes_l[-1] )\n ax.set_xlim( stime_, etime_ )\n\n ax.text( 0.5, 1.01, tit_l[i],\n fontsize=14, transform=ax.transAxes,\n ha=\"center\", va=\"bottom\", )\n #bbox=bbox )\n \n ax.set_ylabel( tit_l[i], fontsize=12 )\n \n\nax2.hlines( y=1.0, xmin=stime, xmax=etime, ls='dashed',\n linewidth=0.5, color='gray' ) \n\nax2.text( 0.01, 0.05, \"Threshold: {0:.0f}dBZ\\nZ={1:.0f}km\".format( dbz, theight/1000.0, ),\n fontsize=11, transform=ax2.transAxes, \n ha=\"left\", va=\"bottom\", \n bbox=bbox)\n\nax1.set_ylim( ymin1, ymax1 )\nax2.set_ylim( ymin2, ymax2 )\n\nax1.set_xlabel( \"Valid time (JST)\", fontsize=10 )\nax2.set_xlabel( \"Valid time (JST)\", fontsize=10 )\n\n\npnum_l = [ \"(a)\", \"(b)\" ]\nax_l = [ ax1, ax2 ] \nfor i, ax in enumerate( ax_l ):\n ax.text( 0.01, 0.95, pnum_l[i],\n fontsize=11, transform=ax.transAxes,\n ha='left',\n va='top', \n bbox=bbox )\n \n\nofig = \"2p_realtime_score_thrs{0:.1f}dbz_z{1:.1f}.png\".format( dbz, theight )\n\nprint( ofig )\nif quick:\n plt.show()\nelse:\n plt.savefig( ofig,\n bbox_inches=\"tight\", pad_inches = 0.1)\n plt.clf()\n plt.close('all')\n\n\n\n" ]
[ [ "numpy.load", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.savez", "matplotlib.dates.DateFormatter", "matplotlib.dates.MinuteLocator", "matplotlib.pyplot.subplots", "matplotlib.pyplot.clf", "numpy.arange", "matplotlib.pyplot.show", "numpy.shape", "matplotlib.pyplot.close", "numpy.isnan" ] ]
Sniel/icml18-jtnn
[ "eb35c73458dff160b2319b73a5aa8fdc1a8b3783" ]
[ "jtnn/datautils.py" ]
[ "from torch.utils.data import Dataset\nfrom jtnn.mol_tree import MolTree\nimport numpy as np\n\nclass MoleculeDataset(Dataset):\n\n def __init__(self, data_file):\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n\n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = MolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n return mol_tree\n\nclass PropDataset(Dataset):\n\n def __init__(self, data_file, prop_file):\n self.prop_data = np.loadtxt(prop_file)\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n\n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = MolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n return mol_tree, self.prop_data[idx]\n\n" ]
[ [ "numpy.loadtxt" ] ]
Data-Science-in-Mechanical-Engineering/furuta-pixel-to-torque-control
[ "41da2099c8a913f77b3b557dcc8f08b44d282cbb" ]
[ "package/statetoinput/state_estimator_wrapper.py" ]
[ "\"\"\"\nOpenAI Gym environments with predicted state vector of specified VisionToState model (data_id, model_name) as state\n\n@Author: Steffen Bleher\n\"\"\"\nfrom gym import spaces\nfrom gym_brt.data.config.configuration import FREQUENCY\n\nimport numpy as np\n\nfrom gym_brt.envs.reinforcementlearning_extensions.vision_wrapping_classes import VisionQubeBeginDownEnv\nfrom visiontostate.vision_predictor import VisionPredictor\n\nOBS_MAX = np.asarray([1, 1, 1, 1, np.inf, np.inf], dtype=np.float64)\n\n\nclass VtSQubeBeginDownEnv(VisionQubeBeginDownEnv):\n def __init__(self, data_id, model_name, frequency=FREQUENCY, batch_size=2048, use_simulator=False,\n simulation_mode='ode', encoder_reset_steps=int(1e8), no_image_normalization=False):\n super().__init__(frequency, batch_size, use_simulator, simulation_mode, encoder_reset_steps,\n no_image_normalization)\n\n self.observation_space = spaces.Box(-OBS_MAX, OBS_MAX, dtype=np.float32)\n self.predictor = VisionPredictor(data_id, model_name)\n\n def _get_state(self):\n image = super()._get_state()\n state = self.predictor.predict(image)\n return state[0:6]" ]
[ [ "numpy.asarray" ] ]
steven-lang/e2cnn
[ "48f49760766ec958b52d0dd7b02483886dfa2096" ]
[ "e2cnn/nn/modules/nonlinearities/gated1.py" ]
[ "\n\nfrom typing import List, Tuple, Any\n\nimport numpy as np\n\nfrom collections import defaultdict\n\nfrom e2cnn.gspaces import *\nfrom e2cnn.nn import FieldType\nfrom e2cnn.nn import GeometricTensor\n\nfrom ..equivariant_module import EquivariantModule\n\nimport torch\n\nfrom torch.nn import Parameter\n\n\n__all__ = [\"GatedNonLinearity1\", \"GATED_ID\", \"GATES_ID\"]\n\n\nGATED_ID = \"gated\"\nGATES_ID = \"gate\"\n\n\nclass GatedNonLinearity1(EquivariantModule):\n \n def __init__(self,\n in_type: FieldType,\n gates: List = None,\n drop_gates: bool = True,\n **kwargs\n ):\n r\"\"\"\n \n Gated non-linearities.\n This module applies a bias and a sigmoid function of the gates fields and, then, multiplies each gated\n field by one of the gates.\n \n The input representation of the gated fields is preserved by this operation while the gate fields are\n discarded.\n \n The gates and the gated fields are provided in one unique input tensor and, therefore, :attr:`in_repr` should\n be the representation of the fiber containing both gates and gated fields.\n Moreover, the parameter :attr:`gates` needs to be set with a list long as the total number of fields,\n containing in a position ``i`` the string ``\"gate\"`` if the ``i``-th field is a gate or the string ``\"gated\"``\n if the ``i``-th field is a gated field. No other strings are allowed.\n By default (``gates = None``), the first half of the fields is assumed to contain the gates (and, so, these\n fields have to be trivial fields) while the second one is assumed to contain the gated fields.\n \n In any case, the number of gates and the number of gated fields have to match (therefore, the number of\n fields has to be an even number).\n \n Args:\n in_type (FieldType): the input field type\n gates (list, optional): list of strings specifying which field in input is a gate and which is a gated field\n drop_gates (bool, optional): if ``True`` (default), drop the trivial fields after using them to compute\n the gates. If ``False``, the gates are stacked with the gated fields in the output\n \n \"\"\"\n\n assert isinstance(in_type.gspace, GeneralOnR2)\n \n if gates is None:\n assert len(in_type) % 2 == 0\n \n g = len(in_type) // 2\n gates = [GATES_ID]*g + [GATED_ID]*g\n \n assert len(gates) == len(in_type)\n \n super(GatedNonLinearity1, self).__init__()\n\n self.space = in_type.gspace\n self.in_type = in_type\n\n self.drop_gates = drop_gates\n \n self._contiguous = {}\n _input_indices = defaultdict(lambda: [])\n _output_indices = defaultdict(lambda: [])\n\n self._nfields = defaultdict(int)\n \n self.branching = None\n \n for g, r in zip(gates, in_type.representations):\n if g == GATES_ID:\n # assert GATES_ID in r.supported_nonlinearities, \\\n assert r.is_trivial(), \\\n \"Error! Representation \\\"{}\\\" can't be a \\\"gate\\\"\".format(r.name)\n elif g == GATED_ID:\n assert GATED_ID in r.supported_nonlinearities, \\\n 'Error! Representation \"{}\" does not support \"gated\" non-linearity'.format(r.name)\n else:\n raise ValueError('Error! \"{}\" type not recognized'.format(g))\n\n ngates = len([g for g in gates if g == GATES_ID])\n ngated = len([g for g in gates if g == GATED_ID])\n\n assert ngates == ngated, \\\n 'Error! 
Number of gates ({}) does not match the number of gated non-linearities required ({})' \\\n .format(ngates, ngated)\n\n if self.drop_gates:\n # only gated fields are preserved\n # therefore, the output representation is computed from the input one, removing the gates\n self.out_type = in_type.index_select([i for i, g in enumerate(gates) if g == GATED_ID])\n else:\n self.out_type = in_type\n\n in_last_position = 0\n out_last_position = 0\n last_type = None\n\n # group fields by their type (gated or gate) and their size, check if fields of the same type are\n # contiguous and retrieve the indices of the fields\n for g, r in zip(gates, in_type.representations):\n if g == GATES_ID:\n type = g\n else:\n type = r.size\n self._nfields[r.size] += 1\n\n if type != last_type:\n if not type in self._contiguous:\n self._contiguous[type] = True\n else:\n self._contiguous[type] = False\n last_type = type\n \n _input_indices[type] += list(range(in_last_position, in_last_position + r.size))\n in_last_position += r.size\n \n if g != GATES_ID or not self.drop_gates:\n # since gates are discarded in output, the position on the output fiber is shifted\n # only when a gated field is met\n _output_indices[type] += list(range(out_last_position, out_last_position + r.size))\n out_last_position += r.size\n \n _input_indices = dict(_input_indices)\n # if self.drop_gates:\n _output_indices = dict(_output_indices)\n # else:\n # self._output_indices = self._input_indices\n\n for t, contiguous in self._contiguous.items():\n if contiguous:\n # for contiguous fields, only the first and last indices are kept\n _input_indices[t] = torch.LongTensor([min(_input_indices[t]), max(_input_indices[t]) + 1])\n if t != GATES_ID or not self.drop_gates:\n _output_indices[t] = torch.LongTensor([min(_output_indices[t]), max(_output_indices[t]) + 1])\n else:\n # otherwise, transform the list of indices into a tensor\n _input_indices[t] = torch.LongTensor(_input_indices[t])\n \n if t != GATES_ID or not self.drop_gates:\n _output_indices[t] = torch.LongTensor(_output_indices[t])\n \n # register the indices tensors as parameters of this module\n self.register_buffer('input_indices_{}'.format(t), _input_indices[t])\n if t != GATES_ID or not self.drop_gates:\n self.register_buffer('output_indices_{}'.format(t), _output_indices[t])\n\n # gates need to be distinguished from gated fields\n _gates_indices = _input_indices.pop(GATES_ID)\n self.register_buffer('gates_indices', _gates_indices)\n \n # build a sorted list of the fields groups, such that every time they are iterated through in the same order\n self._order = sorted(_input_indices.keys())\n \n # the bias for the gates\n self.bias = Parameter(torch.randn(1, ngates, 1, 1, dtype=torch.float), requires_grad=True)\n\n def forward(self, input: GeometricTensor) -> GeometricTensor:\n r\"\"\"\n \n Apply the gated non-linearity to the input feature map.\n \n Args:\n input (GeometricTensor): the input feature map\n\n Returns:\n the resulting feature map\n \n \"\"\"\n \n assert isinstance(input, GeometricTensor)\n assert input.type == self.in_type\n \n # retrieve the gates\n \n if self._contiguous[GATES_ID]:\n gates = input.tensor[:, self.gates_indices[0]:self.gates_indices[1], ...]\n else:\n gates = input.tensor[:, self.gates_indices, ...]\n\n # retrieving only gated fields from the joint tensor is worthless\n input = input.tensor\n \n # transform the gates\n gates = torch.sigmoid(gates - self.bias)\n \n b, c, h, w = input.shape\n \n # build the output tensor\n output = torch.empty(b, 
self.out_type.size, h, w, dtype=torch.float, device=self.bias.device)\n\n if not self.drop_gates:\n # copy the gates in the output\n if self._contiguous[GATES_ID]:\n output[:, self.gates_indices[0]:self.gates_indices[1], ...] = gates\n else:\n output[:, self.gates_indices, ...] = gates\n \n next_gate = 0\n \n # for each field size\n for size in self._order:\n \n # retrieve the needed gates\n g = gates[:, next_gate:next_gate + self._nfields[size], ...].view(b, -1, 1, h, w)\n \n input_indices = getattr(self, f\"input_indices_{size}\")\n output_indices = getattr(self, f\"output_indices_{size}\")\n \n if self._contiguous[size]:\n # if the fields were contiguous, we can use slicing\n output[:, output_indices[0]:output_indices[1], ...] =\\\n (\n input[:, input_indices[0]:input_indices[1], ...]\n .view(b, -1, size, h, w)\n * g\n ).view(b, -1, h, w)\n \n else:\n # otherwise we have to use indexing\n output[:, output_indices, :, :] = \\\n (\n input[:, input_indices, ...]\n .view(b, -1, size, h, w)\n * g\n ).view(b, -1, h, w)\n \n # shift the position on the gates fiber\n next_gate += self._nfields[size]\n \n # wrap the result in a GeometricTensor\n return GeometricTensor(output, self.out_type)\n\n def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:\n \n assert len(input_shape) == 4\n assert input_shape[1] == self.in_type.size\n \n b, c, hi, wi = input_shape\n \n return b, self.out_type.size, hi, wi\n \n def check_equivariance(self, atol: float = 1e-6, rtol: float = 1e-5) -> List[Tuple[Any, float]]:\n \n c = self.in_type.size\n\n x = torch.randn(3, c, 10, 10)\n\n x = GeometricTensor(x, self.in_type)\n\n errors = []\n\n for el in self.space.testing_elements:\n out1 = self(x).transform_fibers(el)\n out2 = self(x.transform_fibers(el))\n \n errs = (out1.tensor - out2.tensor).detach().numpy()\n errs = np.abs(errs).reshape(-1)\n print(el, errs.max(), errs.mean(), errs.var())\n \n assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), \\\n 'The error found during equivariance check with element \"{}\" is too high: max = {}, mean = {} var ={}' \\\n .format(el, errs.max(), errs.mean(), errs.var())\n \n errors.append((el, errs.mean()))\n\n return errors\n\n" ]
[ [ "torch.empty", "torch.randn", "numpy.abs", "torch.sigmoid", "torch.LongTensor", "torch.allclose" ] ]
carbonscott/pyrotein
[ "4c41eade0d014e70aadf9f9c475cbc4255a0a32e" ]
[ "examples/xfam-loop.plot_u.box.seq.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pyrotein as pr\nimport givens as gv\nfrom display import plot_dmat, plot_singular, plot_left_singular, plot_coeff\nimport multiprocessing as mp\nfrom loaddata import load_gpcrdb_xlsx\nimport colorsimple as cs\n\n\ndef reverse_sign(u, vh, rank, index_from_zero = True):\n # Comply with the convention (1-based index)\n rank_in_data = rank if index_from_zero else rank - 1\n\n # Reverse sign...\n u[:, rank_in_data] = - u[:, rank_in_data]\n vh[rank_in_data, :] = -vh[rank_in_data, :]\n\n return None\n\n\n# Set job name...\njob_name = \"xfam-loop\"\nfl_path = f\"{job_name}.u\"\n\n# Specify chains to process...\nfl_chain = \"gpcrdb.all.xlsx\"\nsheet = f\"xfam\"\nlines = load_gpcrdb_xlsx(fl_chain, sheet = sheet, splitchain = True)\n\n# Load upstream data...\nb, e = 0, 40 + 1\nu = np.load(f\"{job_name}.u.seq.trunc.{b:02d}to{e:02d}.npy\")\ns = np.load(f\"{job_name}.s.seq.trunc.{b:02d}to{e:02d}.npy\")\nvh = np.load(f\"{job_name}.vh.seq.trunc.{b:02d}to{e:02d}.npy\")\nlen_res = np.load(f\"{job_name}.len_res.npy\")\nlen_seq = np.load(f\"{job_name}.len_seq.npy\")\n\n# Allow positive value that fits the distance in nature (optinal)...\nreverse_sign(u, vh, 1, index_from_zero = False)\nrev_list = [4]\nfor rev_i in rev_list: reverse_sign(u, vh, rev_i, index_from_zero = False)\n\n# Calculate the coefficients...\nc = np.matmul(np.diag(s), vh)\n\n# Standardize u and c and assign units...\nu_ave = np.sqrt(u.shape[0])\nc = c / u_ave\n\n# Define a series of rotation...\nrotations = [\n [ 3, 4, -32.5],\n [ 2, 4, 51.8],\n [ 2, 5, -14.4],\n]\nfor i, (x, y, _) in enumerate(rotations):\n if x in rev_list: rotations[i][2] *= -1\n if y in rev_list: rotations[i][2] *= -1\ndisp_index = -1 # 0-based Python convention\nif len(rotations): rank1_last, rank2_last = rotations[disp_index][0:0 + 2]\nfor rank1, rank2, theta in rotations:\n gv.givens_rotation(u, s, c, rank1, rank2, theta, index_from_zero = False)\n\n## rank1_last, rank2_last = 8, 11\n\n# Create labels...\n# +1 to make it a right-end closed\nlabels = {'H8': [722, 735],\n 'TM1': [0, 34],\n 'TM2': [63, 96],\n 'TM3': [158, 194],\n 'TM4': [227, 254],\n 'TM5': [316, 357],\n 'TM6': [591, 631],\n 'TM7': [680, 708]}\nfor k, v in labels.items(): labels[k] = [ (i) * len_res for i in v ]\n\n## box_range = [591, 591+20]\nbox_range = [591+20, 591+40]\nbox_range = [ i * len_res for i in box_range ]\n\ncmds = [\n ## \"unset xtics\",\n ## \"unset ytics\",\n ## \"unset xlabel\",\n ## \"unset ylabel\",\n ## \"unset border\"\n ]\n\ntop = 10\n\n\n# Visualize a u matrix...\ndef plot_left_singualr_by_rank(rank):\n return plot_left_singular(u, rank, \n length_mat = len_seq, \n lbl = labels,\n\n ## width = 10,\n ## height = 12,\n ## fontsize = 29,\n ## lbl_fontsize = 29,\n\n width = 2.946,\n height = 3.535,\n fontsize = 8,\n lbl_fontsize = 8,\n\n lbl_linewidth = 1,\n curve_linewidth = 1,\n\n frac = 1.0,\n binning = 1,\n intst_min = -0.01,\n intst_max = 0.01,\n fl_path = fl_path,\n fl_postfix = f'',\n box_range = box_range,\n index_from_zero = False)\n\nif 0:\n if __name__ == \"__main__\":\n num_job = 10\n if __name__ == \"__main__\":\n with mp.Pool(num_job) as proc:\n proc.map( plot_left_singualr_by_rank, range(1, 20) )\n ## proc.map( plot_left_singualr_by_rank, range(2, 5) )\nif 0:\n num_job = 2\n with mp.Pool(num_job) as proc:\n proc.map( plot_left_singualr_by_rank, (rank1_last, rank2_last) )\n\nif 1:\n plot_left_singualr_by_rank(2)\n\nif 0:\n if __name__ == \"__main__\":\n num_job = 2\n with mp.Pool(num_job) 
as proc:\n proc.map( plot_left_singualr_by_rank, [rank1_last, rank2_last] )\n" ]
[ [ "numpy.sqrt", "numpy.load", "numpy.diag" ] ]
Xiejiu/second_age_estimation
[ "89e9ef371a07aba0bbba496697176381e4e9432c" ]
[ "comparisons_among_different_model_formulations/on_AgeDB/DOEL_3groups_resnet18/train_1_4.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 27 10:09:57 2019\n\n@author: xjc\n\"\"\"\n\n\n\n\nimport math\nimport numpy as np\nimport fire\nimport os\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models, transforms\nfrom Dataset_folder import Dataset_floder as data_prepare\nfrom ensemble_learning_resnet_three_group_AgeDB import el_resnet18\nimport torch.utils.model_zoo as model_zoo\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\n\nclass AverageMeter(object):\n \"\"\"\n Computes and stores the average and current value\n Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef train(model, loader, optimizer, epoch, n_epochs, device, print_freq=1):\n\n batch_time = AverageMeter()\n cross_entropy_loss=AverageMeter() \n kl_loss=AverageMeter()\n total_loss=AverageMeter()\n argmax_MAE=AverageMeter()\n accuracy=AverageMeter()\n\n # Model on train mode\n model.train()\n\n\n end = time.time()\n for batch_idx, (input, target) in enumerate(loader):\n # Create vaiables\n input_var, target_var = input.to(device), target.to(device) \n\n \n # compute output\n output = model(input_var)\n\n batch_size=target.size(0)\n\n\n cross_entropy_term_1 = torch.nn.functional.cross_entropy(output[:,0:3], target_var[:,3].long())\n cross_entropy_term_2 = torch.nn.functional.cross_entropy(output[:,3:6], target_var[:,7].long())\n cross_entropy_term_3 = torch.nn.functional.cross_entropy(output[:,6:9], target_var[:,11].long())\n cross_entropy_term_4 = torch.nn.functional.cross_entropy(output[:,9:12], target_var[:,15].long())\n cross_entropy_term_5 = torch.nn.functional.cross_entropy(output[:,12:15], target_var[:,19].long())\n cross_entropy_term_6 = torch.nn.functional.cross_entropy(output[:,15:18], target_var[:,23].long())\n cross_entropy_term_7 = torch.nn.functional.cross_entropy(output[:,18:21], target_var[:,27].long())\n cross_entropy_term_8 = torch.nn.functional.cross_entropy(output[:,21:24], target_var[:,31].long())\n cross_entropy_term_9 = torch.nn.functional.cross_entropy(output[:,24:27], target_var[:,35].long())\n cross_entropy_term_10 = torch.nn.functional.cross_entropy(output[:,27:30], target_var[:,39].long())\n cross_entropy_term_11 = torch.nn.functional.cross_entropy(output[:,30:33], target_var[:,43].long())\n cross_entropy_term_12 = torch.nn.functional.cross_entropy(output[:,33:36], target_var[:,47].long())\n cross_entropy_term_13 = torch.nn.functional.cross_entropy(output[:,36:39], target_var[:,51].long())\n cross_entropy_term_14 = torch.nn.functional.cross_entropy(output[:,39:42], target_var[:,55].long())\n cross_entropy_term_15 = torch.nn.functional.cross_entropy(output[:,42:45], target_var[:,59].long())\n cross_entropy_term_16 = torch.nn.functional.cross_entropy(output[:,45:48], target_var[:,63].long())\n cross_entropy_term_17 = torch.nn.functional.cross_entropy(output[:,48:51], 
target_var[:,67].long())\n cross_entropy_term_18 = torch.nn.functional.cross_entropy(output[:,51:54], target_var[:,71].long())\n cross_entropy_term_19 = torch.nn.functional.cross_entropy(output[:,54:57], target_var[:,75].long())\n cross_entropy_term_20 = torch.nn.functional.cross_entropy(output[:,57:60], target_var[:,79].long())\n cross_entropy_term_21 = torch.nn.functional.cross_entropy(output[:,60:63], target_var[:,83].long())\n cross_entropy_term_22 = torch.nn.functional.cross_entropy(output[:,63:66], target_var[:,87].long())\n cross_entropy_term_23 = torch.nn.functional.cross_entropy(output[:,66:69], target_var[:,91].long())\n cross_entropy_term_24 = torch.nn.functional.cross_entropy(output[:,69:72], target_var[:,95].long())\n cross_entropy_term_25 = torch.nn.functional.cross_entropy(output[:,72:75], target_var[:,99].long())\n cross_entropy_term_26 = torch.nn.functional.cross_entropy(output[:,75:78], target_var[:,103].long())\n cross_entropy_term_27 = torch.nn.functional.cross_entropy(output[:,78:81], target_var[:,107].long())\n cross_entropy_term_28 = torch.nn.functional.cross_entropy(output[:,81:84], target_var[:,111].long())\n cross_entropy_term_29 = torch.nn.functional.cross_entropy(output[:,84:87], target_var[:,115].long())\n cross_entropy_term_30 = torch.nn.functional.cross_entropy(output[:,87:90], target_var[:,119].long())\n cross_entropy_term_31 = torch.nn.functional.cross_entropy(output[:,90:93], target_var[:,123].long())\n cross_entropy_term_32 = torch.nn.functional.cross_entropy(output[:,93:96], target_var[:,127].long())\n cross_entropy_term_33 = torch.nn.functional.cross_entropy(output[:,96:99], target_var[:,131].long())\n cross_entropy_term_34 = torch.nn.functional.cross_entropy(output[:,99:102], target_var[:,135].long())\n cross_entropy_term_35 = torch.nn.functional.cross_entropy(output[:,102:105], target_var[:,139].long())\n cross_entropy_term_36 = torch.nn.functional.cross_entropy(output[:,105:108], target_var[:,143].long())\n cross_entropy_term_37 = torch.nn.functional.cross_entropy(output[:,108:111], target_var[:,147].long())\n cross_entropy_term_38 = torch.nn.functional.cross_entropy(output[:,111:114], target_var[:,151].long())\n cross_entropy_term_39 = torch.nn.functional.cross_entropy(output[:,114:117], target_var[:,155].long())\n cross_entropy_term_40 = torch.nn.functional.cross_entropy(output[:,117:120], target_var[:,159].long())\n cross_entropy_term_41 = torch.nn.functional.cross_entropy(output[:,120:123], target_var[:,163].long())\n cross_entropy_term_42 = torch.nn.functional.cross_entropy(output[:,123:126], target_var[:,167].long())\n cross_entropy_term_43 = torch.nn.functional.cross_entropy(output[:,126:129], target_var[:,171].long())\n cross_entropy_term_44 = torch.nn.functional.cross_entropy(output[:,129:132], target_var[:,175].long())\n cross_entropy_term_45 = torch.nn.functional.cross_entropy(output[:,132:135], target_var[:,179].long())\n cross_entropy_term_46 = torch.nn.functional.cross_entropy(output[:,135:138], target_var[:,183].long())\n cross_entropy_term_47 = torch.nn.functional.cross_entropy(output[:,138:141], target_var[:,187].long())\n cross_entropy_term_48 = torch.nn.functional.cross_entropy(output[:,141:144], target_var[:,191].long())\n cross_entropy_term_49 = torch.nn.functional.cross_entropy(output[:,144:147], target_var[:,195].long())\n cross_entropy_term_50 = torch.nn.functional.cross_entropy(output[:,147:150], target_var[:,199].long())\n cross_entropy_term_51 = torch.nn.functional.cross_entropy(output[:,150:153], 
target_var[:,203].long())\n cross_entropy_term_52 = torch.nn.functional.cross_entropy(output[:,153:156], target_var[:,207].long())\n cross_entropy_term_53 = torch.nn.functional.cross_entropy(output[:,156:159], target_var[:,211].long())\n cross_entropy_term_54 = torch.nn.functional.cross_entropy(output[:,159:162], target_var[:,215].long())\n cross_entropy_term_55 = torch.nn.functional.cross_entropy(output[:,162:165], target_var[:,219].long())\n cross_entropy_term_56 = torch.nn.functional.cross_entropy(output[:,165:168], target_var[:,223].long())\n cross_entropy_term_57 = torch.nn.functional.cross_entropy(output[:,168:171], target_var[:,227].long())\n cross_entropy_term_58 = torch.nn.functional.cross_entropy(output[:,171:174], target_var[:,231].long())\n cross_entropy_term_59 = torch.nn.functional.cross_entropy(output[:,174:177], target_var[:,235].long())\n cross_entropy_term_60 = torch.nn.functional.cross_entropy(output[:,177:180], target_var[:,239].long())\n cross_entropy_term_61 = torch.nn.functional.cross_entropy(output[:,180:183], target_var[:,243].long())\n cross_entropy_term_62 = torch.nn.functional.cross_entropy(output[:,183:186], target_var[:,247].long())\n cross_entropy_term_63 = torch.nn.functional.cross_entropy(output[:,186:189], target_var[:,251].long())\n cross_entropy_term_64 = torch.nn.functional.cross_entropy(output[:,189:192], target_var[:,255].long())\n cross_entropy_term_65 = torch.nn.functional.cross_entropy(output[:,192:195], target_var[:,259].long())\n cross_entropy_term_66 = torch.nn.functional.cross_entropy(output[:,195:198], target_var[:,263].long())\n cross_entropy_term_67 = torch.nn.functional.cross_entropy(output[:,198:201], target_var[:,267].long())\n cross_entropy_term_68 = torch.nn.functional.cross_entropy(output[:,201:204], target_var[:,271].long())\n cross_entropy_term_69 = torch.nn.functional.cross_entropy(output[:,204:207], target_var[:,275].long())\n cross_entropy_term_70 = torch.nn.functional.cross_entropy(output[:,207:210], target_var[:,279].long())\n cross_entropy_term_71 = torch.nn.functional.cross_entropy(output[:,210:213], target_var[:,283].long())\n cross_entropy_term_72 = torch.nn.functional.cross_entropy(output[:,213:216], target_var[:,287].long())\n cross_entropy_term_73 = torch.nn.functional.cross_entropy(output[:,216:219], target_var[:,291].long())\n cross_entropy_term_74 = torch.nn.functional.cross_entropy(output[:,219:222], target_var[:,295].long())\n cross_entropy_term_75 = torch.nn.functional.cross_entropy(output[:,222:225], target_var[:,299].long())\n cross_entropy_term_76 = torch.nn.functional.cross_entropy(output[:,225:228], target_var[:,303].long())\n cross_entropy_term_77 = torch.nn.functional.cross_entropy(output[:,228:231], target_var[:,307].long())\n cross_entropy_term_78 = torch.nn.functional.cross_entropy(output[:,231:234], target_var[:,311].long())\n cross_entropy_term_79 = torch.nn.functional.cross_entropy(output[:,234:237], target_var[:,315].long())\n cross_entropy_term_80 = torch.nn.functional.cross_entropy(output[:,237:240], target_var[:,319].long())\n cross_entropy_term_81 = torch.nn.functional.cross_entropy(output[:,240:243], target_var[:,323].long())\n cross_entropy_term_82 = torch.nn.functional.cross_entropy(output[:,243:246], target_var[:,327].long())\n cross_entropy_term_83 = torch.nn.functional.cross_entropy(output[:,246:249], target_var[:,331].long())\n cross_entropy_term_84 = torch.nn.functional.cross_entropy(output[:,249:252], target_var[:,335].long())\n cross_entropy_term_85 = 
torch.nn.functional.cross_entropy(output[:,252:255], target_var[:,339].long())\n cross_entropy_term_86 = torch.nn.functional.cross_entropy(output[:,255:258], target_var[:,343].long())\n cross_entropy_term_87 = torch.nn.functional.cross_entropy(output[:,258:261], target_var[:,347].long())\n cross_entropy_term_88 = torch.nn.functional.cross_entropy(output[:,261:264], target_var[:,351].long())\n cross_entropy_term_89 = torch.nn.functional.cross_entropy(output[:,264:267], target_var[:,355].long())\n cross_entropy_term_90 = torch.nn.functional.cross_entropy(output[:,267:270], target_var[:,359].long())\n cross_entropy_term_91 = torch.nn.functional.cross_entropy(output[:,270:273], target_var[:,363].long())\n cross_entropy_term_92 = torch.nn.functional.cross_entropy(output[:,273:276], target_var[:,367].long())\n cross_entropy_term_93 = torch.nn.functional.cross_entropy(output[:,276:279], target_var[:,371].long())\n cross_entropy_term_94 = torch.nn.functional.cross_entropy(output[:,279:282], target_var[:,375].long())\n cross_entropy_term_95 = torch.nn.functional.cross_entropy(output[:,282:285], target_var[:,379].long())\n cross_entropy_term_96 = torch.nn.functional.cross_entropy(output[:,285:288], target_var[:,383].long())\n cross_entropy_term_97 = torch.nn.functional.cross_entropy(output[:,288:291], target_var[:,387].long())\n cross_entropy_term_98 = torch.nn.functional.cross_entropy(output[:,291:294], target_var[:,391].long())\n cross_entropy_term_99 = torch.nn.functional.cross_entropy(output[:,294:297], target_var[:,395].long())\n cross_entropy_term_100 = torch.nn.functional.cross_entropy(output[:,297:300], target_var[:,399].long())\n\n total_cross_entroy_term=(cross_entropy_term_1+cross_entropy_term_2+cross_entropy_term_3+cross_entropy_term_4+cross_entropy_term_5+\n cross_entropy_term_6+cross_entropy_term_7+cross_entropy_term_8+cross_entropy_term_9+cross_entropy_term_10+\n cross_entropy_term_11+cross_entropy_term_12+cross_entropy_term_13+cross_entropy_term_14+cross_entropy_term_15+\n cross_entropy_term_16+cross_entropy_term_17+cross_entropy_term_18+cross_entropy_term_19+cross_entropy_term_20+\n cross_entropy_term_21+cross_entropy_term_22+cross_entropy_term_23+cross_entropy_term_24+cross_entropy_term_25+\n cross_entropy_term_26+cross_entropy_term_27+cross_entropy_term_28+cross_entropy_term_29+cross_entropy_term_30+\n cross_entropy_term_31+cross_entropy_term_32+cross_entropy_term_33+cross_entropy_term_34+cross_entropy_term_35+\n cross_entropy_term_36+cross_entropy_term_37+cross_entropy_term_38+cross_entropy_term_39+cross_entropy_term_40+\n cross_entropy_term_41+cross_entropy_term_42+cross_entropy_term_43+cross_entropy_term_44+cross_entropy_term_45+\n cross_entropy_term_46+cross_entropy_term_47+cross_entropy_term_48+cross_entropy_term_49+cross_entropy_term_50+\n cross_entropy_term_51+cross_entropy_term_52+cross_entropy_term_53+cross_entropy_term_54+cross_entropy_term_55+\n cross_entropy_term_56+cross_entropy_term_57+cross_entropy_term_58+cross_entropy_term_59+cross_entropy_term_60+\n cross_entropy_term_61+cross_entropy_term_62+cross_entropy_term_63+cross_entropy_term_64+cross_entropy_term_65+\n cross_entropy_term_66+cross_entropy_term_67+cross_entropy_term_68+cross_entropy_term_69+cross_entropy_term_70+\n cross_entropy_term_71+cross_entropy_term_72+cross_entropy_term_73+cross_entropy_term_74+cross_entropy_term_75+\n cross_entropy_term_76+cross_entropy_term_77+cross_entropy_term_78+cross_entropy_term_79+cross_entropy_term_80+\n 
cross_entropy_term_81+cross_entropy_term_82+cross_entropy_term_83+cross_entropy_term_84+cross_entropy_term_85+\n cross_entropy_term_86+cross_entropy_term_87+cross_entropy_term_88+cross_entropy_term_89+cross_entropy_term_90+\n cross_entropy_term_91+cross_entropy_term_92+cross_entropy_term_93+cross_entropy_term_94+cross_entropy_term_95+\n cross_entropy_term_96+cross_entropy_term_97+cross_entropy_term_98+cross_entropy_term_99+cross_entropy_term_100)\n \n KLloss=nn.KLDivLoss(reduction='sum')\n kl_loss_term_1=KLloss(F.log_softmax(output[:,0:3],1), target_var[:,0:3])/batch_size\n kl_loss_term_2=KLloss(F.log_softmax(output[:,3:6],1), target_var[:,4:7])/batch_size\n kl_loss_term_3=KLloss(F.log_softmax(output[:,6:9],1), target_var[:,8:11])/batch_size\n kl_loss_term_4=KLloss(F.log_softmax(output[:,9:12],1), target_var[:,12:15])/batch_size\n kl_loss_term_5=KLloss(F.log_softmax(output[:,12:15],1), target_var[:,16:19])/batch_size\n kl_loss_term_6=KLloss(F.log_softmax(output[:,15:18],1), target_var[:,20:23])/batch_size\n kl_loss_term_7=KLloss(F.log_softmax(output[:,18:21],1), target_var[:,24:27])/batch_size\n kl_loss_term_8=KLloss(F.log_softmax(output[:,21:24],1), target_var[:,28:31])/batch_size\n kl_loss_term_9=KLloss(F.log_softmax(output[:,24:27],1), target_var[:,32:35])/batch_size\n kl_loss_term_10=KLloss(F.log_softmax(output[:,27:30],1), target_var[:,36:39])/batch_size\n kl_loss_term_11=KLloss(F.log_softmax(output[:,30:33],1), target_var[:,40:43])/batch_size\n kl_loss_term_12=KLloss(F.log_softmax(output[:,33:36],1), target_var[:,44:47])/batch_size\n kl_loss_term_13=KLloss(F.log_softmax(output[:,36:39],1), target_var[:,48:51])/batch_size\n kl_loss_term_14=KLloss(F.log_softmax(output[:,39:42],1), target_var[:,52:55])/batch_size\n kl_loss_term_15=KLloss(F.log_softmax(output[:,42:45],1), target_var[:,56:59])/batch_size\n kl_loss_term_16=KLloss(F.log_softmax(output[:,45:48],1), target_var[:,60:63])/batch_size\n kl_loss_term_17=KLloss(F.log_softmax(output[:,48:51],1), target_var[:,64:67])/batch_size\n kl_loss_term_18=KLloss(F.log_softmax(output[:,51:54],1), target_var[:,68:71])/batch_size\n kl_loss_term_19=KLloss(F.log_softmax(output[:,54:57],1), target_var[:,72:75])/batch_size\n kl_loss_term_20=KLloss(F.log_softmax(output[:,57:60],1), target_var[:,76:79])/batch_size\n kl_loss_term_21=KLloss(F.log_softmax(output[:,60:63],1), target_var[:,80:83])/batch_size\n kl_loss_term_22=KLloss(F.log_softmax(output[:,63:66],1), target_var[:,84:87])/batch_size\n kl_loss_term_23=KLloss(F.log_softmax(output[:,66:69],1), target_var[:,88:91])/batch_size\n kl_loss_term_24=KLloss(F.log_softmax(output[:,69:72],1), target_var[:,92:95])/batch_size\n kl_loss_term_25=KLloss(F.log_softmax(output[:,72:75],1), target_var[:,96:99])/batch_size\n kl_loss_term_26=KLloss(F.log_softmax(output[:,75:78],1), target_var[:,100:103])/batch_size\n kl_loss_term_27=KLloss(F.log_softmax(output[:,78:81],1), target_var[:,104:107])/batch_size\n kl_loss_term_28=KLloss(F.log_softmax(output[:,81:84],1), target_var[:,108:111])/batch_size\n kl_loss_term_29=KLloss(F.log_softmax(output[:,84:87],1), target_var[:,112:115])/batch_size\n kl_loss_term_30=KLloss(F.log_softmax(output[:,87:90],1), target_var[:,116:119])/batch_size\n kl_loss_term_31=KLloss(F.log_softmax(output[:,90:93],1), target_var[:,120:123])/batch_size\n kl_loss_term_32=KLloss(F.log_softmax(output[:,93:96],1), target_var[:,124:127])/batch_size\n kl_loss_term_33=KLloss(F.log_softmax(output[:,96:99],1), target_var[:,128:131])/batch_size\n kl_loss_term_34=KLloss(F.log_softmax(output[:,99:102],1), 
target_var[:,132:135])/batch_size\n kl_loss_term_35=KLloss(F.log_softmax(output[:,102:105],1), target_var[:,136:139])/batch_size\n kl_loss_term_36=KLloss(F.log_softmax(output[:,105:108],1), target_var[:,140:143])/batch_size\n kl_loss_term_37=KLloss(F.log_softmax(output[:,108:111],1), target_var[:,144:147])/batch_size\n kl_loss_term_38=KLloss(F.log_softmax(output[:,111:114],1), target_var[:,148:151])/batch_size\n kl_loss_term_39=KLloss(F.log_softmax(output[:,114:117],1), target_var[:,152:155])/batch_size\n kl_loss_term_40=KLloss(F.log_softmax(output[:,117:120],1), target_var[:,156:159])/batch_size\n kl_loss_term_41=KLloss(F.log_softmax(output[:,120:123],1), target_var[:,160:163])/batch_size\n kl_loss_term_42=KLloss(F.log_softmax(output[:,123:126],1), target_var[:,164:167])/batch_size\n kl_loss_term_43=KLloss(F.log_softmax(output[:,126:129],1), target_var[:,168:171])/batch_size\n kl_loss_term_44=KLloss(F.log_softmax(output[:,129:132],1), target_var[:,172:175])/batch_size\n kl_loss_term_45=KLloss(F.log_softmax(output[:,132:135],1), target_var[:,176:179])/batch_size\n kl_loss_term_46=KLloss(F.log_softmax(output[:,135:138],1), target_var[:,180:183])/batch_size\n kl_loss_term_47=KLloss(F.log_softmax(output[:,138:141],1), target_var[:,184:187])/batch_size\n kl_loss_term_48=KLloss(F.log_softmax(output[:,141:144],1), target_var[:,188:191])/batch_size\n kl_loss_term_49=KLloss(F.log_softmax(output[:,144:147],1), target_var[:,192:195])/batch_size\n kl_loss_term_50=KLloss(F.log_softmax(output[:,147:150],1), target_var[:,196:199])/batch_size\n kl_loss_term_51=KLloss(F.log_softmax(output[:,150:153],1), target_var[:,200:203])/batch_size\n kl_loss_term_52=KLloss(F.log_softmax(output[:,153:156],1), target_var[:,204:207])/batch_size\n kl_loss_term_53=KLloss(F.log_softmax(output[:,156:159],1), target_var[:,208:211])/batch_size\n kl_loss_term_54=KLloss(F.log_softmax(output[:,159:162],1), target_var[:,212:215])/batch_size\n kl_loss_term_55=KLloss(F.log_softmax(output[:,162:165],1), target_var[:,216:219])/batch_size\n kl_loss_term_56=KLloss(F.log_softmax(output[:,165:168],1), target_var[:,220:223])/batch_size\n kl_loss_term_57=KLloss(F.log_softmax(output[:,168:171],1), target_var[:,224:227])/batch_size\n kl_loss_term_58=KLloss(F.log_softmax(output[:,171:174],1), target_var[:,228:231])/batch_size\n kl_loss_term_59=KLloss(F.log_softmax(output[:,174:177],1), target_var[:,232:235])/batch_size\n kl_loss_term_60=KLloss(F.log_softmax(output[:,177:180],1), target_var[:,236:239])/batch_size\n kl_loss_term_61=KLloss(F.log_softmax(output[:,180:183],1), target_var[:,240:243])/batch_size\n kl_loss_term_62=KLloss(F.log_softmax(output[:,183:186],1), target_var[:,244:247])/batch_size\n kl_loss_term_63=KLloss(F.log_softmax(output[:,186:189],1), target_var[:,248:251])/batch_size\n kl_loss_term_64=KLloss(F.log_softmax(output[:,189:192],1), target_var[:,252:255])/batch_size\n kl_loss_term_65=KLloss(F.log_softmax(output[:,192:195],1), target_var[:,256:259])/batch_size\n kl_loss_term_66=KLloss(F.log_softmax(output[:,195:198],1), target_var[:,260:263])/batch_size\n kl_loss_term_67=KLloss(F.log_softmax(output[:,198:201],1), target_var[:,264:267])/batch_size\n kl_loss_term_68=KLloss(F.log_softmax(output[:,201:204],1), target_var[:,268:271])/batch_size\n kl_loss_term_69=KLloss(F.log_softmax(output[:,204:207],1), target_var[:,272:275])/batch_size\n kl_loss_term_70=KLloss(F.log_softmax(output[:,207:210],1), target_var[:,276:279])/batch_size\n kl_loss_term_71=KLloss(F.log_softmax(output[:,210:213],1), target_var[:,280:283])/batch_size\n 
kl_loss_term_72=KLloss(F.log_softmax(output[:,213:216],1), target_var[:,284:287])/batch_size\n kl_loss_term_73=KLloss(F.log_softmax(output[:,216:219],1), target_var[:,288:291])/batch_size\n kl_loss_term_74=KLloss(F.log_softmax(output[:,219:222],1), target_var[:,292:295])/batch_size\n kl_loss_term_75=KLloss(F.log_softmax(output[:,222:225],1), target_var[:,296:299])/batch_size\n kl_loss_term_76=KLloss(F.log_softmax(output[:,225:228],1), target_var[:,300:303])/batch_size\n kl_loss_term_77=KLloss(F.log_softmax(output[:,228:231],1), target_var[:,304:307])/batch_size\n kl_loss_term_78=KLloss(F.log_softmax(output[:,231:234],1), target_var[:,308:311])/batch_size\n kl_loss_term_79=KLloss(F.log_softmax(output[:,234:237],1), target_var[:,312:315])/batch_size\n kl_loss_term_80=KLloss(F.log_softmax(output[:,237:240],1), target_var[:,316:319])/batch_size\n kl_loss_term_81=KLloss(F.log_softmax(output[:,240:243],1), target_var[:,320:323])/batch_size\n kl_loss_term_82=KLloss(F.log_softmax(output[:,243:246],1), target_var[:,324:327])/batch_size\n kl_loss_term_83=KLloss(F.log_softmax(output[:,246:249],1), target_var[:,328:331])/batch_size\n kl_loss_term_84=KLloss(F.log_softmax(output[:,249:252],1), target_var[:,332:335])/batch_size\n kl_loss_term_85=KLloss(F.log_softmax(output[:,252:255],1), target_var[:,336:339])/batch_size\n kl_loss_term_86=KLloss(F.log_softmax(output[:,255:258],1), target_var[:,340:343])/batch_size\n kl_loss_term_87=KLloss(F.log_softmax(output[:,258:261],1), target_var[:,344:347])/batch_size\n kl_loss_term_88=KLloss(F.log_softmax(output[:,261:264],1), target_var[:,348:351])/batch_size\n kl_loss_term_89=KLloss(F.log_softmax(output[:,264:267],1), target_var[:,352:355])/batch_size\n kl_loss_term_90=KLloss(F.log_softmax(output[:,267:270],1), target_var[:,356:359])/batch_size\n kl_loss_term_91=KLloss(F.log_softmax(output[:,270:273],1), target_var[:,360:363])/batch_size\n kl_loss_term_92=KLloss(F.log_softmax(output[:,273:276],1), target_var[:,364:367])/batch_size\n kl_loss_term_93=KLloss(F.log_softmax(output[:,276:279],1), target_var[:,368:371])/batch_size\n kl_loss_term_94=KLloss(F.log_softmax(output[:,279:282],1), target_var[:,372:375])/batch_size\n kl_loss_term_95=KLloss(F.log_softmax(output[:,282:285],1), target_var[:,376:379])/batch_size\n kl_loss_term_96=KLloss(F.log_softmax(output[:,285:288],1), target_var[:,380:383])/batch_size\n kl_loss_term_97=KLloss(F.log_softmax(output[:,288:291],1), target_var[:,384:387])/batch_size\n kl_loss_term_98=KLloss(F.log_softmax(output[:,291:294],1), target_var[:,388:391])/batch_size\n kl_loss_term_99=KLloss(F.log_softmax(output[:,294:297],1), target_var[:,392:395])/batch_size\n kl_loss_term_100=KLloss(F.log_softmax(output[:,297:300],1), target_var[:,396:399])/batch_size\n\n total_kl_loss_term=(kl_loss_term_1+kl_loss_term_2+kl_loss_term_3+kl_loss_term_4+kl_loss_term_5+\n kl_loss_term_6+kl_loss_term_7+kl_loss_term_8+kl_loss_term_9+kl_loss_term_10+\n kl_loss_term_11+kl_loss_term_12+kl_loss_term_13+kl_loss_term_14+kl_loss_term_15+\n kl_loss_term_16+kl_loss_term_17+kl_loss_term_18+kl_loss_term_19+kl_loss_term_20+\n kl_loss_term_21+kl_loss_term_22+kl_loss_term_23+kl_loss_term_24+kl_loss_term_25+\n kl_loss_term_26+kl_loss_term_27+kl_loss_term_28+kl_loss_term_29+kl_loss_term_30+\n kl_loss_term_31+kl_loss_term_32+kl_loss_term_33+kl_loss_term_34+kl_loss_term_35+\n kl_loss_term_36+kl_loss_term_37+kl_loss_term_38+kl_loss_term_39+kl_loss_term_40+\n kl_loss_term_41+kl_loss_term_42+kl_loss_term_43+kl_loss_term_44+kl_loss_term_45+\n 
kl_loss_term_46+kl_loss_term_47+kl_loss_term_48+kl_loss_term_49+kl_loss_term_50+\n
                            kl_loss_term_51+kl_loss_term_52+kl_loss_term_53+kl_loss_term_54+kl_loss_term_55+\n
                            kl_loss_term_56+kl_loss_term_57+kl_loss_term_58+kl_loss_term_59+kl_loss_term_60+\n
                            kl_loss_term_61+kl_loss_term_62+kl_loss_term_63+kl_loss_term_64+kl_loss_term_65+\n
                            kl_loss_term_66+kl_loss_term_67+kl_loss_term_68+kl_loss_term_69+kl_loss_term_70+\n
                            kl_loss_term_71+kl_loss_term_72+kl_loss_term_73+kl_loss_term_74+kl_loss_term_75+\n
                            kl_loss_term_76+kl_loss_term_77+kl_loss_term_78+kl_loss_term_79+kl_loss_term_80+\n
                            kl_loss_term_81+kl_loss_term_82+kl_loss_term_83+kl_loss_term_84+kl_loss_term_85+\n
                            kl_loss_term_86+kl_loss_term_87+kl_loss_term_88+kl_loss_term_89+kl_loss_term_90+\n
                            kl_loss_term_91+kl_loss_term_92+kl_loss_term_93+kl_loss_term_94+kl_loss_term_95+\n
                            kl_loss_term_96+kl_loss_term_97+kl_loss_term_98+kl_loss_term_99+kl_loss_term_100)\n
\n
        # the cross-entropy term is tracked for logging only; the KL term is optimized\n
#        loss=mean_cross_entroy_term+mean_kl_loss_term\n
        loss = total_kl_loss_term\n
\n
        batch_mae, batch_acc, _ = argmax_mae_acc(output.cpu(), target)\n
\n
        cross_entropy_loss.update(total_cross_entroy_term.item(), batch_size)\n
        kl_loss.update(total_kl_loss_term.item(), batch_size)\n
        total_loss.update(loss.item(), batch_size)\n
        argmax_MAE.update(batch_mae.item(), batch_size)\n
        accuracy.update(batch_acc.item(), batch_size)\n
\n
        # compute gradient and do SGD step\n
        optimizer.zero_grad()\n
        loss.backward()\n
        optimizer.step()\n
\n
        # measure elapsed time\n
        batch_time.update(time.time() - end)\n
        end = time.time()\n
\n
    # Return summary statistics\n
    return batch_time.avg, total_loss.avg, cross_entropy_loss.avg, kl_loss.avg, argmax_MAE.avg, accuracy.avg\n
\n
\n
def validate(model, loader, epoch, n_epochs, device, print_freq=1):\n
\n
    batch_time = AverageMeter()\n
    cross_entropy_loss = AverageMeter()\n
    kl_loss = AverageMeter()\n
    total_loss = AverageMeter()\n
    argmax_MAE = AverageMeter()\n
    accuracy = AverageMeter()\n
\n
    # Model in eval mode\n
    model.eval()\n
\n
    end = time.time()\n
    with torch.no_grad():\n
        for batch_idx, (input, target) in enumerate(loader):\n
            # Create variables\n
            input_var, target_var = input.to(device), target.to(device)\n
\n
            # compute output\n
            output = model(input_var)\n
\n
            batch_size = target.size(0)\n
\n
            # head i owns output columns 3*i:3*i+3; its targets are a 3-bin label\n
            # distribution (columns 4*i:4*i+3) and a hard group index (column 4*i+3)\n
            KLloss = nn.KLDivLoss(reduction='sum')\n
            total_cross_entroy_term = 0\n
            total_kl_loss_term = 0\n
            for i in range(100):\n
                logits = output[:, 3*i:3*i+3]\n
                total_cross_entroy_term = total_cross_entroy_term + torch.nn.functional.cross_entropy(logits, target_var[:, 4*i+3].long())\n
                total_kl_loss_term = total_kl_loss_term + KLloss(F.log_softmax(logits, 1), target_var[:, 4*i:4*i+3])/batch_size\n
\n
#            loss=mean_cross_entroy_term+mean_kl_loss_term\n
            loss = total_kl_loss_term\n
\n
            batch_mae, batch_acc, _ = argmax_mae_acc(output.cpu(), target)\n
\n
            cross_entropy_loss.update(total_cross_entroy_term.item(), batch_size)\n
            kl_loss.update(total_kl_loss_term.item(), batch_size)\n
            total_loss.update(loss.item(), batch_size)\n
            argmax_MAE.update(batch_mae.item(), batch_size)\n
            accuracy.update(batch_acc.item(), batch_size)\n
\n
            # measure elapsed time\n
            batch_time.update(time.time() - end)\n
            end = time.time()\n
\n
    # Return summary statistics\n
    return batch_time.avg, total_loss.avg, cross_entropy_loss.avg, kl_loss.avg, argmax_MAE.avg, accuracy.avg\n
\n
\n
def test(model, loader, device, model_state_dir, print_freq=1):\n
\n
    batch_time = AverageMeter()\n
    cross_entropy_loss = AverageMeter()\n
    kl_loss = AverageMeter()\n
    total_loss = AverageMeter()\n
    argmax_MAE = AverageMeter()\n
    accuracy = AverageMeter()\n
\n
    # Model in eval mode\n
    model.eval()\n
    AE_list = []\n
    predict_age_list = []\n
    real_age_list = []\n
\n
    # load the selected checkpoint once, before iterating over the test set\n
    model.load_state_dict(torch.load(model_state_dir))\n
\n
    end = time.time()\n
    with torch.no_grad():\n
        for batch_idx, (input, target) in enumerate(loader):\n
            # Create variables\n
            input_var, target_var = input.to(device), target.to(device)\n
\n
            # compute output\n
            output = model(input_var)\n
\n
            batch_size = target.size(0)\n
\n
            KLloss = nn.KLDivLoss(reduction='sum')\n
            total_cross_entroy_term = 0\n
            total_kl_loss_term = 0\n
            for i in range(100):\n
                logits = output[:, 3*i:3*i+3]\n
                total_cross_entroy_term = total_cross_entroy_term + torch.nn.functional.cross_entropy(logits, target_var[:, 4*i+3].long())\n
                total_kl_loss_term = total_kl_loss_term + KLloss(F.log_softmax(logits, 1), target_var[:, 4*i:4*i+3])/batch_size\n
\n
#            loss=mean_cross_entroy_term+mean_kl_loss_term\n
            loss = total_kl_loss_term\n
\n
            batch_mae, batch_acc, batch_pred_ages = argmax_mae_acc(output.cpu(), target)\n
\n
            predict_age_list.extend(batch_pred_ages)\n
            real_age_list.extend(target[:, 400].long())\n
            batch_AE = torch.abs(batch_pred_ages - target[:, 400].long())\n
            AE_list.extend(batch_AE)\n
\n
            cross_entropy_loss.update(total_cross_entroy_term.item(), batch_size)\n
            kl_loss.update(total_kl_loss_term.item(), batch_size)\n
            total_loss.update(loss.item(), batch_size)\n
            argmax_MAE.update(batch_mae.item(), batch_size)\n
            accuracy.update(batch_acc.item(), batch_size)\n
\n
            # measure elapsed time\n
            batch_time.update(time.time() - end)\n
            end = time.time()\n
\n
    # Return summary statistics\n
    return batch_time.avg, total_loss.avg, cross_entropy_loss.avg, kl_loss.avg, argmax_MAE.avg, accuracy.avg, AE_list, predict_age_list, real_age_list\n
\n
\n
def argmax_mae_acc(output, target):\n
    \"\"\"Computes the argmax-vote MAE, the mean per-head group accuracy (in %) and the predicted ages\"\"\"\n
    with torch.no_grad():\n
        batch_size = target.size(0)\n
\n
        true_predict_count = 0\n
        for i in range(1, 101):\n
            predicted_group = torch.argmax(output[:, 3*i-3:3*i], 1)\n
            true_predict_count += torch.sum(torch.eq(predicted_group, target[:, 4*i-1].long()))\n
\n
            # head i votes for every age consistent with its predicted group:\n
            # group 0 -> ages below i, group 1 -> age i, group 2 -> ages above i\n
            for j in range(batch_size):\n
                predicted_classes = []\n
                if predicted_group[j] == 0:\n
                    predicted_g1 = 1\n
                    predicted_g2 = 0\n
                    predicted_g3 = 0\n
                elif predicted_group[j] == 1:\n
                    predicted_g1 = 0\n
                    predicted_g2 = 1\n
                    predicted_g3 = 0\n
                else:\n
                    predicted_g1 = 0\n
                    predicted_g2 = 0\n
                    predicted_g3 = 1\n
                for k in range(0, i):\n
                    predicted_classes.append(predicted_g1)\n
                predicted_classes.append(predicted_g2)\n
                for l in range(0, 101-i):\n
                    predicted_classes.append(predicted_g3)\n
                if j == 0:\n
                    tmp_batch_predicted_classes = torch.tensor(predicted_classes).view(1, -1)\n
                else:\n
                    tmp_batch_predicted_classes = torch.cat((tmp_batch_predicted_classes, torch.tensor(predicted_classes).view(1, -1)), 0)\n
            if i == 1:\n
                batch_predicted_classes = tmp_batch_predicted_classes.unsqueeze(2)\n
            else:\n
                batch_predicted_classes = torch.cat((batch_predicted_classes, tmp_batch_predicted_classes.unsqueeze(2)), 2)\n
        # the age with the most votes across all 100 heads wins\n
        predicted_classes_count = torch.sum(batch_predicted_classes, 2)\n
        predicted_ages = torch.argmax(predicted_classes_count, 1)\n
\n
        mae = torch.sum(torch.abs(predicted_ages - target[:, 400].long())).float()/batch_size\n
\n
        acc = true_predict_count.float().mul_(100.0/(batch_size*100))\n
\n
        return mae, acc, predicted_ages\n
\n
\n
def demo(data_root, train_list, validate_list, test_list, save, n_epochs=1,\n
         batch_size=64, lr=0.01, wd=0.0005, momentum=0.9, seed=None):\n
    \"\"\"\n
    A demo to show off training and testing of:\n
    \"Deep facial age estimation using conditional multitask learning with weak label expansion.\"\n
    Trains and evaluates an ensemble KL-divergence loss on the MORPH Album2 dataset.\n
\n
    Args:\n
        data_root (str) - path to the directory where the data exist\n
        train_list (str) - path to the train data list\n
        validate_list (str) - path to the validation data list\n
        test_list (str) - path to the test data list\n
        save (str) - path to save the model and results to\n
\n
        n_epochs (int) - number of epochs for training (default 1)\n
        batch_size (int) - size of minibatch (default 64)\n
        lr (float) - base learning rate (default 0.01)\n
        wd (float) - weight decay (default 0.0005)\n
        momentum (float) - momentum (default 0.9)\n
        seed (int) - manually set the random seed (default None)\n
    \"\"\"\n
\n
    # Mean and std values from ImageNet\n
    mean = [0.485, 0.456, 0.406]\n
    stdv = [0.229, 0.224, 0.225]\n
    train_transforms = transforms.Compose([\n
        transforms.RandomResizedCrop(224),\n
        transforms.RandomHorizontalFlip(),\n
        transforms.RandomRotation(5),\n
        transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),\n
        transforms.ToTensor(),\n
        transforms.Normalize(mean=mean, std=stdv),\n
    ])\n
    test_transforms = transforms.Compose([\n
        transforms.Resize(256),\n
        transforms.CenterCrop(224),\n
        transforms.ToTensor(),\n
        transforms.Normalize(mean=mean, std=stdv),\n
    ])\n
\n
    if os.path.exists(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv')):\n
        os.remove(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'))\n
    with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'w') as f:\n
        f.write('******************************************************************\\n')\n
        f.write('records on AgeDB dataset under ''80%-20%'' protocol\\n')\n
        f.write('******************************************************************\\n')\n
        f.write('\\n')\n
        f.write('\\n')\n
\n
    train_set = data_prepare(data_root=data_root, data_list=train_list, transform=train_transforms)\n
    valid_set = data_prepare(data_root=data_root, data_list=validate_list, transform=test_transforms)\n
    test_set = data_prepare(data_root=data_root, data_list=test_list, transform=test_transforms)\n
\n
    # Initialise from the ImageNet-pretrained ResNet-18 weights that match by name\n
    ensemble_learning_model = el_resnet18(num_classes=3)\n
    pretrained_dict = model_zoo.load_url(model_urls['resnet18'])\n
    model_dict = ensemble_learning_model.state_dict()\n
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n
    model_dict.update(pretrained_dict)\n
    ensemble_learning_model.load_state_dict(model_dict)\n
\n
    model = ensemble_learning_model\n
\n
    # Make save directory\n
    if not os.path.exists(save):\n
        os.makedirs(save)\n
    if not os.path.isdir(save):\n
        raise Exception('%s is not a dir' % save)\n
\n
    # Model on cuda\n
    use_cuda = torch.cuda.is_available()\n
    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n
\n
    # prepare data\n
    if seed is not None:\n
        torch.manual_seed(seed)\n
        if use_cuda:\n
            torch.cuda.manual_seed_all(seed)\n
\n
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True,\n
                                               pin_memory=(torch.cuda.is_available()), num_workers=4)\n
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False,\n
                                               pin_memory=(torch.cuda.is_available()), num_workers=4)\n
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False,\n
                                              pin_memory=(torch.cuda.is_available()), num_workers=4)\n
\n
    # Wrap model for multi-GPUs, if necessary\n
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:\n
        model = torch.nn.DataParallel(model)\n
    model_wrapper = model.to(device)\n
    # Optimizer\n
    optimizer = torch.optim.SGD(model_wrapper.parameters(), lr=lr, momentum=momentum, nesterov=True, weight_decay=wd)\n
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 50, 60],\n
                                                     gamma=0.1)\n
\n
    # Train and validate model\n
    best_argmax_MAE = 100\n
    best_accuracy = 0\n
    model_state_name_1 = 'el_resnet18_nesterov_three_group_model_1_4_1.dat'\n
    model_state_dir_1 = os.path.join(save, model_state_name_1)\n
    model_state_name_2 = 'el_resnet18_nesterov_three_group_model_1_4_2.dat'\n
    model_state_dir_2 = os.path.join(save, model_state_name_2)\n
\n
    with open(os.path.join(save, 
'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n# f.write('epoch,train_loss,train_male_age_accuracy,train_female_age_accuracy,train_gender_accuracy,train_MAE\\n')\n f.write('epoch, train_total_loss, train_cross_entropy_loss, train_kl_loss, train_argmax_MAE, train_accuracy, '\n 'valid_total_loss, valid_cross_entropy_loss, valid_kl_loss, valid_argmax_MAE, valid_accuracy\\n')\n\n for epoch in range(n_epochs):\n\n scheduler.step()\n _, train_total_loss, train_cross_entropy_loss, train_kl_loss, train_argmax_MAE, train_accuracy = train(\n model=model_wrapper,\n loader=train_loader,\n optimizer=optimizer,\n epoch=epoch,\n n_epochs=n_epochs,\n device=device\n )\n _, valid_total_loss, valid_cross_entropy_loss, valid_kl_loss, valid_argmax_MAE, valid_accuracy = validate(\n model=model_wrapper,\n loader=valid_loader,\n epoch=epoch,\n n_epochs=n_epochs,\n device=device\n )\n# Determine if model is the best\n\n if valid_argmax_MAE < best_argmax_MAE:\n best_argmax_MAE = valid_argmax_MAE\n if os.path.exists(model_state_dir_1):\n os.remove(model_state_dir_1)\n torch.save(model_wrapper.state_dict(), model_state_dir_1)\n if valid_accuracy > best_accuracy:\n best_accuracy=valid_accuracy\n if os.path.exists(model_state_dir_2):\n os.remove(model_state_dir_2)\n torch.save(model_wrapper.state_dict(), model_state_dir_2)\n \n with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n f.write('%03d, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f\\n'\n % ((epoch + 1), train_total_loss, train_cross_entropy_loss, train_kl_loss, train_argmax_MAE, train_accuracy,\n valid_total_loss, valid_cross_entropy_loss, valid_kl_loss, valid_argmax_MAE, valid_accuracy))\n if math.isnan(float(train_argmax_MAE)):\n break\n\n\n # Test model \n if os.path.exists(model_state_dir_1): \n _, test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy, AE_list, predict_age_list, real_age_list= test(\n model=model_wrapper,\n loader=test_loader,\n device=device,\n model_state_dir=model_state_dir_1,\n )\n os.remove(model_state_dir_1)\n with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n f.write('test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy:\\n')\n f.write('%0.4f, %0.4f, %0.4f, %0.4f, %0.4f\\n' % (test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy))\n# f.write('\\n') \n\n CS_1_numerator=CS_2_numerator=CS_3_numerator=CS_4_numerator=CS_5_numerator=CS_6_numerator=CS_7_numerator=CS_8_numerator=CS_9_numerator=CS_10_numerator=0\n for i in range(len(AE_list)):\n if AE_list[i]<=1:\n CS_1_numerator+=1\n if AE_list[i]<=2:\n CS_2_numerator+=1\n if AE_list[i]<=3:\n CS_3_numerator+=1\n if AE_list[i]<=4:\n CS_4_numerator+=1\n if AE_list[i]<=5:\n CS_5_numerator+=1\n if AE_list[i]<=6:\n CS_6_numerator+=1\n if AE_list[i]<=7:\n CS_7_numerator+=1\n if AE_list[i]<=8:\n CS_8_numerator+=1\n if AE_list[i]<=9:\n CS_9_numerator+=1\n if AE_list[i]<=10:\n CS_10_numerator+=1\n \n CS_1=CS_1_numerator/len(AE_list)\n CS_2=CS_2_numerator/len(AE_list)\n CS_3=CS_3_numerator/len(AE_list)\n CS_4=CS_4_numerator/len(AE_list)\n CS_5=CS_5_numerator/len(AE_list)\n CS_6=CS_6_numerator/len(AE_list)\n CS_7=CS_7_numerator/len(AE_list)\n CS_8=CS_8_numerator/len(AE_list)\n CS_9=CS_9_numerator/len(AE_list)\n CS_10=CS_10_numerator/len(AE_list)\n \n with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n f.write('CS_1, CS_2, CS_3, CS_4, CS_5, 
CS_6, CS_7, CS_8, CS_9, CS_10:\\n')\n f.write('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f\\n'\n % (CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, CS_9, CS_10))\n f.write('\\n')\n\n\n\n \n if os.path.exists(model_state_dir_2): \n _, test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy, AE_list, predict_age_list, real_age_list= test(\n model=model_wrapper,\n loader=test_loader,\n device=device,\n model_state_dir=model_state_dir_2,\n )\n os.remove(model_state_dir_2)\n with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n f.write('test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy:\\n')\n f.write('%0.4f, %0.4f, %0.4f, %0.4f, %0.4f\\n' % (test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy))\n# f.write('\\n') \n CS_1_numerator=CS_2_numerator=CS_3_numerator=CS_4_numerator=CS_5_numerator=CS_6_numerator=CS_7_numerator=CS_8_numerator=CS_9_numerator=CS_10_numerator=0\n for i in range(len(AE_list)):\n if AE_list[i]<=1:\n CS_1_numerator+=1\n if AE_list[i]<=2:\n CS_2_numerator+=1\n if AE_list[i]<=3:\n CS_3_numerator+=1\n if AE_list[i]<=4:\n CS_4_numerator+=1\n if AE_list[i]<=5:\n CS_5_numerator+=1\n if AE_list[i]<=6:\n CS_6_numerator+=1\n if AE_list[i]<=7:\n CS_7_numerator+=1\n if AE_list[i]<=8:\n CS_8_numerator+=1\n if AE_list[i]<=9:\n CS_9_numerator+=1\n if AE_list[i]<=10:\n CS_10_numerator+=1\n \n CS_1=CS_1_numerator/len(AE_list)\n CS_2=CS_2_numerator/len(AE_list)\n CS_3=CS_3_numerator/len(AE_list)\n CS_4=CS_4_numerator/len(AE_list)\n CS_5=CS_5_numerator/len(AE_list)\n CS_6=CS_6_numerator/len(AE_list)\n CS_7=CS_7_numerator/len(AE_list)\n CS_8=CS_8_numerator/len(AE_list)\n CS_9=CS_9_numerator/len(AE_list)\n CS_10=CS_10_numerator/len(AE_list)\n \n with open(os.path.join(save, 'el_resnet18_nesterov_three_group_train_1_4.csv'), 'a') as f:\n f.write('CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, CS_9, CS_10:\\n')\n f.write('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f\\n'\n % (CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, CS_9, CS_10))\n f.write('\\n')\n\n\n\n\n\"\"\"\nA demo to train and test on the MORPH Album2 dataset with protocol S1-S2-S3.\n\nusage:\npython demo.py --data_root <path_to_data_dir> --data_list <path_to_data_list_dir> --save <path_to_save_dir>\n\n\n\"\"\"\nif __name__ == '__main__':\n fire.Fire(demo)" ]
[ [ "torch.sum", "torch.nn.functional.log_softmax", "torch.cuda.manual_seed_all", "torch.load", "torch.argmax", "torch.manual_seed", "torch.no_grad", "torch.tensor", "torch.nn.KLDivLoss", "torch.cuda.device_count", "torch.cuda.is_available", "torch.optim.lr_scheduler.MultiStepLR", "torch.utils.model_zoo.load_url", "torch.nn.DataParallel", "torch.device" ] ]
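The training script above tallies cumulative scores (CS_1 .. CS_10) with a long if-chain; each CS(t) is just the fraction of test samples whose absolute age error is at most t years. A minimal numpy sketch of the same computation, assuming AE_list holds the absolute errors as in the script (the helper name is hypothetical):

import numpy as np

def cumulative_scores(ae_list, max_threshold=10):
    # CS(t) = fraction of samples with absolute error <= t, for t = 1..max_threshold
    ae = np.asarray(ae_list, dtype=float)
    return [float((ae <= t).mean()) for t in range(1, max_threshold + 1)]

# e.g. CS_1, ..., CS_10 = cumulative_scores(AE_list)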
rexzheng324-c/tensorbay-python-sdk
[ "764c28f34069229daa41474e2f104786dbfa973f" ]
[ "tensorbay/opendataset/SegTrack2/loader.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. Licensed under MIT License.\n#\n# pylint: disable=invalid-name\n# pylint: disable=missing-module-docstring\n\nimport os\nfrom typing import Callable, Dict\n\nimport numpy as np\n\nfrom tensorbay.dataset import Data, Dataset\nfrom tensorbay.label import InstanceMask\nfrom tensorbay.opendataset._utility import glob\n\ntry:\n from PIL import Image\nexcept ModuleNotFoundError:\n from tensorbay.opendataset._utility.mocker import Image # pylint:disable=ungrouped-imports\n\nDATASET_NAME = \"SegTrack2\"\n\n_SEGMENTS_INFO = {\n 1: (\"bird_of_paradise\", \"birdfall\", \"frog\", \"girl\", \"monkey\", \"parachute\", \"soldier\", \"worm\"),\n 2: (\"bmx\", \"cheetah\", \"drift\", \"hummingbird\", \"monkeydog\"),\n 6: (\"penguin\",),\n}\n_FILENAME_REFORMATTERS = (\n lambda filename: filename,\n lambda filename: f\"{os.path.splitext(filename)[0]}.png\",\n)\n_MASK_GETTER = Callable[[str, str, str, int, Callable[[str], str]], InstanceMask]\n\n\ndef SegTrack2(path: str) -> Dataset:\n \"\"\"`SegTrack2 <https://web.engr.oregonstate.edu/~lif/SegTrack2/dataset.html>`_ dataset.\n\n The file structure of SegTrack2 looks like::\n\n <path>\n GroundTruth/\n bird_of_paradise/\n bird_of_paradise_00000.png\n ...\n bmx/\n 1/\n bmx_06668.png\n ...\n 2/\n bmx_06668.png\n ...\n ...\n JPEGImages/\n bird_of_paradise/\n bird_of_paradise_00000.png\n ...\n ...\n\n Arguments:\n path: The root directory of the dataset.\n\n Returns:\n Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.\n\n \"\"\"\n root_path = os.path.join(os.path.abspath(os.path.expanduser(path)))\n image_dir = os.path.join(root_path, \"JPEGImages\")\n original_mask_dir = os.path.join(root_path, \"GroundTruth\")\n mask_dir = os.path.join(root_path, \"Masks\")\n dataset = Dataset(DATASET_NAME)\n dataset.notes.is_continuous = True\n dataset.load_catalog(os.path.join(os.path.dirname(__file__), \"catalog.json\"))\n for instance_num, segment_names in _SEGMENTS_INFO.items():\n for segment_name in segment_names:\n segment = dataset.create_segment(segment_name)\n mask_subdir = os.path.join(mask_dir, segment_name)\n os.makedirs(mask_subdir, exist_ok=True)\n all_mask_subdirs = {\n \"original_mask_subdir\": os.path.join(original_mask_dir, segment_name),\n \"mask_subdir\": mask_subdir,\n }\n\n filename_reformatter = (\n _FILENAME_REFORMATTERS[1]\n if segment_name in {\"penguin\", \"monkeydog\"}\n else _FILENAME_REFORMATTERS[0]\n )\n mask_getter: _MASK_GETTER = (\n _get_cheetah_instance_mask if segment_name == \"cheetah\" else _get_instance_mask\n )\n for image_path in glob(os.path.join(image_dir, segment_name, \"*\")):\n segment.append(\n _get_data(\n image_path,\n all_mask_subdirs,\n instance_num,\n filename_reformatter,\n mask_getter,\n )\n )\n return dataset\n\n\ndef _get_data(\n image_path: str,\n all_mask_subdirs: Dict[str, str],\n instance_num: int,\n filename_reformatter: Callable[[str], str],\n mask_getter: _MASK_GETTER,\n) -> Data:\n data = Data(image_path)\n data.label.instance_mask = mask_getter(\n image_path,\n all_mask_subdirs[\"mask_subdir\"],\n all_mask_subdirs[\"original_mask_subdir\"],\n instance_num,\n filename_reformatter,\n )\n return data\n\n\ndef _get_instance_mask(\n image_path: str,\n mask_subdir: str,\n original_mask_subdir: str,\n instance_num: int,\n filename_reformatter: Callable[[str], str],\n) -> InstanceMask:\n filename = filename_reformatter(os.path.basename(image_path))\n mask_path = os.path.join(mask_subdir, f\"{os.path.splitext(filename)[0]}.png\")\n if instance_num == 1:\n
mask = _get_reformatted_mask(os.path.join(original_mask_subdir, filename))\n else:\n mask = _get_reformatted_mask(os.path.join(original_mask_subdir, \"1\", filename))\n for instance_id in range(2, instance_num + 1):\n alter_mask = np.array(\n Image.open(os.path.join(original_mask_subdir, str(instance_id), filename)),\n )[:, :, 0]\n mask[alter_mask == 255] = instance_id\n\n Image.fromarray(mask).save(mask_path)\n return InstanceMask(mask_path)\n\n\ndef _get_cheetah_instance_mask(\n image_path: str, mask_subdir: str, original_mask_subdir: str, _: int, __: Callable[[str], str]\n) -> InstanceMask:\n filename = os.path.basename(image_path)\n new_filename = f\"{os.path.splitext(filename)[0]}.png\"\n mask_path = os.path.join(mask_subdir, new_filename)\n mask = _get_reformatted_mask(os.path.join(original_mask_subdir, \"1\", filename))\n alter_mask = np.array(\n Image.open(os.path.join(original_mask_subdir, \"2\", new_filename)),\n )[:, :, 0]\n mask[alter_mask == 255] = 2\n\n Image.fromarray(mask).save(mask_path)\n return InstanceMask(mask_path)\n\n\ndef _get_reformatted_mask(original_mask_path: str) -> np.ndarray:\n mask = np.array(Image.open(original_mask_path))[:, :, 0]\n # reformat mask\n # from {background: 0, overlap: 1~254, target: 255}\n # to {background: 0, target: 1, overlap: 255}\n overlap = np.logical_and(mask > 0, mask < 255)\n mask[mask == 255] = 1\n mask[overlap] = 255\n return mask\n" ]
[ [ "numpy.logical_and" ] ]
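A minimal usage sketch for the SegTrack2 loader above, assuming a local copy of the dataset laid out as in its docstring; the path is a placeholder, and iteration follows TensorBay's Dataset/Segment containers:

from tensorbay.opendataset.SegTrack2.loader import SegTrack2

dataset = SegTrack2("path/to/SegTrack2")  # root containing JPEGImages/ and GroundTruth/
for segment in dataset:
    for data in segment:
        # each Data carries an InstanceMask regenerated under <root>/Masks/
        print(data.path, data.label.instance_mask.path)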
gfabbris/pypressruby
[ "ce5b89821b0bd829c0aac85f1b364a6f0202716e" ]
[ "pypressruby/widgets_logic.py" ]
[ "\"\"\"\n Copyright (c) 2018-2021, UChicago Argonne, LLC\n See LICENSE file.\n\"\"\"\n\nimport seabreeze.spectrometers as sb\nfrom PyQt5.QtCore import QObject\nimport numpy as np\nfrom time import sleep\nimport _thread as thread\nfrom pypressruby.logic import make_dummy, fit_data, calculate_pressure\n\n\nclass LogicWidgets(QObject):\n def __init__(self, status, options, plot):\n\n super(LogicWidgets, self).__init__()\n\n self.status = status\n self.spectrometer = options.spectrometer\n self.graph = options.graph\n self.pressure = options.pressure\n self.plot = plot\n\n self.make_connections()\n\n self.spec = None\n self.dark = None\n self.y = None\n self.fit = None\n self.data_line = None\n self.vline = None\n self.stop_signal = 0\n self.timer = None\n self.integration_time = float(\n self.spectrometer.integrate.toPlainText()\n )\n\n self.test_option = False\n\n self.load_devices()\n\n def make_connections(self):\n\n self.spectrometer.start_button.clicked.connect(self.collect_signal)\n self.spectrometer.stop_button.clicked.connect(self.stop_spectrometer)\n\n self.spectrometer.reload_button.clicked.connect(self.load_devices)\n self.spectrometer.spec_name.activated[str].connect(self.update_spec)\n\n self.spectrometer.dark_button.clicked.connect(self.collect_dark)\n self.spectrometer.dark_box.stateChanged.connect(self.dark_msg)\n\n self.spectrometer.integrate.textChanged.connect(self.change_integrate)\n\n self.pressure.fit_button.clicked.connect(self.run_fit)\n self.pressure.clearfit_button.clicked.connect(self.clear_fit)\n self.pressure.pressure_button.clicked.connect(self.get_pressure)\n\n self.plot.canvas.mpl_connect(\"button_press_event\", self.onclick)\n\n self.graph.xmin.valueChanged.connect(self.update_xlimit)\n self.graph.xmax.valueChanged.connect(self.update_xlimit)\n self.graph.xauto_box.stateChanged.connect(self.update_xlimit)\n\n self.graph.ymin.valueChanged.connect(self.update_ylimit)\n self.graph.ymax.valueChanged.connect(self.update_ylimit)\n self.graph.yauto_box.stateChanged.connect(self.update_ylimit)\n\n def load_devices(self):\n\n self.stop_signal = 0\n sleep(self.integration_time / 1000.0 * 1.1)\n\n self.devices = {}\n\n try:\n for device in sb.list_devices():\n self.devices[device.model] = device\n\n self.update_spec_list(self.devices.keys())\n self.update_spec(self.spectrometer.spec_name.currentText())\n except KeyError:\n self.status.showMessage(\"No spectrometer was found!\")\n\n def update_spec_list(self, devices):\n self.spectrometer.spec_name.clear()\n self.spectrometer.spec_name.addItems(devices)\n\n def update_spec(self, text):\n try:\n self.spec.close()\n except AttributeError:\n pass\n\n self.spec = sb.Spectrometer(self.devices[text])\n\n def collect_signal(self):\n\n if self.stop_signal != 0:\n self.stop_spectrometer()\n\n if self.spec is not None:\n\n self.stop_signal = 1\n\n self.integration_time = float(\n self.spectrometer.integrate.toPlainText()\n )\n self.spec.integration_time_micros(1e3 * self.integration_time)\n\n self.x = self.spec.wavelengths()\n self.y = self.spec.intensities()\n\n self.start_figure()\n\n thread.start_new_thread(self.start_spectrometer, ())\n\n self.status.showMessage(\"Ready!\")\n else:\n self.status.showMessage(\"Spectrometer is not loaded!\")\n\n if self.test_option is True:\n\n self.stop_signal = 1\n self.x = np.linspace(650, 850, 1000)\n self.y = (\n make_dummy(self.x)\n + (np.random.rand(self.x.size) - 0.5) * 10\n )\n\n self.start_figure()\n\n thread.start_new_thread(self.start_dummy, ())\n\n def start_spectrometer(self):\n\n 
while self.stop_signal != 0:\n\n self.y = self.spec.intensities()\n\n if self.spectrometer.dark_box.isChecked():\n if self.dark is not None:\n self.y -= self.dark\n\n def start_dummy(self):\n\n while self.stop_signal != 0:\n sleep(self.integration_time / 1000)\n self.y = (\n make_dummy(self.x, amplitude1=50)\n + (np.random.rand(self.x.size) - 0.5) * 10\n )\n\n def stop_spectrometer(self):\n self.stop_signal = 0\n sleep(self.integration_time * 1.05 / 1000)\n if self.timer:\n self.timer.stop()\n self.timer = None\n\n def collect_dark(self):\n\n if self.spec is None:\n return\n\n if self.stop_signal != 0:\n restart = True\n self.stop_spectrometer()\n else:\n restart = False\n self.integration_time = float(\n self.spectrometer.integrate.toPlainText()\n )\n self.spec.integration_time_micros(1e3 * self.integration_time)\n\n self.dark = self.spec.intensities()\n\n if restart is True:\n self.collect_signal()\n\n self.status.showMessage(\"Ready!\")\n\n def dark_msg(self):\n if self.spectrometer.dark_box.isChecked():\n if self.dark is None:\n self.status.showMessage(\n \"Dark spectrum has not been collected!!\"\n )\n\n def start_figure(self):\n\n if self.data_line is None:\n self.plot.figure.clear()\n self.ax = self.plot.figure.add_subplot(111)\n\n self.ax.set_ylabel(\"Intensity\")\n self.ax.set_xlabel(\"Wavelength (nm)\")\n\n self.ax.tick_params(\n which=\"both\", direction=\"in\", right=True, top=True, labelsize=8\n )\n\n self.data_line = self.ax.plot(self.x, self.y, color=\"black\")[0]\n\n self.timer = self.plot.canvas.new_timer(\n 10, [(self.update_canvas, (), {})]\n )\n self.timer.start()\n\n def update_canvas(self):\n self.data_line.set_ydata(self.y)\n self.update_xlimit()\n self.update_ylimit()\n self.plot.canvas.draw()\n\n def update_xlimit(self):\n\n if self.spec is None:\n return\n\n autox = self.graph.xauto_box.isChecked()\n if autox:\n dx = (self.x.max() - self.x.min()) * 0.05\n self.ax.set_xlim(self.x.min() - dx, self.x.max() + dx)\n else:\n self.ax.set_xlim(self.graph.xmin.value(), self.graph.xmax.value())\n\n if self.timer is None:\n self.single_canvas_update()\n\n def update_ylimit(self):\n\n if self.spec is None:\n return\n\n autoy = self.graph.yauto_box.isChecked()\n if autoy:\n if (self.y.min() == 0) & (self.y.max() == 0):\n self.ax.set_ylim(-1, 1)\n else:\n dy = (self.y.max() - self.y.min()) * 0.05\n self.ax.set_ylim(self.y.min() - dy, self.y.max() + dy)\n else:\n self.ax.set_ylim(self.graph.ymin.value(), self.graph.ymax.value())\n\n if self.timer is None:\n self.single_canvas_update()\n\n def run_fit(self):\n\n if self.spec is None:\n return\n\n xmin, xmax = self.ax.get_xlim()\n result = fit_data(self.x, self.y, xmin=xmin, xmax=xmax)\n\n if type(result) is str:\n self.status.showMessage(result)\n else:\n if self.fit is not None:\n self.ax.lines.remove(self.fit)\n\n self.fit = self.ax.plot(\n self.x, result.eval(x=self.x), color=\"red\"\n )[0]\n\n first = result.best_values[\"peak1_center\"]\n second = result.best_values[\"peak2_center\"]\n\n self.plot_vline(second)\n self.pressure.firstpeak_value.setText(\"{:.2f}\".format(first))\n self.pressure.secondpeak_value.setText(\"{:.2f}\".format(second))\n\n def clear_fit(self):\n\n if self.fit is not None:\n self.ax.lines.remove(self.fit)\n\n def get_pressure(self):\n\n try:\n wavenumber = float(self.pressure.secondpeak_value.toPlainText())\n wavenumber_ref = float(\n self.pressure.secondpeak_reference.toPlainText()\n )\n temperature = float(self.pressure.temperature_value.toPlainText())\n temperature_ref = float(\n 
self.pressure.temperature_reference.toPlainText()\n )\n reference = self.pressure.pressure_reference.currentText()\n\n pressure = calculate_pressure(\n wavenumber,\n wavenumber_ref,\n temperature,\n temperature_ref,\n reference=reference,\n )\n\n self.pressure.print_pressure.setText(\n \"P = {:.2f} GPa\".format(pressure)\n )\n self.status.showMessage(\"Ready\")\n\n except ValueError:\n self.status.showMessage(\"Pressure calculation failed!!!\")\n\n def onclick(self, event):\n\n if self.data_line:\n if event.xdata:\n self.plot_vline(event.xdata)\n self.pressure.secondpeak_value.setText(\n \"{:.2f}\".format(event.xdata)\n )\n\n def single_canvas_update(self):\n self.plot.canvas.draw()\n self.plot.canvas.flush_events()\n\n def plot_vline(self, x0):\n\n if self.vline:\n self.ax.lines.remove(self.vline)\n self.vline = self.ax.axvline(x=x0, ls=\"--\", lw=1, color=\"blue\")\n if self.timer is None:\n self.single_canvas_update()\n\n def change_integrate(self):\n\n if self.spec is None:\n return\n\n try:\n value = float(self.spectrometer.integrate.toPlainText())\n if value > 1:\n if self.spectrometer.dark_box.isChecked():\n self.collect_dark()\n else:\n self.integration_time = float(\n self.spectrometer.integrate.toPlainText()\n )\n if self.test_option is False:\n self.spec.integration_time_micros(\n 1e3 * self.integration_time\n )\n except ValueError:\n pass\n" ]
[ [ "numpy.linspace", "numpy.random.rand" ] ]
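The calculate_pressure call in the widget logic above converts a fitted ruby R1 line position (plus reference wavelength and temperatures) into pressure in GPa. For orientation, a widely used quasi-hydrostatic ruby scale is the Mao et al. (1986) relation; the sketch below is illustrative only and not necessarily the exact formula implemented in pypressruby.logic:

def ruby_pressure_mao(wavelength_nm, wavelength0_nm=694.24, a_gpa=1904.0, b=7.665):
    # P (GPa) = (A/B) * ((lambda / lambda0)**B - 1), Mao et al. (1986) quasi-hydrostatic scale
    return (a_gpa / b) * ((wavelength_nm / wavelength0_nm) ** b - 1.0)

print(round(ruby_pressure_mao(700.0), 1))  # ~16.2 GPa for a ~5.8 nm redshift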
danielgis/invest
[ "b9687d249361556b874750368e856ef049447b5a" ]
[ "src/natcap/invest/ndr/ndr.py" ]
[ "\"\"\"InVEST Nutrient Delivery Ratio (NDR) module.\"\"\"\nfrom __future__ import absolute_import\nimport pickle\nimport itertools\nimport logging\nimport os\n\nfrom osgeo import gdal\nfrom osgeo import ogr\nimport numpy\nimport taskgraph\nimport pygeoprocessing\nimport pygeoprocessing.routing\n\nfrom .. import validation\nfrom .. import utils\nfrom . import ndr_core\n\nLOGGER = logging.getLogger('natcap.invest.ndr.ndr')\n\n_OUTPUT_BASE_FILES = {\n 'n_export_path': 'n_export.tif',\n 'p_export_path': 'p_export.tif',\n 'watershed_results_ndr_path': 'watershed_results_ndr.shp',\n }\n\n_INTERMEDIATE_BASE_FILES = {\n 'ic_factor_path': 'ic_factor.tif',\n 'load_n_path': 'load_n.tif',\n 'load_p_path': 'load_p.tif',\n 'modified_load_n_path': 'modified_load_n.tif',\n 'modified_load_p_path': 'modified_load_p.tif',\n 'ndr_n_path': 'ndr_n.tif',\n 'ndr_p_path': 'ndr_p.tif',\n 'runoff_proxy_index_path': 'runoff_proxy_index.tif',\n 's_accumulation_path': 's_accumulation.tif',\n 's_bar_path': 's_bar.tif',\n 's_factor_inverse_path': 's_factor_inverse.tif',\n 'stream_path': 'stream.tif',\n 'sub_crit_len_n_path': 'sub_crit_len_n.tif',\n 'sub_crit_len_p_path': 'sub_crit_len_p.tif',\n 'sub_eff_n_path': 'sub_eff_n.tif',\n 'sub_eff_p_path': 'sub_eff_p.tif',\n 'sub_effective_retention_n_path': 'sub_effective_retention_n.tif',\n 'sub_effective_retention_p_path': 'sub_effective_retention_p.tif',\n 'sub_load_n_path': 'sub_load_n.tif',\n 'sub_load_p_path': 'sub_load_p.tif',\n 'surface_load_n_path': 'surface_load_n.tif',\n 'surface_load_p_path': 'surface_load_p.tif',\n 'sub_ndr_n_path': 'sub_ndr_n.tif',\n 'sub_ndr_p_path': 'sub_ndr_p.tif',\n 'crit_len_n_path': 'crit_len_n.tif',\n 'crit_len_p_path': 'crit_len_p.tif',\n 'd_dn_path': 'd_dn.tif',\n 'd_up_path': 'd_up.tif',\n 'eff_n_path': 'eff_n.tif',\n 'eff_p_path': 'eff_p.tif',\n 'effective_retention_n_path': 'effective_retention_n.tif',\n 'effective_retention_p_path': 'effective_retention_p.tif',\n 'flow_accumulation_path': 'flow_accumulation.tif',\n 'flow_direction_path': 'flow_direction.tif',\n 'thresholded_slope_path': 'thresholded_slope.tif',\n 'dist_to_channel_path': 'dist_to_channel.tif',\n }\n\n_CACHE_BASE_FILES = {\n 'filled_dem_path': 'filled_dem.tif',\n 'aligned_dem_path': 'aligned_dem.tif',\n 'slope_path': 'slope.tif',\n 'aligned_lulc_path': 'aligned_lulc.tif',\n 'aligned_runoff_proxy_path': 'aligned_runoff_proxy.tif',\n 'runoff_mean_pickle_path': 'runoff_mean.pickle',\n 'surface_load_n_pickle_path': 'surface_load_n.pickle',\n 'surface_load_p_pickle_path': 'surface_load_p.pickle',\n 'subsurface_load_n_pickle_path': 'subsurface_load_n.pickle',\n 'subsurface_load_p_pickle_path': 'subsurface_load_p.pickle',\n 'export_n_pickle_path': 'export_n.pickle',\n 'export_p_pickle_path': 'export_p.pickle',\n }\n\n_TARGET_NODATA = -1\n\n\ndef execute(args):\n \"\"\"Nutrient Delivery Ratio.\n\n Parameters:\n args['workspace_dir'] (string): path to current workspace\n args['dem_path'] (string): path to digital elevation map raster\n args['lulc_path'] (string): a path to landcover map raster\n args['runoff_proxy_path'] (string): a path to a runoff proxy raster\n args['watersheds_path'] (string): path to the watershed shapefile\n args['biophysical_table_path'] (string): path to csv table on disk\n containing nutrient retention values.\n\n For each nutrient type [t] in args['calc_[t]'] that is true, must\n contain the following headers:\n\n 'load_[t]', 'eff_[t]', 'crit_len_[t]'\n\n If args['calc_n'] is True, must also contain the header\n 'proportion_subsurface_n' field.\n\n 
args['calc_p'] (boolean): if True, phosphorous is modeled,\n additionally if True then biophysical table must have p fields in\n them\n args['calc_n'] (boolean): if True nitrogen will be modeled,\n additionally biophysical table must have n fields in them.\n args['results_suffix'] (string): (optional) a text field to append to\n all output files\n args['threshold_flow_accumulation']: a number representing the flow\n accumulation in terms of upstream pixels.\n args['n_workers'] (int): if present, indicates how many worker\n processes should be used in parallel processing. -1 indicates\n single process mode, 0 is single process but non-blocking mode,\n and >= 1 is number of processes.\n\n Returns:\n None\n\n \"\"\"\n def _validate_inputs(nutrients_to_process, lucode_to_parameters):\n \"\"\"Validate common errors in inputs.\n\n Parameters:\n nutrients_to_process (list): list of 'n' and/or 'p'\n lucode_to_parameters (dictionary): biophysical input table mapping\n lucode to dictionary of table parameters. Used to validate\n the correct columns are input\n\n Returns:\n None\n\n Raises:\n ValueError whenever a missing field in the parameter table is\n detected along with a message describing every missing field.\n\n \"\"\"\n # Make sure all the nutrient inputs are good\n if len(nutrients_to_process) == 0:\n raise ValueError(\"Neither phosphorous nor nitrogen was selected\"\n \" to be processed. Choose at least one.\")\n\n # Build up a list that'll let us iterate through all the input tables\n # and check for the required rows, and report errors if something\n # is missing.\n row_header_table_list = []\n\n lu_parameter_row = lucode_to_parameters.values()[0]\n row_header_table_list.append(\n (lu_parameter_row, ['load_', 'eff_', 'crit_len_'],\n args['biophysical_table_path']))\n\n missing_headers = []\n for row, header_prefixes, table_type in row_header_table_list:\n for nutrient_id in nutrients_to_process:\n for header_prefix in header_prefixes:\n header = header_prefix + nutrient_id\n if header not in row:\n missing_headers.append(\n \"Missing header %s from %s\" % (\n header, table_type))\n\n # proportion_subsurface_n is a special case in which phosphorous does\n # not have an equivalent.\n if ('n' in nutrients_to_process and\n 'proportion_subsurface_n' not in lu_parameter_row):\n missing_headers.append(\n \"Missing header proportion_subsurface_n from \" +\n args['biophysical_table_path'])\n\n if len(missing_headers) > 0:\n raise ValueError('\\n'.join(missing_headers))\n\n # Load all the tables for preprocessing\n output_dir = os.path.join(args['workspace_dir'])\n intermediate_output_dir = os.path.join(\n args['workspace_dir'], 'intermediate_outputs')\n output_dir = os.path.join(args['workspace_dir'])\n cache_dir = os.path.join(intermediate_output_dir, 'cache_dir')\n for dir_path in [output_dir, intermediate_output_dir, cache_dir]:\n try:\n os.makedirs(dir_path)\n except OSError:\n pass\n\n n_workers = -1 # single process mode, but adjust if in args\n if 'n_workers' in args:\n n_workers = int(args['n_workers'])\n task_graph = taskgraph.TaskGraph(\n cache_dir, n_workers, reporting_interval=5.0)\n\n file_suffix = utils.make_suffix_string(args, 'results_suffix')\n f_reg = utils.build_file_registry(\n [(_OUTPUT_BASE_FILES, output_dir),\n (_INTERMEDIATE_BASE_FILES, intermediate_output_dir),\n (_CACHE_BASE_FILES, cache_dir)], file_suffix)\n\n # Build up a list of nutrients to process based on what's checked on\n nutrients_to_process = []\n for nutrient_id in ['n', 'p']:\n if args['calc_' + 
nutrient_id]:\n nutrients_to_process.append(nutrient_id)\n\n lucode_to_parameters = utils.build_lookup_from_csv(\n args['biophysical_table_path'], 'lucode')\n\n _validate_inputs(nutrients_to_process, lucode_to_parameters)\n\n # these are used for aggregation in the last step\n field_pickle_map = {}\n field_header_order_list = []\n\n create_vector_task = task_graph.add_task(\n func=create_vector_copy,\n args=(args['watersheds_path'], f_reg['watershed_results_ndr_path']),\n target_path_list=[f_reg['watershed_results_ndr_path']],\n task_name='create target vector')\n\n dem_info = pygeoprocessing.get_raster_info(args['dem_path'])\n\n base_raster_list = [\n args['dem_path'], args['lulc_path'], args['runoff_proxy_path']]\n aligned_raster_list = [\n f_reg['aligned_dem_path'], f_reg['aligned_lulc_path'],\n f_reg['aligned_runoff_proxy_path']]\n align_raster_task = task_graph.add_task(\n func=pygeoprocessing.align_and_resize_raster_stack,\n args=(\n base_raster_list, aligned_raster_list,\n ['near']*len(base_raster_list), dem_info['pixel_size'],\n 'intersection'),\n kwargs={\n 'base_vector_path_list': [args['watersheds_path']],\n 'vector_mask_options': {\n 'mask_vector_path': args['watersheds_path']}},\n target_path_list=aligned_raster_list,\n task_name='align rasters')\n\n fill_pits_task = task_graph.add_task(\n func=pygeoprocessing.routing.fill_pits,\n args=(\n (f_reg['aligned_dem_path'], 1), f_reg['filled_dem_path']),\n kwargs={'working_dir': cache_dir},\n dependent_task_list=[align_raster_task],\n target_path_list=[f_reg['filled_dem_path']],\n task_name='fill pits')\n\n flow_dir_task = task_graph.add_task(\n func=pygeoprocessing.routing.flow_dir_mfd,\n args=(\n (f_reg['filled_dem_path'], 1), f_reg['flow_direction_path']),\n kwargs={'working_dir': cache_dir},\n dependent_task_list=[fill_pits_task],\n target_path_list=[f_reg['flow_direction_path']],\n task_name='flow dir')\n\n flow_accum_task = task_graph.add_task(\n func=pygeoprocessing.routing.flow_accumulation_mfd,\n args=(\n (f_reg['flow_direction_path'], 1),\n f_reg['flow_accumulation_path']),\n target_path_list=[f_reg['flow_accumulation_path']],\n dependent_task_list=[flow_dir_task],\n task_name='flow accum')\n\n stream_extraction_task = task_graph.add_task(\n func=pygeoprocessing.routing.extract_streams_mfd,\n args=(\n (f_reg['flow_accumulation_path'], 1),\n (f_reg['flow_direction_path'], 1),\n float(args['threshold_flow_accumulation']), f_reg['stream_path']),\n target_path_list=[f_reg['stream_path']],\n dependent_task_list=[flow_accum_task],\n task_name='stream extraction')\n\n calculate_slope_task = task_graph.add_task(\n func=pygeoprocessing.calculate_slope,\n args=((f_reg['filled_dem_path'], 1), f_reg['slope_path']),\n target_path_list=[f_reg['slope_path']],\n dependent_task_list=[stream_extraction_task],\n task_name='calculate slope')\n\n threshold_slope_task = task_graph.add_task(\n func=_slope_proportion_and_threshold,\n args=(f_reg['slope_path'], f_reg['thresholded_slope_path']),\n target_path_list=[f_reg['thresholded_slope_path']],\n dependent_task_list=[calculate_slope_task],\n task_name='threshold slope')\n\n runoff_proxy_index_task = task_graph.add_task(\n func=_normalize_raster,\n args=((f_reg['aligned_runoff_proxy_path'], 1),\n f_reg['runoff_proxy_index_path']),\n target_path_list=[f_reg['runoff_proxy_index_path']],\n dependent_task_list=[align_raster_task],\n task_name='runoff proxy mean')\n\n s_task = task_graph.add_task(\n func=pygeoprocessing.routing.flow_accumulation_mfd,\n args=((f_reg['flow_direction_path'], 1), 
f_reg['s_accumulation_path']),\n kwargs={\n 'weight_raster_path_band': (f_reg['thresholded_slope_path'], 1)},\n target_path_list=[f_reg['s_accumulation_path']],\n dependent_task_list=[flow_dir_task, threshold_slope_task],\n task_name='route s')\n\n s_bar_task = task_graph.add_task(\n func=s_bar_calculate,\n args=(f_reg['s_accumulation_path'], f_reg['flow_accumulation_path'],\n f_reg['s_bar_path']),\n target_path_list=[f_reg['s_bar_path']],\n dependent_task_list=[s_task, flow_accum_task],\n task_name='calculate s bar')\n\n d_up_task = task_graph.add_task(\n func=d_up_calculation,\n args=(f_reg['s_bar_path'], f_reg['flow_accumulation_path'],\n f_reg['d_up_path']),\n target_path_list=[f_reg['d_up_path']],\n dependent_task_list=[s_bar_task, flow_accum_task],\n task_name='d up')\n\n s_inv_task = task_graph.add_task(\n func=invert_raster_values,\n args=(f_reg['thresholded_slope_path'], f_reg['s_factor_inverse_path']),\n target_path_list=[f_reg['s_factor_inverse_path']],\n dependent_task_list=[threshold_slope_task],\n task_name='s inv')\n\n d_dn_task = task_graph.add_task(\n func=pygeoprocessing.routing.distance_to_channel_mfd,\n args=(\n (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1),\n f_reg['d_dn_path']),\n kwargs={'weight_raster_path_band': (\n f_reg['s_factor_inverse_path'], 1)},\n dependent_task_list=[stream_extraction_task, s_inv_task],\n target_path_list=[f_reg['d_dn_path']],\n task_name='d dn')\n\n dist_to_channel_task = task_graph.add_task(\n func=pygeoprocessing.routing.distance_to_channel_mfd,\n args=(\n (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1),\n f_reg['dist_to_channel_path']),\n dependent_task_list=[stream_extraction_task],\n target_path_list=[f_reg['dist_to_channel_path']],\n task_name='dist to channel')\n\n ic_task = task_graph.add_task(\n func=calculate_ic,\n args=(\n f_reg['d_up_path'], f_reg['d_dn_path'], f_reg['ic_factor_path']),\n target_path_list=[f_reg['ic_factor_path']],\n dependent_task_list=[d_dn_task, d_up_task],\n task_name='calc ic')\n\n for nutrient in nutrients_to_process:\n load_path = f_reg['load_%s_path' % nutrient]\n modified_load_path = f_reg['modified_load_%s_path' % nutrient]\n # Perrine says that 'n' is the only case where we could consider a\n # prop subsurface component. 
So there's a special case for that.\n if nutrient == 'n':\n subsurface_proportion_type = 'proportion_subsurface_n'\n else:\n subsurface_proportion_type = None\n load_task = task_graph.add_task(\n func=_calculate_load,\n args=(\n f_reg['aligned_lulc_path'], lucode_to_parameters,\n 'load_%s' % nutrient, load_path),\n dependent_task_list=[align_raster_task],\n target_path_list=[load_path],\n task_name='%s load' % nutrient)\n\n modified_load_task = task_graph.add_task(\n func=_multiply_rasters,\n args=([load_path, f_reg['runoff_proxy_index_path']],\n _TARGET_NODATA, modified_load_path),\n target_path_list=[modified_load_path],\n dependent_task_list=[load_task, runoff_proxy_index_task],\n task_name='modified load %s' % nutrient)\n\n surface_load_path = f_reg['surface_load_%s_path' % nutrient]\n surface_load_task = task_graph.add_task(\n func=_map_surface_load,\n args=(modified_load_path, f_reg['aligned_lulc_path'],\n lucode_to_parameters, subsurface_proportion_type,\n surface_load_path),\n target_path_list=[surface_load_path],\n dependent_task_list=[modified_load_task, align_raster_task],\n task_name='map surface load %s' % nutrient)\n\n subsurface_load_path = f_reg['sub_load_%s_path' % nutrient]\n subsurface_load_task = task_graph.add_task(\n func=_map_subsurface_load,\n args=(modified_load_path, f_reg['aligned_lulc_path'],\n lucode_to_parameters,\n subsurface_proportion_type, subsurface_load_path),\n target_path_list=[subsurface_load_path],\n dependent_task_list=[modified_load_task, align_raster_task],\n task_name='map subsurface load %s' % nutrient)\n\n eff_path = f_reg['eff_%s_path' % nutrient]\n eff_task = task_graph.add_task(\n func=_map_lulc_to_val_mask_stream,\n args=(\n f_reg['aligned_lulc_path'], f_reg['stream_path'],\n lucode_to_parameters, 'eff_%s' % nutrient, eff_path),\n target_path_list=[eff_path],\n dependent_task_list=[align_raster_task, stream_extraction_task],\n task_name='ret eff %s' % nutrient)\n\n crit_len_path = f_reg['crit_len_%s_path' % nutrient]\n crit_len_task = task_graph.add_task(\n func=_map_lulc_to_val_mask_stream,\n args=(\n f_reg['aligned_lulc_path'], f_reg['stream_path'],\n lucode_to_parameters, 'crit_len_%s' % nutrient, crit_len_path),\n target_path_list=[crit_len_path],\n dependent_task_list=[align_raster_task, stream_extraction_task],\n task_name='ret eff %s' % nutrient)\n\n effective_retention_path = (\n f_reg['effective_retention_%s_path' % nutrient])\n ndr_eff_task = task_graph.add_task(\n func=ndr_core.ndr_eff_calculation,\n args=(\n f_reg['flow_direction_path'], f_reg['stream_path'], eff_path,\n crit_len_path, effective_retention_path),\n target_path_list=[effective_retention_path],\n dependent_task_list=[\n stream_extraction_task, eff_task, crit_len_task],\n task_name='eff ret %s' % nutrient)\n\n ndr_path = f_reg['ndr_%s_path' % nutrient]\n ndr_task = task_graph.add_task(\n func=_calculate_ndr,\n args=(\n effective_retention_path, f_reg['ic_factor_path'],\n float(args['k_param']), ndr_path),\n target_path_list=[ndr_path],\n dependent_task_list=[ndr_eff_task, ic_task],\n task_name='calc ndr %s' % nutrient)\n\n sub_ndr_path = f_reg['sub_ndr_%s_path' % nutrient]\n sub_ndr_task = task_graph.add_task(\n func=_calculate_sub_ndr,\n args=(\n float(args['subsurface_eff_%s' % nutrient]),\n float(args['subsurface_critical_length_%s' % nutrient]),\n f_reg['dist_to_channel_path'], sub_ndr_path),\n target_path_list=[sub_ndr_path],\n dependent_task_list=[dist_to_channel_task],\n task_name='sub ndr %s' % nutrient)\n\n export_path = f_reg['%s_export_path' % nutrient]\n 
calculate_export_task = task_graph.add_task(\n func=_calculate_export,\n args=(\n surface_load_path, ndr_path, subsurface_load_path,\n sub_ndr_path, export_path),\n target_path_list=[export_path],\n dependent_task_list=[\n load_task, ndr_task, surface_load_task, subsurface_load_task,\n sub_ndr_task],\n task_name='export %s' % nutrient)\n\n aggregate_export_task = task_graph.add_task(\n func=_aggregate_and_pickle_total,\n args=(\n (export_path, 1), f_reg['watershed_results_ndr_path'],\n f_reg['export_%s_pickle_path' % nutrient]),\n target_path_list=[f_reg['export_%s_pickle_path' % nutrient]],\n dependent_task_list=[calculate_export_task],\n task_name='aggregate %s export' % nutrient)\n\n aggregate_surface_load_task = task_graph.add_task(\n func=_aggregate_and_pickle_total,\n args=(\n (surface_load_path, 1), f_reg['watershed_results_ndr_path'],\n f_reg['surface_load_%s_pickle_path' % nutrient]),\n target_path_list=[f_reg['surface_load_%s_pickle_path' % nutrient]],\n dependent_task_list=[surface_load_task, create_vector_task],\n task_name='aggregate %s surface load' % nutrient)\n\n aggregate_subsurface_load_task = task_graph.add_task(\n func=_aggregate_and_pickle_total,\n args=(\n (subsurface_load_path, 1), f_reg['watershed_results_ndr_path'],\n f_reg['subsurface_load_%s_pickle_path' % nutrient]),\n target_path_list=[\n f_reg['subsurface_load_%s_pickle_path' % nutrient]],\n dependent_task_list=[subsurface_load_task, create_vector_task],\n task_name='aggregate %s subsurface load' % nutrient)\n\n field_pickle_map['surf_%s_ld' % nutrient] = (\n f_reg['surface_load_%s_pickle_path' % nutrient])\n field_pickle_map['sub_%s_ld' % nutrient] = (\n f_reg['subsurface_load_%s_pickle_path' % nutrient])\n field_pickle_map['%s_exp_tot' % nutrient] = (\n f_reg['export_%s_pickle_path' % nutrient])\n field_header_order_list = (\n [x % nutrient for x in [\n 'surf_%s_ld', 'sub_%s_ld', '%s_exp_tot']] +\n field_header_order_list)\n\n task_graph.close()\n task_graph.join()\n\n LOGGER.info('Writing summaries to output shapefile')\n _add_fields_to_shapefile(\n field_pickle_map, field_header_order_list,\n f_reg['watershed_results_ndr_path'])\n\n LOGGER.info(r'NDR complete!')\n LOGGER.info(r' _ _ ____ ____ ')\n LOGGER.info(r' | \\ |\"| | _\"\\U | _\"\\ u ')\n LOGGER.info(r'<| \\| |>/| | | |\\| |_) |/ ')\n LOGGER.info(r'U| |\\ |uU| |_| |\\| _ < ')\n LOGGER.info(r' |_| \\_| |____/ u|_| \\_\\ ')\n LOGGER.info(r' || \\\\,-.|||_ // \\\\_ ')\n LOGGER.info(r' (_\") (_/(__)_) (__) (__) ')\n\n\ndef _slope_proportion_and_threshold(slope_path, target_threshold_slope_path):\n \"\"\"Rescale slope to proportion and threshold to between 0.005 and 1.0.\n\n Parameters:\n slope_path (string): a raster with slope values in percent.\n target_threshold_slope_path (string): generated raster with slope\n values as a proportion (100% is 1.0) and thresholded to values\n between 0.005 and 1.0.\n\n Returns:\n None.\n\n \"\"\"\n slope_nodata = pygeoprocessing.get_raster_info(slope_path)['nodata'][0]\n\n def _slope_proportion_and_threshold_op(slope):\n \"\"\"Rescale and threshold slope between 0.005 and 1.0.\"\"\"\n valid_mask = slope != slope_nodata\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = slope_nodata\n slope_fraction = slope[valid_mask] / 100\n slope_fraction[slope_fraction < 0.005] = 0.005\n slope_fraction[slope_fraction > 1.0] = 1.0\n result[valid_mask] = slope_fraction\n return result\n\n pygeoprocessing.raster_calculator(\n [(slope_path, 1)], _slope_proportion_and_threshold_op,\n 
target_threshold_slope_path, gdal.GDT_Float32, slope_nodata)\n\n\ndef _add_fields_to_shapefile(\n field_pickle_map, field_header_order, target_vector_path):\n \"\"\"Add fields and values to an OGR layer open for writing.\n\n Parameters:\n field_pickle_map (dict): maps field name to a pickle file that is a\n result of pygeoprocessing.zonal_stats with FIDs that match\n `target_vector_path`.\n field_header_order (list of string): a list of field headers in the\n order to appear in the output table.\n target_vector_path (string): path to target vector file.\n\n Returns:\n None.\n\n \"\"\"\n target_vector = gdal.OpenEx(\n target_vector_path, gdal.OF_VECTOR | gdal.GA_Update)\n target_layer = target_vector.GetLayer()\n field_summaries = {}\n for field_name in field_header_order:\n field_def = ogr.FieldDefn(field_name, ogr.OFTReal)\n field_def.SetWidth(24)\n field_def.SetPrecision(11)\n target_layer.CreateField(field_def)\n with open(field_pickle_map[field_name]) as pickle_file:\n field_summaries[field_name] = pickle.load(pickle_file)\n\n for feature in target_layer:\n fid = feature.GetFID()\n for field_name in field_header_order:\n feature.SetField(\n field_name, float(field_summaries[field_name][fid]['sum']))\n # Save back to datasource\n target_layer.SetFeature(feature)\n target_layer = None\n target_vector = None\n\n\[email protected]_validator\ndef validate(args, limit_to=None):\n \"\"\"Validate args to ensure they conform to `execute`'s contract.\n\n Parameters:\n args (dict): dictionary of key(str)/value pairs where keys and\n values are specified in `execute` docstring.\n limit_to (str): (optional) if not None indicates that validation\n should only occur on the args[limit_to] value. The intent that\n individual key validation could be significantly less expensive\n than validating the entire `args` dictionary.\n\n Returns:\n list of ([invalid key_a, invalid_keyb, ...], 'warning/error message')\n tuples. Where an entry indicates that the invalid keys caused\n the error message in the second part of the tuple. 
This should\n be an empty list if validation succeeds.\n\n \"\"\"\n missing_key_list = []\n no_value_list = []\n validation_error_list = []\n\n required_keys = [\n 'workspace_dir',\n 'dem_path',\n 'lulc_path',\n 'runoff_proxy_path',\n 'watersheds_path',\n 'biophysical_table_path',\n 'calc_p',\n 'calc_n',\n 'threshold_flow_accumulation',\n 'k_param']\n\n if 'calc_n' in args and args['calc_n']:\n required_keys.extend([\n 'subsurface_critical_length_n', 'subsurface_eff_n'])\n\n if 'calc_p' in args and args['calc_p']:\n required_keys.extend([\n 'subsurface_critical_length_p', 'subsurface_eff_p'])\n\n for key in required_keys:\n if limit_to is None or limit_to == key:\n if key not in args:\n missing_key_list.append(key)\n elif args[key] in ['', None]:\n no_value_list.append(key)\n\n if len(missing_key_list) > 0:\n # if there are missing keys, we have raise KeyError to stop hard\n raise KeyError(\n \"The following keys were expected in `args` but were missing \" +\n ', '.join(missing_key_list))\n\n if limit_to is None and (not args['calc_p'] and not args['calc_n']):\n validation_error_list.append(\n (['calc_p', 'calc_n', 'dem_path', 'lulc_path'],\n \"At least nitrogen or phosphorous must be selected\"))\n\n if len(no_value_list) > 0:\n validation_error_list.append(\n (no_value_list, 'parameter has no value'))\n\n file_type_list = [\n ('lulc_path', 'raster'),\n ('dem_path', 'raster'),\n ('runoff_proxy_path', 'raster'),\n ('biophysical_table_path', 'table'),\n ('watersheds_path', 'vector')]\n\n # check that existing/optional files are the correct types\n with utils.capture_gdal_logging():\n for key, key_type in file_type_list:\n if (limit_to is None or limit_to == key) and key in args:\n if not os.path.exists(args[key]):\n validation_error_list.append(\n ([key], 'not found on disk'))\n continue\n if key_type == 'raster':\n raster = gdal.OpenEx(args[key], gdal.OF_RASTER)\n if raster is None:\n validation_error_list.append(\n ([key], 'not a raster'))\n del raster\n elif key_type == 'vector':\n vector = gdal.OpenEx(args[key], gdal.OF_VECTOR)\n if vector is None:\n validation_error_list.append(\n ([key], 'not a vector'))\n del vector\n\n return validation_error_list\n\n\ndef _normalize_raster(base_raster_path_band, target_normalized_raster_path):\n \"\"\"Calculate normalize raster by dividing by the mean value.\n\n Parameters:\n base_raster_path_band (tuple): raster path/band tuple to calculate\n mean.\n target_normalized_raster_path (string): path to target normalized\n raster from base_raster_path_band.\n\n Returns:\n None.\n\n \"\"\"\n value_sum = 0.0\n value_count = 0.0\n base_nodata = pygeoprocessing.get_raster_info(\n base_raster_path_band[0])['nodata'][base_raster_path_band[1]-1]\n for _, raster_block in pygeoprocessing.iterblocks(\n base_raster_path_band):\n valid_block = raster_block[~numpy.isclose(raster_block, base_nodata)]\n value_sum += numpy.sum(valid_block)\n value_count += valid_block.size\n\n value_mean = value_sum\n if value_count > 0.0:\n value_mean /= value_count\n\n def _normalize_raster_op(array):\n \"\"\"Divide values by mean.\"\"\"\n valid_mask = ~numpy.isclose(array, base_nodata)\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = base_nodata\n result[valid_mask] = array[valid_mask]\n if value_mean != 0:\n result[valid_mask] /= value_mean\n return result\n\n pygeoprocessing.raster_calculator(\n [base_raster_path_band], _normalize_raster_op,\n target_normalized_raster_path, gdal.GDT_Float32, base_nodata)\n\n\ndef _calculate_load(\n lulc_raster_path, 
lucode_to_parameters, load_type,\n target_load_raster):\n \"\"\"Calculate load raster by mapping landcover and multiplying by area.\n\n Parameters:\n lulc_raster_path (string): path to integer landcover raster.\n lucode_to_parameters (dict): a mapping of landcover IDs to a\n dictionary indexed by the value of `load_{load_type}` that\n represents a per-area nutrient load.\n load_type (string): represent nutrient to map, either 'load_n' or\n 'load_p'.\n target_load_raster (string): path to target raster that will have\n total load per pixel.\n\n Returns:\n None.\n\n \"\"\"\n lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path)\n nodata_landuse = lulc_raster_info['nodata'][0]\n cell_area_ha = abs(numpy.prod(lulc_raster_info['pixel_size'])) * 0.0001\n\n def _map_load_op(lucode_array):\n \"\"\"Convert unit load to total load & handle nodata.\"\"\"\n result = numpy.empty(lucode_array.shape)\n result[:] = _TARGET_NODATA\n for lucode in numpy.unique(lucode_array):\n if lucode != nodata_landuse:\n result[lucode_array == lucode] = (\n lucode_to_parameters[lucode][load_type] *\n cell_area_ha)\n return result\n\n pygeoprocessing.raster_calculator(\n [(lulc_raster_path, 1)], _map_load_op, target_load_raster,\n gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef _multiply_rasters(raster_path_list, target_nodata, target_result_path):\n \"\"\"Multiply the rasters in `raster_path_list`.\n\n Parameters:\n raster_path_list (list): list of single band raster paths.\n target_nodata (float): desired target nodata value.\n target_result_path (string): path to float 32 target raster\n multiplied where all rasters are not nodata.\n\n Returns:\n None.\n\n \"\"\"\n def _mult_op(*array_nodata_list):\n \"\"\"Multiply non-nodata stacks.\"\"\"\n result = numpy.empty(array_nodata_list[0].shape)\n result[:] = target_nodata\n valid_mask = ~numpy.isclose(\n array_nodata_list[0], array_nodata_list[1])\n for array, nodata in zip(*[iter(array_nodata_list[2:])]*2):\n valid_mask &= ~numpy.isclose(array, nodata)\n result[valid_mask] = array_nodata_list[0][valid_mask]\n for array in array_nodata_list[2::2]:\n result[valid_mask] *= array[valid_mask]\n return result\n\n # make a list of (raster_path_band, nodata) tuples, then flatten it\n path_nodata_list = list(itertools.chain(*[\n ((path, 1),\n (pygeoprocessing.get_raster_info(path)['nodata'][0], 'raw'))\n for path in raster_path_list]))\n pygeoprocessing.raster_calculator(\n path_nodata_list, _mult_op, target_result_path,\n gdal.GDT_Float32, target_nodata)\n\n\ndef _map_surface_load(\n modified_load_path, lulc_raster_path, lucode_to_parameters,\n subsurface_proportion_type, target_surface_load_path):\n \"\"\"Calculate surface load from landcover raster.\n\n Parameters:\n modified_load_path (string): path to modified load raster with units\n of kg/pixel.\n lulc_raster_path (string): path to landcover raster.\n lucode_to_parameters (dict): maps landcover codes to a dictionary that\n can be indexed by `subsurface_proportion_type`.\n subsurface_proportion_type (string): if None no subsurface transfer\n is mapped. 
Otherwise indexed from lucode_to_parameters.\n target_surface_load_path (string): path to target raster.\n\n Returns:\n None.\n\n \"\"\"\n lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path)\n nodata_landuse = lulc_raster_info['nodata'][0]\n\n keys = sorted(numpy.array(lucode_to_parameters.keys()))\n if subsurface_proportion_type is not None:\n subsurface_values = numpy.array(\n [lucode_to_parameters[x][subsurface_proportion_type]\n for x in keys])\n\n def _map_surface_load_op(lucode_array, modified_load_array):\n \"\"\"Convert unit load to total load & handle nodata.\"\"\"\n # If there is no subsurface proportion, the whole load is surface load.\n if subsurface_proportion_type is None:\n return numpy.where(\n lucode_array != nodata_landuse, modified_load_array,\n _TARGET_NODATA)\n\n valid_mask = lucode_array != nodata_landuse\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n index = numpy.digitize(\n lucode_array[valid_mask].ravel(), keys, right=True)\n result[valid_mask] = (\n modified_load_array[valid_mask] * (1 - subsurface_values[index]))\n return result\n\n pygeoprocessing.raster_calculator(\n [(lulc_raster_path, 1), (modified_load_path, 1)],\n _map_surface_load_op, target_surface_load_path, gdal.GDT_Float32,\n _TARGET_NODATA)\n\n\ndef _map_subsurface_load(\n modified_load_path, lulc_raster_path, lucode_to_parameters,\n subsurface_proportion_type, target_sub_load_path):\n \"\"\"Calculate subsurface load from landcover raster.\n\n Parameters:\n modified_load_path (string): path to modified load raster.\n lulc_raster_path (string): path to landcover raster.\n lucode_to_parameters (dict): maps landcover codes to a dictionary that\n can be indexed by `subsurface_proportion_type`.\n subsurface_proportion_type (string): if None no subsurface transfer\n is mapped. 
Otherwise indexed from lucode_to_parameters.\n target_sub_load_path (string): path to target raster.\n\n Returns:\n None.\n\n \"\"\"\n lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path)\n nodata_landuse = lulc_raster_info['nodata'][0]\n\n keys = sorted(numpy.array(lucode_to_parameters.keys()))\n if subsurface_proportion_type is not None:\n subsurface_permeance_values = numpy.array(\n [lucode_to_parameters[x][subsurface_proportion_type]\n for x in keys])\n\n def _map_subsurface_load_op(lucode_array, modified_load_array):\n \"\"\"Convert unit load to total load & handle nodata.\"\"\"\n # If we don't have subsurface, just return 0.0.\n if subsurface_proportion_type is None:\n return numpy.where(\n lucode_array != nodata_landuse, 0, _TARGET_NODATA)\n\n valid_mask = lucode_array != nodata_landuse\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n index = numpy.digitize(\n lucode_array[valid_mask].ravel(), keys, right=True)\n result[valid_mask] = (\n modified_load_array[valid_mask] *\n subsurface_permeance_values[index])\n return result\n\n pygeoprocessing.raster_calculator(\n [(lulc_raster_path, 1), (modified_load_path, 1)],\n _map_subsurface_load_op, target_sub_load_path, gdal.GDT_Float32,\n _TARGET_NODATA)\n\n\ndef _map_lulc_to_val_mask_stream(\n lulc_raster_path, stream_path, lucode_to_parameters, map_id,\n target_eff_path):\n \"\"\"Make retention efficiency raster from landcover.\n\n Parameters:\n lulc_raster_path (string): path to landcover raster.\n stream_path (string): path to stream raster; pixel values are 0\n (no stream) or 1 (stream).\n lucode_to_parameters (dict): mapping of landcover code to a dictionary\n that contains the key in `map_id`.\n map_id (string): the id in the lookup table with values to map\n landcover to efficiency.\n target_eff_path (string): target raster that contains the mapping of\n landcover codes to retention efficiency values except where there\n is a stream in which case the retention efficiency is 0.\n\n Returns:\n None.\n\n \"\"\"\n keys = sorted(numpy.array(lucode_to_parameters.keys()))\n values = numpy.array(\n [lucode_to_parameters[x][map_id] for x in keys])\n\n nodata_landuse = pygeoprocessing.get_raster_info(\n lulc_raster_path)['nodata'][0]\n nodata_stream = pygeoprocessing.get_raster_info(stream_path)['nodata'][0]\n\n def _map_eff_op(lucode_array, stream_array):\n \"\"\"Map efficiency from LULC and handle nodata/streams.\"\"\"\n valid_mask = (\n (lucode_array != nodata_landuse) &\n (stream_array != nodata_stream))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n index = numpy.digitize(\n lucode_array[valid_mask].ravel(), keys, right=True)\n result[valid_mask] = (\n values[index] * (1 - stream_array[valid_mask]))\n return result\n\n pygeoprocessing.raster_calculator(\n ((lulc_raster_path, 1), (stream_path, 1)), _map_eff_op,\n target_eff_path, gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef s_bar_calculate(\n s_accumulation_path, flow_accumulation_path, target_s_bar_path):\n \"\"\"Calculate bar op which is s/flow.\"\"\"\n s_nodata = pygeoprocessing.get_raster_info(\n s_accumulation_path)['nodata'][0]\n flow_nodata = pygeoprocessing.get_raster_info(\n flow_accumulation_path)['nodata'][0]\n\n def _bar_op(s_accumulation, flow_accumulation):\n \"\"\"Calculate bar operation of s_accum / flow_accum.\"\"\"\n valid_mask = (\n (s_accumulation != s_nodata) &\n (flow_accumulation != flow_nodata))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n 
result[valid_mask] = (\n s_accumulation[valid_mask] / flow_accumulation[valid_mask])\n return result\n\n pygeoprocessing.raster_calculator(\n ((s_accumulation_path, 1), (flow_accumulation_path, 1)), _bar_op,\n target_s_bar_path, gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef d_up_calculation(s_bar_path, flow_accum_path, target_d_up_path):\n \"\"\"Calculate d_up = s_bar * sqrt(upstream area).\"\"\"\n s_bar_info = pygeoprocessing.get_raster_info(s_bar_path)\n s_bar_nodata = s_bar_info['nodata'][0]\n flow_accum_nodata = pygeoprocessing.get_raster_info(\n flow_accum_path)['nodata'][0]\n cell_area_m2 = abs(numpy.prod(s_bar_info['pixel_size']))\n\n def _d_up_op(s_bar, flow_accumulation):\n \"\"\"Calculate d_up index.\"\"\"\n valid_mask = (\n (s_bar != s_bar_nodata) &\n (flow_accumulation != flow_accum_nodata))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n s_bar[valid_mask] * numpy.sqrt(\n flow_accumulation[valid_mask] * cell_area_m2))\n return result\n\n pygeoprocessing.raster_calculator(\n [(s_bar_path, 1), (flow_accum_path, 1)], _d_up_op,\n target_d_up_path, gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef invert_raster_values(base_raster_path, target_raster_path):\n \"\"\"Invert (1/x) the values in `base`.\n\n Parameters:\n base_raster_path (string): path to floating point raster.\n target_raster_path (string): path to created output raster whose\n values are 1/x of base.\n\n Returns:\n None.\n\n \"\"\"\n base_nodata = pygeoprocessing.get_raster_info(\n base_raster_path)['nodata'][0]\n\n def _inverse_op(base_val):\n \"\"\"Calculate inverse of S factor.\"\"\"\n result = numpy.empty(base_val.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n valid_mask = ~numpy.isclose(base_val, base_nodata)\n zero_mask = base_val == 0.0\n result[valid_mask & ~zero_mask] = (\n 1.0 / base_val[valid_mask & ~zero_mask])\n result[zero_mask] = 0.0\n return result\n\n pygeoprocessing.raster_calculator(\n ((base_raster_path, 1),), _inverse_op,\n target_raster_path, gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef calculate_ic(d_up_path, d_dn_path, target_ic_path):\n \"\"\"Calculate IC as log_10(d_up/d_dn).\"\"\"\n ic_nodata = float(numpy.finfo(numpy.float32).min)\n d_up_nodata = pygeoprocessing.get_raster_info(d_up_path)['nodata'][0]\n d_dn_nodata = pygeoprocessing.get_raster_info(d_dn_path)['nodata'][0]\n\n def _ic_op(d_up, d_dn):\n \"\"\"Calculate IC0.\"\"\"\n valid_mask = (\n (d_up != d_up_nodata) & (d_dn != d_dn_nodata) & (d_up != 0) &\n (d_dn != 0))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = ic_nodata\n result[valid_mask] = numpy.log10(d_up[valid_mask] / d_dn[valid_mask])\n return result\n\n pygeoprocessing.raster_calculator(\n [(d_up_path, 1), (d_dn_path, 1)], _ic_op,\n target_ic_path, gdal.GDT_Float32, ic_nodata)\n\n\ndef _calculate_ndr(\n effective_retention_path, ic_factor_path, k_param, target_ndr_path):\n \"\"\"Calculate NDR as a function of Equation 4 in the user's guide.\"\"\"\n ic_factor_raster = gdal.OpenEx(ic_factor_path, gdal.OF_RASTER)\n ic_factor_band = ic_factor_raster.GetRasterBand(1)\n ic_min, ic_max, _, _ = ic_factor_band.GetStatistics(0, 1)\n ic_factor_band = None\n ic_factor_raster = None\n ic_0_param = (ic_min + ic_max) / 2.0\n effective_retention_nodata = pygeoprocessing.get_raster_info(\n effective_retention_path)['nodata'][0]\n ic_nodata = pygeoprocessing.get_raster_info(ic_factor_path)['nodata'][0]\n\n def _calculate_ndr_op(effective_retention_array, ic_array):\n \"\"\"Calculate NDR.\"\"\"\n 
valid_mask = (\n (effective_retention_array != effective_retention_nodata) &\n (ic_array != ic_nodata))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n (1.0 - effective_retention_array[valid_mask]) /\n (1.0 + numpy.exp(\n (ic_0_param - ic_array[valid_mask]) / k_param)))\n return result\n\n pygeoprocessing.raster_calculator(\n [(effective_retention_path, 1), (ic_factor_path, 1)],\n _calculate_ndr_op, target_ndr_path, gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef _calculate_sub_ndr(\n eff_sub, crit_len_sub, dist_to_channel_path, target_sub_ndr_path):\n \"\"\"Calculate subsurface: subndr = eff_sub(1-e^(-5*l/crit_len).\"\"\"\n dist_to_channel_nodata = pygeoprocessing.get_raster_info(\n dist_to_channel_path)['nodata'][0]\n\n def _sub_ndr_op(dist_to_channel_array):\n \"\"\"Calculate subsurface NDR.\"\"\"\n valid_mask = ~numpy.isclose(\n dist_to_channel_array, dist_to_channel_nodata)\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = 1.0 - eff_sub * (\n 1-numpy.exp(-5*dist_to_channel_array[valid_mask]/crit_len_sub))\n return result\n\n pygeoprocessing.raster_calculator(\n [(dist_to_channel_path, 1)], _sub_ndr_op, target_sub_ndr_path,\n gdal.GDT_Float32, _TARGET_NODATA)\n\n\ndef _calculate_export(\n surface_load_path, ndr_path, subsurface_load_path,\n subsurface_ndr_path, target_export_path):\n \"\"\"Calculate export.\"\"\"\n load_nodata = pygeoprocessing.get_raster_info(\n surface_load_path)['nodata'][0]\n subsurface_load_nodata = pygeoprocessing.get_raster_info(\n subsurface_load_path)['nodata'][0]\n ndr_nodata = pygeoprocessing.get_raster_info(\n ndr_path)['nodata'][0]\n sub_ndr_nodata = pygeoprocessing.get_raster_info(\n subsurface_ndr_path)['nodata'][0]\n\n def _calculate_export_op(\n modified_load_array, ndr_array, modified_sub_load_array,\n sub_ndr_array):\n \"\"\"Combine NDR and subsurface NDR.\"\"\"\n valid_mask = (\n ~numpy.isclose(modified_load_array, load_nodata) &\n ~numpy.isclose(ndr_array, ndr_nodata) &\n ~numpy.isclose(modified_sub_load_array, subsurface_load_nodata) &\n ~numpy.isclose(sub_ndr_array, sub_ndr_nodata))\n result = numpy.empty(valid_mask.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n modified_load_array[valid_mask] * ndr_array[valid_mask] +\n modified_sub_load_array[valid_mask] *\n sub_ndr_array[valid_mask])\n return result\n\n pygeoprocessing.raster_calculator(\n [(surface_load_path, 1), (ndr_path, 1),\n (subsurface_load_path, 1), (subsurface_ndr_path, 1)],\n _calculate_export_op, target_export_path, gdal.GDT_Float32,\n _TARGET_NODATA)\n\n\ndef _aggregate_and_pickle_total(\n base_raster_path_band, aggregate_vector_path, target_pickle_path):\n \"\"\"Aggregate base raster path to vector path FIDs and pickle result.\n\n Parameters:\n base_raster_path_band (tuple): raster/path band to aggregate over.\n aggregate_vector_path (string): path to vector to use geometry to\n aggregate over.\n target_pickle_path (string): path to a file that will contain the\n result of a pygeoprocessing.zonal_statistics call over\n base_raster_path_band from aggregate_vector_path.\n\n Returns:\n None.\n\n \"\"\"\n result = pygeoprocessing.zonal_statistics(\n base_raster_path_band, aggregate_vector_path,\n working_dir=os.path.dirname(target_pickle_path))\n\n with open(target_pickle_path, 'w') as target_pickle_file:\n pickle.dump(result, target_pickle_file)\n\n\ndef create_vector_copy(base_vector_path, target_vector_path):\n 
\"\"\"Create a copy of base vector.\"\"\"\n if os.path.isfile(target_vector_path):\n os.remove(target_vector_path)\n base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n driver = gdal.GetDriverByName('ESRI Shapefile')\n target_vector = driver.CreateCopy(\n target_vector_path, base_vector)\n target_vector = None # seemingly uncessary but gdal seems to like it.\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.empty", "numpy.finfo", "numpy.isclose", "numpy.exp", "numpy.log10", "numpy.prod", "numpy.array", "numpy.where", "numpy.unique" ] ]
slowglow/pyAudioAnalysis
[ "67587e5b24eb1430641823a6add085a65c8affe5" ]
[ "pyAudioAnalysis/MidTermFeatures.py" ]
[ "from __future__ import print_function\nimport os\nimport time\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.insert(0, os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"../\"))\nfrom pyAudioAnalysis import utilities\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import ShortTermFeatures\neps = 0.00000001\n\n\"\"\" Time-domain audio features \"\"\"\n\n\ndef beat_extraction(short_features, window_size, plot=False):\n \"\"\"\n This function extracts an estimate of the beat rate for a musical signal.\n ARGUMENTS:\n - short_features: a np array (n_feats x numOfShortTermWindows)\n - window_size: window size in seconds\n RETURNS:\n - bpm: estimates of beats per minute\n - ratio: a confidence measure\n \"\"\"\n\n # Features that are related to the beat tracking task:\n selected_features = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16, 17, 18]\n\n max_beat_time = int(round(2.0 / window_size))\n hist_all = np.zeros((max_beat_time,))\n # for each feature\n for ii, i in enumerate(selected_features):\n # dif threshold (3 x Mean of Difs)\n dif_threshold = 2.0 * (np.abs(short_features[i, 0:-1] -\n short_features[i, 1::])).mean()\n if dif_threshold <= 0:\n dif_threshold = 0.0000000000000001\n # detect local maxima\n [pos1, _] = utilities.peakdet(short_features[i, :], dif_threshold)\n position_diffs = []\n # compute histograms of local maxima changes\n for j in range(len(pos1)-1):\n position_diffs.append(pos1[j+1]-pos1[j])\n histogram_times, histogram_edges = \\\n np.histogram(position_diffs, np.arange(0.5, max_beat_time + 1.5))\n hist_centers = (histogram_edges[0:-1] + histogram_edges[1::]) / 2.0\n histogram_times = \\\n histogram_times.astype(float) / short_features.shape[1]\n hist_all += histogram_times\n if plot:\n plt.subplot(9, 2, ii + 1)\n plt.plot(short_features[i, :], 'k')\n for k in pos1:\n plt.plot(k, short_features[i, k], 'k*')\n f1 = plt.gca()\n f1.axes.get_xaxis().set_ticks([])\n f1.axes.get_yaxis().set_ticks([])\n\n if plot:\n plt.show(block=False)\n plt.figure()\n\n # Get beat as the argmax of the agregated histogram:\n max_indices = np.argmax(hist_all)\n bpms = 60 / (hist_centers * window_size)\n bpm = bpms[max_indices]\n # ... 
and the beat ratio:\n ratio = hist_all[max_indices] / (hist_all.sum() + eps)\n\n if plot:\n # filter out >500 beats from plotting:\n hist_all = hist_all[bpms < 500]\n bpms = bpms[bpms < 500]\n\n plt.plot(bpms, hist_all, 'k')\n plt.xlabel('Beats per minute')\n plt.ylabel('Freq Count')\n plt.show(block=True)\n\n return bpm, ratio\n\n\ndef mid_feature_extraction(signal, sampling_rate, mid_window, mid_step,\n short_window, short_step):\n \"\"\"\n Mid-term feature extraction\n \"\"\"\n\n short_features, short_feature_names = \\\n ShortTermFeatures.feature_extraction(signal, sampling_rate,\n short_window, short_step)\n\n n_stats = 2\n n_feats = len(short_features)\n #mid_window_ratio = int(round(mid_window / short_step))\n mid_window_ratio = round((mid_window -\n (short_window - short_step)) / short_step)\n mt_step_ratio = int(round(mid_step / short_step))\n\n mid_features, mid_feature_names = [], []\n for i in range(n_stats * n_feats):\n mid_features.append([])\n mid_feature_names.append(\"\")\n\n # for each of the short-term features:\n for i in range(n_feats):\n cur_position = 0\n num_short_features = len(short_features[i])\n mid_feature_names[i] = short_feature_names[i] + \"_\" + \"mean\"\n mid_feature_names[i + n_feats] = short_feature_names[i] + \"_\" + \"std\"\n\n while cur_position < num_short_features:\n end = cur_position + mid_window_ratio\n if end > num_short_features:\n end = num_short_features\n cur_st_feats = short_features[i][cur_position:end]\n\n mid_features[i].append(np.mean(cur_st_feats))\n mid_features[i + n_feats].append(np.std(cur_st_feats))\n cur_position += mt_step_ratio\n mid_features = np.array(mid_features)\n mid_features = np.nan_to_num(mid_features)\n return mid_features, short_features, mid_feature_names\n\n\n\"\"\" Feature Extraction Wrappers\n - The first two feature extraction wrappers are used to extract \n long-term averaged audio features for a list of WAV files stored in a \n given category.\n It is important to note that, one single feature is extracted per WAV \n file (not the whole sequence of feature vectors)\n\n \"\"\"\n\n\ndef directory_feature_extraction(folder_path, mid_window, mid_step,\n short_window, short_step,\n compute_beat=True):\n \"\"\"\n This function extracts the mid-term features of the WAVE files of a \n particular folder.\n\n The resulting feature vector is extracted by long-term averaging the\n mid-term features.\n Therefore ONE FEATURE VECTOR is extracted for each WAV file.\n\n ARGUMENTS:\n - folder_path: the path of the WAVE directory\n - mid_window, mid_step: mid-term window and step (in seconds)\n - short_window, short_step: short-term window and step (in seconds)\n \"\"\"\n\n mid_term_features = np.array([])\n process_times = []\n\n types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')\n wav_file_list = []\n for files in types:\n wav_file_list.extend(glob.glob(os.path.join(folder_path, files)))\n\n wav_file_list = sorted(wav_file_list) \n wav_file_list2, mid_feature_names = [], []\n for i, file_path in enumerate(wav_file_list):\n print(\"Analyzing file {0:d} of {1:d}: {2:s}\".format(i + 1,\n len(wav_file_list),\n file_path))\n if os.stat(file_path).st_size == 0:\n print(\" (EMPTY FILE -- SKIPPING)\")\n continue \n sampling_rate, signal = audioBasicIO.read_audio_file(file_path)\n if sampling_rate == 0:\n continue \n\n t1 = time.time() \n signal = audioBasicIO.stereo_to_mono(signal)\n if signal.shape[0] < float(sampling_rate)/5:\n print(\" (AUDIO FILE TOO SMALL - SKIPPING)\")\n continue\n wav_file_list2.append(file_path)\n 
if compute_beat:\n mid_features, short_features, mid_feature_names = \\\n mid_feature_extraction(signal, sampling_rate,\n round(mid_window * sampling_rate),\n round(mid_step * sampling_rate),\n round(sampling_rate * short_window),\n round(sampling_rate * short_step))\n beat, beat_conf = beat_extraction(short_features, short_step)\n else:\n mid_features, _, mid_feature_names = \\\n mid_feature_extraction(signal, sampling_rate,\n round(mid_window * sampling_rate),\n round(mid_step * sampling_rate),\n round(sampling_rate * short_window),\n round(sampling_rate * short_step))\n\n mid_features = np.transpose(mid_features)\n mid_features = mid_features.mean(axis=0)\n # long term averaging of mid-term statistics\n if (not np.isnan(mid_features).any()) and \\\n (not np.isinf(mid_features).any()):\n if compute_beat:\n mid_features = np.append(mid_features, beat)\n mid_features = np.append(mid_features, beat_conf)\n mid_feature_names += [\"bpm\",\"ratio\"]\n if len(mid_term_features) == 0:\n # append feature vector\n mid_term_features = mid_features\n else:\n mid_term_features = np.vstack((mid_term_features, mid_features))\n t2 = time.time()\n duration = float(len(signal)) / sampling_rate\n process_times.append((t2 - t1) / duration)\n if len(process_times) > 0:\n print(\"Feature extraction complexity ratio: \"\n \"{0:.1f} x realtime\".format((1.0 / \n np.mean(np.array(process_times)))))\n return mid_term_features, wav_file_list2, mid_feature_names\n\n\ndef multiple_directory_feature_extraction(path_list, mid_window, mid_step,\n short_window, short_step,\n compute_beat=False):\n \"\"\"\n Same as dirWavFeatureExtraction, but instead of a single dir it\n takes a list of paths as input and returns a list of feature matrices.\n EXAMPLE:\n [features, classNames] =\n a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise',\n 'audioData/classSegmentsRec/speech',\n 'audioData/classSegmentsRec/brush-teeth',\n 'audioData/classSegmentsRec/shower'], 1, \n 1, 0.02, 0.02);\n\n It can be used during the training process of a classification model ,\n in order to get feature matrices from various audio classes (each stored in\n a separate path)\n \"\"\"\n\n # feature extraction for each class:\n features = []\n class_names = []\n file_names = []\n for i, d in enumerate(path_list):\n f, fn, feature_names = \\\n directory_feature_extraction(d, mid_window, mid_step,\n short_window, short_step,\n compute_beat=compute_beat)\n if f.shape[0] > 0:\n # if at least one audio file has been found in the provided folder:\n features.append(f)\n file_names.append(fn)\n if d[-1] == os.sep:\n class_names.append(d.split(os.sep)[-2])\n else:\n class_names.append(d.split(os.sep)[-1])\n return features, class_names, file_names\n\n\ndef directory_feature_extraction_no_avg(folder_path, mid_window, mid_step,\n short_window, short_step):\n \"\"\"\n This function extracts the mid-term features of the WAVE\n files of a particular folder without averaging each file.\n\n ARGUMENTS:\n - folder_path: the path of the WAVE directory\n - mid_window, mid_step: mid-term window and step (in seconds)\n - short_window, short_step: short-term window and step (in seconds)\n RETURNS:\n - X: A feature matrix\n - Y: A matrix of file labels\n - filenames:\n \"\"\"\n\n wav_file_list = []\n signal_idx = np.array([])\n mid_features = np.array([])\n types = ('*.wav', '*.aif', '*.aiff', '*.ogg')\n for files in types:\n wav_file_list.extend(glob.glob(os.path.join(folder_path, files)))\n\n wav_file_list = sorted(wav_file_list)\n\n for i, file_path in 
enumerate(wav_file_list):\n sampling_rate, signal = audioBasicIO.read_audio_file(file_path)\n if sampling_rate == 0:\n continue\n signal = audioBasicIO.stereo_to_mono(signal)\n mid_feature_vector, _, _ = \\\n mid_feature_extraction(signal, sampling_rate,\n round(mid_window * sampling_rate),\n round(mid_step * sampling_rate),\n round(sampling_rate * short_window),\n round(sampling_rate * short_step))\n\n mid_feature_vector = np.transpose(mid_feature_vector)\n if len(mid_features) == 0: # append feature vector\n mid_features = mid_feature_vector\n signal_idx = np.zeros((mid_feature_vector.shape[0], ))\n else:\n mid_features = np.vstack((mid_features, mid_feature_vector))\n signal_idx = np.append(signal_idx, i *\n np.ones((mid_feature_vector.shape[0], )))\n\n return mid_features, signal_idx, wav_file_list\n\n\n\"\"\"\nThe following two feature extraction wrappers extract features for given audio\nfiles, however NO LONG-TERM AVERAGING is performed. Therefore, the output for\neach audio file is NOT A SINGLE FEATURE VECTOR but a whole feature matrix.\n\nIn other words, unlike the previous wrappers, the WAV files in these functions\nare not used as uniform samples that need to be averaged, but as sequences.\n\"\"\"\n\n\ndef mid_feature_extraction_to_file(file_path, mid_window, mid_step,\n short_window, short_step, output_file,\n store_short_features=False, store_csv=False,\n plot=False):\n \"\"\"\n This function is used as a wrapper to:\n a) read the content of a WAV file\n b) perform mid-term feature extraction on that signal\n c) write the mid-term feature sequences to a np file\n d) optionally write contents to csv file as well\n e) optionally write short-term features in csv and np file\n \"\"\"\n sampling_rate, signal = audioBasicIO.read_audio_file(file_path)\n signal = audioBasicIO.stereo_to_mono(signal)\n mid_features, short_features, _ = \\\n mid_feature_extraction(signal, sampling_rate,\n round(sampling_rate * mid_window),\n round(sampling_rate * mid_step),\n round(sampling_rate * short_window),\n round(sampling_rate * short_step))\n if store_short_features:\n # save st features to np file\n np.save(output_file + \"_st\", short_features)\n if plot:\n print(\"Short-term np file: \" + output_file + \"_st.npy saved\")\n if store_csv:\n # store st features to CSV file\n np.savetxt(output_file + \"_st.csv\", short_features.T, delimiter=\",\")\n if plot:\n print(\"Short-term CSV file: \" + output_file + \"_st.csv saved\")\n\n # save mt features to np file\n np.save(output_file + \"_mt\", mid_features)\n if plot:\n print(\"Mid-term np file: \" + output_file + \"_mt.npy saved\")\n if store_csv:\n np.savetxt(output_file + \"_mt.csv\", mid_features.T, delimiter=\",\")\n if plot:\n print(\"Mid-term CSV file: \" + output_file + \"_mt.csv saved\")\n\n\ndef mid_feature_extraction_file_dir(folder_path, mid_window, mid_step,\n short_window, short_step,\n store_short_features=False, store_csv=False,\n plot=False):\n types = (folder_path + os.sep + '*.wav',)\n files_list = []\n for t in types:\n files_list.extend(glob.glob(t))\n for f in files_list:\n output_path = f\n mid_feature_extraction_to_file(f, mid_window, mid_step, short_window,\n short_step, output_path,\n store_short_features, store_csv, plot)\n" ]
[ [ "numpy.save", "numpy.ones", "numpy.savetxt", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.nan_to_num", "numpy.vstack", "numpy.transpose", "numpy.append", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "numpy.abs", "numpy.isnan", "numpy.mean", "numpy.zeros", "numpy.argmax", "numpy.arange", "numpy.std", "numpy.isinf", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
t-arae/empirical-JTK_CYCLE-with-asymmetry
[ "f2a5fd76b8ff902443ea9e1824c49f9e66a5e217" ]
[ "bin/mpfit.py" ]
[ "\"\"\"\r\nPerform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.\r\n\r\n AUTHORS\r\n The original version of this software, called LMFIT, was written in FORTRAN\r\n as part of the MINPACK-1 package by XXX.\r\n\r\n Craig Markwardt converted the FORTRAN code to IDL. The information for the\r\n IDL version is:\r\n Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770\r\n [email protected]\r\n UPDATED VERSIONs can be found on my WEB PAGE: \r\n http://cow.physics.wisc.edu/~craigm/idl/idl.html\r\n\r\n Mark Rivers created this Python version from Craig's IDL version.\r\n Mark Rivers, University of Chicago\r\n Building 434A, Argonne National Laboratory\r\n 9700 South Cass Avenue, Argonne, IL 60439\r\n [email protected]\r\n Updated versions can be found at http://cars.uchicago.edu/software\r\n\r\n\r\n DESCRIPTION\r\n\r\n MPFIT uses the Levenberg-Marquardt technique to solve the\r\n least-squares problem. In its typical use, MPFIT will be used to\r\n fit a user-supplied function (the \"model\") to user-supplied data\r\n points (the \"data\") by adjusting a set of parameters. MPFIT is\r\n based upon MINPACK-1 (LMDIF.F) by More' and collaborators.\r\n\r\n For example, a researcher may think that a set of observed data\r\n points is best modelled with a Gaussian curve. A Gaussian curve is\r\n parameterized by its mean, standard deviation and normalization.\r\n MPFIT will, within certain constraints, find the set of parameters\r\n which best fits the data. The fit is \"best\" in the least-squares\r\n sense; that is, the sum of the weighted squared differences between\r\n the model and data is minimized.\r\n\r\n The Levenberg-Marquardt technique is a particular strategy for\r\n iteratively searching for the best fit. This particular\r\n implementation is drawn from MINPACK-1 (see NETLIB), and is much faster\r\n and more accurate than the version provided in the Scientific Python package\r\n in Scientific.Functions.LeastSquares.\r\n This version allows upper and lower bounding constraints to be placed on each\r\n parameter, or the parameter can be held fixed.\r\n\r\n The user-supplied Python function should return an array of weighted\r\n deviations between model and data. In a typical scientific problem\r\n the residuals should be weighted so that each deviate has a\r\n gaussian sigma of 1.0. If X represents values of the independent\r\n variable, Y represents a measurement for each value of X, and ERR\r\n represents the error in the measurements, then the deviates could\r\n be calculated as follows:\r\n\r\n DEVIATES = (Y - F(X)) / ERR\r\n\r\n where F is the analytical function representing the model. You are\r\n recommended to use the convenience functions MPFITFUN and\r\n MPFITEXPR, which are driver functions that calculate the deviates\r\n for you. If ERR are the 1-sigma uncertainties in Y, then\r\n\r\n TOTAL( DEVIATES^2 ) \r\n\r\n will be the total chi-squared value. MPFIT will minimize the\r\n chi-square value. The values of X, Y and ERR are passed through\r\n MPFIT to the user-supplied function via the FUNCTKW keyword.\r\n\r\n Simple constraints can be placed on parameter values by using the\r\n PARINFO keyword to MPFIT. See below for a description of this\r\n keyword.\r\n\r\n MPFIT does not perform more general optimization tasks. See TNMIN\r\n instead. MPFIT is customized, based on MINPACK-1, to the\r\n least-squares minimization problem.\r\n\r\n\r\n USER FUNCTION\r\n\r\n The user must define a function which returns the appropriate\r\n values as specified above. 
The function should return the weighted\r\n deviations between the model and the data. It should also return a status\r\n flag and an optional partial derivative array. For applications which\r\n use finite-difference derivatives -- the default -- the user\r\n function should be declared in the following way:\r\n\r\n def myfunct(p, fjac=None, x=None, y=None, err=None):\r\n # Parameter values are passed in \"p\"\r\n # If fjac==None then partial derivatives should not be\r\n # computed. It will always be None if MPFIT is called with default\r\n # flag.\r\n model = F(x, p)\r\n # Non-negative status value means MPFIT should continue, negative means\r\n # stop the calculation.\r\n status = 0\r\n return [status, (y-model)/err]\r\n\r\n See below for applications with analytical derivatives.\r\n\r\n The keyword parameters X, Y, and ERR in the example above are\r\n suggestive but not required. Any parameters can be passed to\r\n MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and\r\n MPFITEXPR if you need ideas on how to do that. The function *must*\r\n accept a parameter list, P.\r\n\r\n In general there are no restrictions on the number of dimensions in\r\n X, Y or ERR. However the deviates *must* be returned in a\r\n one-dimensional numpy array of type float.\r\n\r\n User functions may also indicate a fatal error condition using the\r\n status return described above. If status is set to a number between\r\n -15 and -1 then MPFIT will stop the calculation and return to the caller.\r\n\r\n\r\n ANALYTIC DERIVATIVES\r\n\r\n In the search for the best-fit solution, MPFIT by default\r\n calculates derivatives numerically via a finite difference\r\n approximation. The user-supplied function need not calculate the\r\n derivatives explicitly. However, if you desire to compute them\r\n analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.\r\n As a practical matter, it is often sufficient and even faster to allow\r\n MPFIT to calculate the derivatives numerically, and so\r\n AUTODERIVATIVE=0 is not necessary.\r\n\r\n If AUTODERIVATIVE=0 is used then the user function must check the parameter\r\n FJAC, and if FJAC!=None then return the partial derivative array in the\r\n return list.\r\n def myfunct(p, fjac=None, x=None, y=None, err=None):\r\n # Parameter values are passed in \"p\"\r\n # If FJAC!=None then partial derivatives must be computed.\r\n # FJAC contains an array of len(p), where each entry\r\n # is 1 if that parameter is free and 0 if it is fixed. \r\n model = F(x, p)\r\n # Non-negative status value means MPFIT should continue, negative means\r\n # stop the calculation.\r\n status = 0\r\n if (fjac != None):\r\n pderiv = numpy.zeros([len(x), len(p)], numpy.float)\r\n for j in range(len(p)):\r\n pderiv[:,j] = FGRAD(x, p, j)\r\n else:\r\n pderiv = None\r\n return [status, (y-model)/err, pderiv]\r\n\r\n where FGRAD(x, p, i) is a user function which must compute the\r\n derivative of the model with respect to parameter P[i] at X. When\r\n finite differencing is used for computing derivatives (ie, when\r\n AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the\r\n derivatives, the parameter FJAC=None. \r\n\r\n Derivatives should be returned in the PDERIV array. PDERIV should be an m x\r\n n array, where m is the number of data points and n is the number\r\n of parameters. dp[i,j] is the derivative at the ith point with\r\n respect to the jth parameter. 
\r\n\r\n The derivatives with respect to fixed parameters are ignored; zero\r\n is an appropriate value to insert for those derivatives. Upon\r\n input to the user function, FJAC is set to a vector with the same\r\n length as P, with a value of 1 for a parameter which is free, and a\r\n value of zero for a parameter which is fixed (and hence no\r\n derivative needs to be calculated).\r\n\r\n If the data is higher than one dimensional, then the *last*\r\n dimension should be the parameter dimension. Example: fitting a\r\n 50x50 image, \"dp\" should be 50x50xNPAR.\r\n\r\n\r\n CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD\r\n\r\n The behavior of MPFIT can be modified with respect to each\r\n parameter to be fitted. A parameter value can be fixed; simple\r\n boundary constraints can be imposed; limitations on the parameter\r\n changes can be imposed; properties of the automatic derivative can\r\n be modified; and parameters can be tied to one another.\r\n\r\n These properties are governed by the PARINFO structure, which is\r\n passed as a keyword parameter to MPFIT.\r\n\r\n PARINFO should be a list of dictionaries, one list entry for each parameter.\r\n Each parameter is associated with one element of the array, in\r\n numerical order. The dictionary can have the following keys\r\n (none are required, keys are case insensitive):\r\n\r\n 'value' - the starting parameter value (but see the START_PARAMS\r\n parameter for more information).\r\n\r\n 'fixed' - a boolean value, whether the parameter is to be held\r\n fixed or not. Fixed parameters are not varied by\r\n MPFIT, but are passed on to MYFUNCT for evaluation.\r\n\r\n 'limited' - a two-element boolean array. If the first/second\r\n element is set, then the parameter is bounded on the\r\n lower/upper side. A parameter can be bounded on both\r\n sides. Both LIMITED and LIMITS must be given\r\n together.\r\n\r\n 'limits' - a two-element float array. Gives the\r\n parameter limits on the lower and upper sides,\r\n respectively. Zero, one or two of these values can be\r\n set, depending on the values of LIMITED. Both LIMITED\r\n and LIMITS must be given together.\r\n\r\n 'parname' - a string, giving the name of the parameter. The\r\n fitting code of MPFIT does not use this tag in any\r\n way. However, the default iterfunct will print the\r\n parameter name if available.\r\n\r\n 'step' - the step size to be used in calculating the numerical\r\n derivatives. If set to zero, then the step size is\r\n computed automatically. Ignored when AUTODERIVATIVE=0.\r\n\r\n 'mpside' - the sidedness of the finite difference when computing\r\n numerical derivatives. This field can take four\r\n values:\r\n\r\n 0 - one-sided derivative computed automatically\r\n 1 - one-sided derivative (f(x+h) - f(x) )/h\r\n -1 - one-sided derivative (f(x) - f(x-h))/h\r\n 2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)\r\n\r\n Where H is the STEP parameter described above. The\r\n \"automatic\" one-sided derivative method will chose a\r\n direction for the finite difference which does not\r\n violate any constraints. The other methods do not\r\n perform this check. The two-sided method is in\r\n principle more precise, but requires twice as many\r\n function evaluations. Default: 0.\r\n\r\n 'mpmaxstep' - the maximum change to be made in the parameter\r\n value. During the fitting process, the parameter\r\n will never be changed by more than this value in\r\n one iteration.\r\n\r\n A value of 0 indicates no maximum. 
Default: 0.\r\n\r\n 'tied' - a string expression which \"ties\" the parameter to other\r\n free or fixed parameters. Any expression involving\r\n constants and the parameter array P is permitted.\r\n Example: if parameter 2 is always to be twice parameter\r\n 1 then use the following: parinfo(2).tied = '2 * p(1)'.\r\n Since they are totally constrained, tied parameters are\r\n considered to be fixed; no errors are computed for them.\r\n [ NOTE: the PARNAME can't be used in expressions. ]\r\n\r\n 'mpprint' - if set to 1, then the default iterfunct will print the\r\n parameter value. If set to 0, the parameter value\r\n will not be printed. This tag can be used to\r\n selectively print only a few parameter values out of\r\n many. Default: 1 (all parameters printed)\r\n\r\n\r\n Future modifications to the PARINFO structure, if any, will involve\r\n adding dictionary tags beginning with the two letters \"MP\".\r\n Therefore programmers are urged to avoid using tags starting with\r\n the same letters; otherwise they are free to include their own\r\n fields within the PARINFO structure, and they will be ignored.\r\n\r\n PARINFO Example:\r\n parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}]*5\r\n parinfo[0]['fixed'] = 1\r\n parinfo[4]['limited'][0] = 1\r\n parinfo[4]['limits'][0] = 50.\r\n values = [5.7, 2.2, 500., 1.5, 2000.]\r\n for i in range(5): parinfo[i]['value']=values[i]\r\n\r\n A total of 5 parameters, with starting values of 5.7,\r\n 2.2, 500, 1.5, and 2000 are given. The first parameter\r\n is fixed at a value of 5.7, and the last parameter is\r\n constrained to be above 50.\r\n\r\n\r\n EXAMPLE\r\n\r\n import mpfit\r\n import numpy\r\n x = numpy.arange(100, dtype=numpy.float)\r\n p0 = [5.7, 2.2, 500., 1.5, 2000.]\r\n y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*numpy.sqrt(x) +\r\n p0[4]*numpy.log(x))\r\n fa = {'x':x, 'y':y, 'err':err}\r\n m = mpfit('myfunct', p0, functkw=fa)\r\n print 'status = ', m.status\r\n if (m.status <= 0): print 'error message = ', m.errmsg\r\n print 'parameters = ', m.params\r\n\r\n Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,\r\n Y, and ERR keyword parameters that are given by FUNCTKW. The\r\n results can be obtained from the returned object m.\r\n\r\n\r\n THEORY OF OPERATION\r\n\r\n There are many specific strategies for function minimization. One\r\n very popular technique is to use function gradient information to\r\n realize the local structure of the function. Near a local minimum\r\n the function value can be Taylor expanded about x0 as follows:\r\n\r\n f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)\r\n ----- --------------- ------------------------------- (1)\r\n Order 0th 1st 2nd\r\n\r\n Here f'(x) is the gradient vector of f at x, and f''(x) is the\r\n Hessian matrix of second derivatives of f at x. The vector x is\r\n the set of function parameters, not the measured data vector. One\r\n can find the minimum of f, f(xm) using Newton's method, and\r\n arrives at the following linear equation:\r\n\r\n f''(x0) . (xm-x0) = - f'(x0) (2)\r\n\r\n If an inverse can be found for f''(x0) then one can solve for\r\n (xm-x0), the step vector from the current position x0 to the new\r\n projected minimum. Here the problem has been linearized (ie, the\r\n gradient information is known to first order). 
f''(x0) is a\r\n symmetric n x n matrix, and should be positive definite.\r\n\r\n The Levenberg - Marquardt technique is a variation on this theme.\r\n It adds an additional diagonal term to the equation which may aid the\r\n convergence properties:\r\n\r\n (f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)\r\n\r\n where I is the identity matrix. When nu is large, the overall\r\n matrix is diagonally dominant, and the iterations follow steepest\r\n descent. When nu is small, the iterations are quadratically\r\n convergent.\r\n\r\n In principle, if f''(x0) and f'(x0) are known then xm-x0 can be\r\n determined. However the Hessian matrix is often difficult or\r\n impossible to compute. The gradient f'(x0) may be easier to\r\n compute, if even by finite difference techniques. So-called\r\n quasi-Newton techniques attempt to successively estimate f''(x0)\r\n by building up gradient information as the iterations proceed.\r\n\r\n In the least squares problem there are further simplifications\r\n which assist in solving eqn (2). The function to be minimized is\r\n a sum of squares:\r\n\r\n f = Sum(hi^2) (3)\r\n\r\n where hi is the ith residual out of m residuals as described\r\n above. This can be substituted back into eqn (2) after computing\r\n the derivatives:\r\n\r\n f' = 2 Sum(hi hi') \r\n f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)\r\n\r\n If one assumes that the parameters are already close enough to a\r\n minimum, then one typically finds that the second term in f'' is\r\n negligible [or, in any case, is too difficult to compute]. Thus,\r\n equation (2) can be solved, at least approximately, using only\r\n gradient information.\r\n\r\n In matrix notation, the combination of eqns (2) and (4) becomes:\r\n\r\n hT' . h' . dx = - hT' . h (5)\r\n\r\n Where h is the residual vector (length m), hT is its transpose, h'\r\n is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The\r\n user function supplies the residual vector h, and in some cases h'\r\n when it is not found by finite differences (see MPFIT_FDJAC2,\r\n which finds h and hT'). Even if dx is not the best absolute step\r\n to take, it does provide a good estimate of the best *direction*,\r\n so often a line minimization will occur along the dx vector\r\n direction.\r\n\r\n The method of solution employed by MINPACK is to form the Q . R\r\n factorization of h', where Q is an orthogonal matrix such that QT .\r\n Q = I, and R is upper right triangular. Using h' = Q . R and the\r\n orthogonality of Q, eqn (5) becomes\r\n\r\n (RT . QT) . (Q . R) . dx = - (RT . QT) . h\r\n RT . R . dx = - RT . QT . h (6)\r\n R . dx = - QT . h\r\n\r\n where the last statement follows because R is upper triangular.\r\n Here, R, QT and h are known so this is a matter of solving for dx.\r\n The routine MPFIT_QRFAC provides the QR factorization of h, with\r\n pivoting, and MPFIT_QRSOLV provides the solution for dx.\r\n\r\n\r\n REFERENCES\r\n\r\n MINPACK-1, Jorge More', available from netlib (www.netlib.org).\r\n \"Optimization Software Guide,\" Jorge More' and Stephen Wright, \r\n SIAM, *Frontiers in Applied Mathematics*, Number 14.\r\n More', Jorge J., \"The Levenberg-Marquardt Algorithm:\r\n Implementation and Theory,\" in *Numerical Analysis*, ed. Watson,\r\n G. 
A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.\r\n\r\n\r\n MODIFICATION HISTORY\r\n\r\n Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM\r\n Copyright (C) 1997-2002, Craig Markwardt\r\n This software is provided as is without any warranty whatsoever.\r\n Permission to use, copy, modify, and distribute modified or\r\n unmodified copies is granted, provided this copyright and disclaimer\r\n are included unchanged.\r\n\r\n Translated from MPFIT (Craig Markwardt's IDL package) to Python,\r\n August, 2002. Mark Rivers\r\n\"\"\"\r\n\r\nimport numpy\r\nimport types\r\n\r\n\f\r\n# Original FORTRAN documentation\r\n# **********\r\n#\r\n# subroutine lmdif\r\n#\r\n# the purpose of lmdif is to minimize the sum of the squares of\r\n# m nonlinear functions in n variables by a modification of\r\n# the levenberg-marquardt algorithm. the user must provide a\r\n# subroutine which calculates the functions. the jacobian is\r\n# then calculated by a forward-difference approximation.\r\n#\r\n# the subroutine statement is\r\n#\r\n# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,\r\n# diag,mode,factor,nprint,info,nfev,fjac,\r\n# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)\r\n#\r\n# where\r\n#\r\n# fcn is the name of the user-supplied subroutine which\r\n# calculates the functions. fcn must be declared\r\n# in an external statement in the user calling\r\n# program, and should be written as follows.\r\n#\r\n# subroutine fcn(m,n,x,fvec,iflag)\r\n# integer m,n,iflag\r\n# double precision x(n),fvec(m)\r\n# ----------\r\n# calculate the functions at x and\r\n# return this vector in fvec.\r\n# ----------\r\n# return\r\n# end\r\n#\r\n# the value of iflag should not be changed by fcn unless\r\n# the user wants to terminate execution of lmdif.\r\n# in this case set iflag to a negative integer.\r\n#\r\n# m is a positive integer input variable set to the number\r\n# of functions.\r\n#\r\n# n is a positive integer input variable set to the number\r\n# of variables. n must not exceed m.\r\n#\r\n# x is an array of length n. on input x must contain\r\n# an initial estimate of the solution vector. on output x\r\n# contains the final estimate of the solution vector.\r\n#\r\n# fvec is an output array of length m which contains\r\n# the functions evaluated at the output x.\r\n#\r\n# ftol is a nonnegative input variable. termination\r\n# occurs when both the actual and predicted relative\r\n# reductions in the sum of squares are at most ftol.\r\n# therefore, ftol measures the relative error desired\r\n# in the sum of squares.\r\n#\r\n# xtol is a nonnegative input variable. termination\r\n# occurs when the relative error between two consecutive\r\n# iterates is at most xtol. therefore, xtol measures the\r\n# relative error desired in the approximate solution.\r\n#\r\n# gtol is a nonnegative input variable. termination\r\n# occurs when the cosine of the angle between fvec and\r\n# any column of the jacobian is at most gtol in absolute\r\n# value. therefore, gtol measures the orthogonality\r\n# desired between the function vector and the columns\r\n# of the jacobian.\r\n#\r\n# maxfev is a positive integer input variable. termination\r\n# occurs when the number of calls to fcn is at least\r\n# maxfev by the end of an iteration.\r\n#\r\n# epsfcn is an input variable used in determining a suitable\r\n# step length for the forward-difference approximation. this\r\n# approximation assumes that the relative errors in the\r\n# functions are of the order of epsfcn. 
if epsfcn is less\r\n# than the machine precision, it is assumed that the relative\r\n# errors in the functions are of the order of the machine\r\n# precision.\r\n#\r\n# diag is an array of length n. if mode = 1 (see\r\n# below), diag is internally set. if mode = 2, diag\r\n# must contain positive entries that serve as\r\n# multiplicative scale factors for the variables.\r\n#\r\n# mode is an integer input variable. if mode = 1, the\r\n# variables will be scaled internally. if mode = 2,\r\n# the scaling is specified by the input diag. other\r\n# values of mode are equivalent to mode = 1.\r\n#\r\n# factor is a positive input variable used in determining the\r\n# initial step bound. this bound is set to the product of\r\n# factor and the euclidean norm of diag*x if nonzero, or else\r\n# to factor itself. in most cases factor should lie in the\r\n# interval (.1,100.). 100. is a generally recommended value.\r\n#\r\n# nprint is an integer input variable that enables controlled\r\n# printing of iterates if it is positive. in this case,\r\n# fcn is called with iflag = 0 at the beginning of the first\r\n# iteration and every nprint iterations thereafter and\r\n# immediately prior to return, with x and fvec available\r\n# for printing. if nprint is not positive, no special calls\r\n# of fcn with iflag = 0 are made.\r\n#\r\n# info is an integer output variable. if the user has\r\n# terminated execution, info is set to the (negative)\r\n# value of iflag. see description of fcn. otherwise,\r\n# info is set as follows.\r\n#\r\n# info = 0 improper input parameters.\r\n#\r\n# info = 1 both actual and predicted relative reductions\r\n# in the sum of squares are at most ftol.\r\n#\r\n# info = 2 relative error between two consecutive iterates\r\n# is at most xtol.\r\n#\r\n# info = 3 conditions for info = 1 and info = 2 both hold.\r\n#\r\n# info = 4 the cosine of the angle between fvec and any\r\n# column of the jacobian is at most gtol in\r\n# absolute value.\r\n#\r\n# info = 5 number of calls to fcn has reached or\r\n# exceeded maxfev.\r\n#\r\n# info = 6 ftol is too small. no further reduction in\r\n# the sum of squares is possible.\r\n#\r\n# info = 7 xtol is too small. no further improvement in\r\n# the approximate solution x is possible.\r\n#\r\n# info = 8 gtol is too small. fvec is orthogonal to the\r\n# columns of the jacobian to machine precision.\r\n#\r\n# nfev is an integer output variable set to the number of\r\n# calls to fcn.\r\n#\r\n# fjac is an output m by n array. the upper n by n submatrix\r\n# of fjac contains an upper triangular matrix r with\r\n# diagonal elements of nonincreasing magnitude such that\r\n#\r\n# t t t\r\n# p *(jac *jac)*p = r *r,\r\n#\r\n# where p is a permutation matrix and jac is the final\r\n# calculated jacobian. column j of p is column ipvt(j)\r\n# (see below) of the identity matrix. the lower trapezoidal\r\n# part of fjac contains information generated during\r\n# the computation of r.\r\n#\r\n# ldfjac is a positive integer input variable not less than m\r\n# which specifies the leading dimension of the array fjac.\r\n#\r\n# ipvt is an integer output array of length n. 
ipvt\r\n# defines a permutation matrix p such that jac*p = q*r,\r\n# where jac is the final calculated jacobian, q is\r\n# orthogonal (not stored), and r is upper triangular\r\n# with diagonal elements of nonincreasing magnitude.\r\n# column j of p is column ipvt(j) of the identity matrix.\r\n#\r\n# qtf is an output array of length n which contains\r\n# the first n elements of the vector (q transpose)*fvec.\r\n#\r\n# wa1, wa2, and wa3 are work arrays of length n.\r\n#\r\n# wa4 is a work array of length m.\r\n#\r\n# subprograms called\r\n#\r\n# user-supplied ...... fcn\r\n#\r\n# minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac\r\n#\r\n# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod\r\n#\r\n# argonne national laboratory. minpack project. march 1980.\r\n# burton s. garbow, kenneth e. hillstrom, jorge j. more\r\n#\r\n# **********\r\n\f\r\nclass mpfit:\r\n def __init__(self, fcn, xall=None, functkw={}, parinfo=None,\r\n ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,\r\n damp=0., maxiter=200, factor=100., nprint=1,\r\n iterfunct='default', iterkw={}, nocovar=0,\r\n fastnorm=0, rescale=0, autoderivative=1, quiet=0,\r\n diag=None, epsfcn=None, debug=0):\r\n \"\"\"\r\nInputs:\r\n fcn:\r\n The function to be minimized. The function should return the weighted\r\n deviations between the model and the data, as described above.\r\n\r\n xall:\r\n An array of starting values for each of the parameters of the model.\r\n The number of parameters should be fewer than the number of measurements.\r\n\r\n This parameter is optional if the parinfo keyword is used (but see\r\n parinfo). The parinfo keyword provides a mechanism to fix or constrain\r\n individual parameters. \r\n\r\nKeywords:\r\n\r\n autoderivative:\r\n If this is set, derivatives of the function will be computed\r\n automatically via a finite differencing procedure. If not set, then\r\n fcn must provide the (analytical) derivatives.\r\n Default: set (=1) \r\n NOTE: to supply your own analytical derivatives,\r\n explicitly pass autoderivative=0\r\n\r\n fastnorm:\r\n Set this keyword to select a faster algorithm to compute sum-of-square\r\n values internally. For systems with large numbers of data points, the\r\n standard algorithm can become prohibitively slow because it cannot be\r\n vectorized well. By setting this keyword, MPFIT will run faster, but\r\n it will be more prone to floating point overflows and underflows. Thus, setting\r\n this keyword may sacrifice some stability in the fitting process.\r\n Default: clear (=0)\r\n\r\n ftol:\r\n A nonnegative input variable. Termination occurs when both the actual\r\n and predicted relative reductions in the sum of squares are at most\r\n ftol (and status is accordingly set to 1 or 3). Therefore, ftol\r\n measures the relative error desired in the sum of squares.\r\n Default: 1E-10\r\n\r\n functkw:\r\n A dictionary which contains the parameters to be passed to the\r\n user-supplied function specified by fcn via the standard Python\r\n keyword dictionary mechanism. This is the way you can pass additional\r\n data to your user-supplied function without using global variables.\r\n\r\n Consider the following example:\r\n if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],\r\n 'errval':[1.,1.,1.] }\r\n then the user supplied function should be declared like this:\r\n def myfunct(p, fjac=None, xval=None, yval=None, errval=None):\r\n\r\n Default: {} No extra parameters are passed to the user-supplied\r\n function. \r\n\r\n gtol:\r\n A nonnegative input variable. 
Termination occurs when the cosine of\r\n the angle between fvec and any column of the jacobian is at most gtol\r\n in absolute value (and status is accordingly set to 4). Therefore,\r\n gtol measures the orthogonality desired between the function vector\r\n and the columns of the jacobian.\r\n Default: 1e-10\r\n\r\n iterkw:\r\n The keyword arguments to be passed to iterfunct via the dictionary\r\n keyword mechanism. This should be a dictionary and is similar in\r\n operation to FUNCTKW.\r\n Default: {} No arguments are passed.\r\n\r\n iterfunct:\r\n The name of a function to be called upon each NPRINT iteration of the\r\n MPFIT routine. It should be declared in the following way:\r\n def iterfunct(myfunct, p, iter, fnorm, functkw=None, \r\n parinfo=None, quiet=0, dof=None, [iterkw keywords here])\r\n # perform custom iteration update\r\n\r\n iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO\r\n and QUIET). \r\n\r\n myfunct: The user-supplied function to be minimized,\r\n p: The current set of model parameters\r\n iter: The iteration number\r\n functkw: The arguments to be passed to myfunct.\r\n fnorm: The chi-squared value.\r\n quiet: Set when no textual output should be printed.\r\n dof: The number of degrees of freedom, normally the number of points\r\n less the number of free parameters.\r\n See below for documentation of parinfo.\r\n\r\n In implementation, iterfunct can perform updates to the terminal or\r\n graphical user interface, to provide feedback while the fit proceeds.\r\n If the fit is to be stopped for any reason, then iterfunct should return a\r\n a status value between -15 and -1. Otherwise it should return None\r\n (e.g. no return statement) or 0.\r\n In principle, iterfunct should probably not modify the parameter values,\r\n because it may interfere with the algorithm's stability. In practice it\r\n is allowed.\r\n\r\n Default: an internal routine is used to print the parameter values.\r\n\r\n Set iterfunct=None if there is no user-defined routine and you don't\r\n want the internal default routine be called.\r\n\r\n maxiter:\r\n The maximum number of iterations to perform. If the number is exceeded,\r\n then the status value is set to 5 and MPFIT returns.\r\n Default: 200 iterations\r\n\r\n nocovar:\r\n Set this keyword to prevent the calculation of the covariance matrix\r\n before returning (see COVAR)\r\n Default: clear (=0) The covariance matrix is returned\r\n\r\n nprint:\r\n The frequency with which iterfunct is called. A value of 1 indicates\r\n that iterfunct is called with every iteration, while 2 indicates every\r\n other iteration, etc. Note that several Levenberg-Marquardt attempts\r\n can be made in a single iteration.\r\n Default value: 1\r\n\r\n parinfo\r\n Provides a mechanism for more sophisticated constraints to be placed on\r\n parameter values. When parinfo is not passed, then it is assumed that\r\n all parameters are free and unconstrained. Values in parinfo are never\r\n modified during a call to MPFIT.\r\n\r\n See description above for the structure of PARINFO.\r\n\r\n Default value: None All parameters are free and unconstrained.\r\n\r\n quiet:\r\n Set this keyword when no textual output should be printed by MPFIT\r\n\r\n damp:\r\n A scalar number, indicating the cut-off value of residuals where\r\n \"damping\" will occur. Residuals with magnitudes greater than this\r\n number will be replaced by their hyperbolic tangent. 
This partially\r\n mitigates the so-called large residual problem inherent in\r\n least-squares solvers (as for the test problem CURVI,\r\n http://www.maxthis.com/curviex.htm).\r\n A value of 0 indicates no damping.\r\n Default: 0\r\n\r\n Note: DAMP doesn't work with autoderivative=0\r\n\r\n xtol:\r\n A nonnegative input variable. Termination occurs when the relative error\r\n between two consecutive iterates is at most xtol (and status is\r\n accordingly set to 2 or 3). Therefore, xtol measures the relative error\r\n desired in the approximate solution.\r\n Default: 1E-10\r\n\r\n Outputs:\r\n\r\n Returns an object of type mpfit. The results are attributes of this class,\r\n e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar.\r\n\r\n .status\r\n An integer status code is returned. All values greater than zero can\r\n represent success (however .status == 5 may indicate failure to\r\n converge). It can have one of the following values:\r\n\r\n -16\r\n A parameter or function value has become infinite or an undefined\r\n number. This is usually a consequence of numerical overflow in the\r\n user's model function, which must be avoided.\r\n\r\n -15 to -1 \r\n These are error codes that either MYFUNCT or iterfunct may return to\r\n terminate the fitting process. Values from -15 to -1 are reserved\r\n for the user functions and will not clash with MPFIT.\r\n\r\n 0 Improper input parameters.\r\n\r\n 1 Both actual and predicted relative reductions in the sum of squares\r\n are at most ftol.\r\n\r\n 2 Relative error between two consecutive iterates is at most xtol\r\n\r\n 3 Conditions for status = 1 and status = 2 both hold.\r\n\r\n 4 The cosine of the angle between fvec and any column of the jacobian\r\n is at most gtol in absolute value.\r\n\r\n 5 The maximum number of iterations has been reached.\r\n\r\n 6 ftol is too small. No further reduction in the sum of squares is\r\n possible.\r\n\r\n 7 xtol is too small. No further improvement in the approximate solution\r\n x is possible.\r\n\r\n 8 gtol is too small. fvec is orthogonal to the columns of the jacobian\r\n to machine precision.\r\n\r\n .fnorm\r\n The value of the summed squared residuals for the returned parameter\r\n values.\r\n\r\n .covar\r\n The covariance matrix for the set of parameters returned by MPFIT.\r\n The matrix is NxN where N is the number of parameters. The square root\r\n of the diagonal elements gives the formal 1-sigma statistical errors on\r\n the parameters if errors were treated \"properly\" in fcn.\r\n Parameter errors are also returned in .perror.\r\n\r\n To compute the correlation matrix, pcor, use this example:\r\n cov = mpfit.covar\r\n pcor = cov * 0.\r\n for i in range(n):\r\n for j in range(n):\r\n pcor[i,j] = cov[i,j]/numpy.sqrt(cov[i,i]*cov[j,j])\r\n\r\n If nocovar is set or MPFIT terminated abnormally, then .covar is set to\r\n a scalar with value None.\r\n\r\n .errmsg\r\n A string error or warning message is returned.\r\n\r\n .nfev\r\n The number of calls to MYFUNCT performed.\r\n\r\n .niter\r\n The number of iterations completed.\r\n\r\n .perror\r\n The formal 1-sigma errors in each parameter, computed from the\r\n covariance matrix. If a parameter is held fixed, or if it touches a\r\n boundary, then the error is reported as zero.\r\n\r\n If the fit is unweighted (i.e. no errors were given, or the weights\r\n were uniformly set to unity), then .perror will probably not represent\r\n the true parameter uncertainties. 
\r\n\r\n *If* you can assume that the true reduced chi-squared value is unity --\r\n meaning that the fit is implicitly assumed to be of good quality --\r\n then the estimated parameter uncertainties can be computed by scaling\r\n .perror by the measured chi-squared value.\r\n\r\n dof = len(x) - len(mpfit.params) # deg of freedom\r\n # scaled uncertainties\r\n pcerror = mpfit.perror * numpy.sqrt(mpfit.fnorm / dof)\r\n\r\n \"\"\"\r\n self.niter = 0\r\n self.params = None\r\n self.covar = None\r\n self.perror = None\r\n self.status = 0 # Invalid input flag set while we check inputs\r\n self.debug = debug\r\n self.errmsg = ''\r\n self.fastnorm = fastnorm\r\n self.nfev = 0\r\n self.damp = damp\r\n self.machar = machar(double=1)\r\n machep = self.machar.machep\r\n\r\n if (fcn==None):\r\n self.errmsg = \"Usage: parms = mpfit('myfunt', ... )\"\r\n return\r\n\r\n if (iterfunct == 'default'): iterfunct = self.defiter\r\n\r\n ## Parameter damping doesn't work when user is providing their own\r\n ## gradients.\r\n if (self.damp != 0) and (autoderivative == 0):\r\n self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'\r\n return\r\n\r\n ## Parameters can either be stored in parinfo, or x. x takes precedence if it exists\r\n if (xall == None) and (parinfo == None):\r\n self.errmsg = 'ERROR: must pass parameters in P or PARINFO'\r\n return\r\n\r\n ## Be sure that PARINFO is of the right type\r\n if (parinfo != None):\r\n if (type(parinfo) != list):\r\n self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'\r\n return\r\n else:\r\n if (type(parinfo[0]) != dict):\r\n self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'\r\n return\r\n if ((xall != None) and (len(xall) != len(parinfo))):\r\n self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'\r\n return\r\n\r\n ## If the parameters were not specified at the command line, then\r\n ## extract them from PARINFO\r\n if (xall == None):\r\n xall = self.parinfo(parinfo, 'value')\r\n if (xall == None):\r\n self.errmsg = 'ERROR: either P or PARINFO(*)[\"value\"] must be supplied.'\r\n return\r\n\r\n ## Make sure parameters are numpy arrays of type numpy.float\r\n xall = numpy.asarray(xall, numpy.float)\r\n\r\n npar = len(xall)\r\n self.fnorm = -1.\r\n fnorm1 = -1.\r\n\r\n ## TIED parameters?\r\n ptied = self.parinfo(parinfo, 'tied', default='', n=npar)\r\n self.qanytied = 0\r\n for i in range(npar):\r\n ptied[i] = ptied[i].strip()\r\n if (ptied[i] != ''): self.qanytied = 1\r\n self.ptied = ptied\r\n\r\n ## FIXED parameters ?\r\n pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)\r\n pfixed = (pfixed == 1)\r\n for i in range(npar):\r\n pfixed[i] = pfixed[i] or (ptied[i] != '') ## Tied parameters are also effectively fixed\r\n\r\n ## Finite differencing step, absolute and relative, and sidedness of deriv.\r\n step = self.parinfo(parinfo, 'step', default=0., n=npar)\r\n dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)\r\n dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)\r\n\r\n ## Maximum and minimum steps allowed to be taken in one iteration\r\n maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)\r\n minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)\r\n qmin = minstep * 0 ## Remove minstep for now!!\r\n qmax = maxstep != 0\r\n wh, = numpy.nonzero(((qmin != 0.) & (qmax != 0.)) & (maxstep < minstep))\r\n if (len(wh) > 0):\r\n self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'\r\n return\r\n wh, = numpy.nonzero((qmin!=0.) 
& (qmax!=0.))\r\n qminmax = len(wh > 0)\r\n\r\n ## Finish up the free parameters\r\n ifree, = numpy.nonzero(pfixed != 1)\r\n nfree = len(ifree)\r\n if nfree == 0:\r\n self.errmsg = 'ERROR: no free parameters'\r\n return\r\n dside = dside.take(ifree)\r\n\r\n ## Compose only VARYING parameters\r\n self.params = xall ## self.params is the set of parameters to be returned\r\n x = numpy.take(self.params, ifree) ## x is the set of free parameters\r\n\r\n ## LIMITED parameters ?\r\n limited = self.parinfo(parinfo, 'limited', default=[0,0])\r\n limits = self.parinfo(parinfo, 'limits', default=[0.,0.])\r\n if (limited != None) and (limits != None):\r\n ## Error checking on limits in parinfo\r\n wh, = numpy.nonzero((limited[:,0] & (xall < limits[:,0])) |\r\n (limited[:,1] & (xall > limits[:,1])))\r\n if (len(wh) > 0):\r\n self.errmsg = 'ERROR: parameters are not within PARINFO limits'\r\n return\r\n wh, = numpy.nonzero((limited[:,0] & limited[:,1]) &\r\n (limits[:,0] >= limits[:,1]) &\r\n (pfixed == 0))\r\n if (len(wh) > 0):\r\n self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'\r\n return\r\n\r\n ## Transfer structure values to local variables\r\n qulim = numpy.take(limited[:,1], ifree)\r\n ulim = numpy.take(limits [:,1], ifree)\r\n qllim = numpy.take(limited[:,0], ifree)\r\n llim = numpy.take(limits [:,0], ifree)\r\n\r\n wh, = numpy.nonzero((qulim!=0.) | (qllim!=0.))\r\n if (len(wh) > 0): qanylim = 1\r\n else: qanylim = 0\r\n else:\r\n ## Fill in local variables with dummy values\r\n qulim = numpy.zeros(nfree)\r\n ulim = x * 0.\r\n qllim = qulim\r\n llim = x * 0.\r\n qanylim = 0\r\n\r\n n = len(x)\r\n ## Check input parameters for errors\r\n if ((n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0)\r\n or (maxiter <= 0) or (factor <= 0)):\r\n self.errmsg = 'ERROR: input keywords are inconsistent'\r\n return\r\n\r\n if (rescale != 0):\r\n self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'\r\n if (len(diag) < n): return\r\n wh, = numpy.nonzero(diag <= 0)\r\n if (len(wh) > 0): return\r\n self.errmsg = ''\r\n\r\n # Make sure x is a numpy array of type numpy.float\r\n x = numpy.asarray(x, numpy.float)\r\n\r\n [self.status, fvec] = self.call(fcn, self.params, functkw)\r\n if (self.status < 0):\r\n self.errmsg = 'ERROR: first call to \"'+str(fcn)+'\" failed'\r\n return\r\n\r\n m = len(fvec)\r\n if (m < n):\r\n self.errmsg = 'ERROR: number of parameters must not exceed data'\r\n return\r\n\r\n self.fnorm = self.enorm(fvec)\r\n\r\n ## Initialize Levelberg-Marquardt parameter and iteration counter\r\n\r\n par = 0.\r\n self.niter = 1\r\n qtf = x * 0.\r\n self.status = 0\r\n\r\n ## Beginning of the outer loop\r\n\r\n while(1):\r\n\r\n ## If requested, call fcn to enable printing of iterates\r\n numpy.put(self.params, ifree, x)\r\n if (self.qanytied): self.params = self.tie(self.params, ptied)\r\n\r\n if (nprint > 0) and (iterfunct != None):\r\n if (((self.niter-1) % nprint) == 0):\r\n mperr = 0\r\n xnew0 = self.params.copy()\r\n\r\n dof = max(len(fvec) - len(x), 0)\r\n status = iterfunct(fcn, self.params, self.niter, self.fnorm**2, \r\n functkw=functkw, parinfo=parinfo, quiet=quiet, \r\n dof=dof, **iterkw)\r\n if (status != None): self.status = status\r\n\r\n ## Check for user termination\r\n if (self.status < 0): \r\n self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)\r\n return\r\n\r\n ## If parameters were changed (grrr..) 
then re-tie\r\n if (max(abs(xnew0-self.params)) > 0):\r\n if (self.qanytied): self.params = self.tie(self.params, ptied)\r\n x = numpy.take(self.params, ifree)\r\n\r\n\r\n ## Calculate the jacobian matrix\r\n self.status = 2\r\n catch_msg = 'calling MPFIT_FDJAC2'\r\n fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside, \r\n epsfcn=epsfcn, \r\n autoderivative=autoderivative, dstep=dstep, \r\n functkw=functkw, ifree=ifree, xall=self.params)\r\n if (fjac == None):\r\n self.errmsg = 'WARNING: premature termination by FDJAC2'\r\n return\r\n\r\n ## Determine if any of the parameters are pegged at the limits\r\n if (qanylim):\r\n catch_msg = 'zeroing derivatives of pegged parameters'\r\n whlpeg, = numpy.nonzero(qllim & (x == llim))\r\n nlpeg = len(whlpeg)\r\n whupeg, = numpy.nonzero(qulim & (x == ulim))\r\n nupeg = len(whupeg)\r\n ## See if any \"pegged\" values should keep their derivatives\r\n if (nlpeg > 0):\r\n ## Total derivative of sum wrt lower pegged parameters\r\n for i in range(nlpeg):\r\n sum = numpy.sum(fvec * fjac[:,whlpeg[i]])\r\n if (sum > 0): fjac[:,whlpeg[i]] = 0\r\n if (nupeg > 0):\r\n ## Total derivative of sum wrt upper pegged parameters\r\n for i in range(nupeg):\r\n sum = numpy.sum(fvec * fjac[:,whupeg[i]])\r\n if (sum < 0): fjac[:,whupeg[i]] = 0\r\n\r\n ## Compute the QR factorization of the jacobian\r\n [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)\r\n\r\n ## On the first iteration if \"diag\" is unspecified, scale\r\n ## according to the norms of the columns of the initial jacobian\r\n catch_msg = 'rescaling diagonal elements'\r\n if (self.niter == 1):\r\n if ((rescale==0) or (len(diag) < n)):\r\n diag = wa2.copy()\r\n wh, = numpy.nonzero(diag == 0)\r\n numpy.put(diag, wh, 1.)\r\n\r\n ## On the first iteration, calculate the norm of the scaled x\r\n ## and initialize the step bound delta \r\n wa3 = diag * x\r\n xnorm = self.enorm(wa3)\r\n delta = factor*xnorm\r\n if (delta == 0.): delta = factor\r\n\r\n ## Form (q transpose)*fvec and store the first n components in qtf\r\n catch_msg = 'forming (q transpose)*fvec'\r\n wa4 = fvec.copy()\r\n for j in range(n):\r\n lj = ipvt[j]\r\n temp3 = fjac[j,lj]\r\n if (temp3 != 0):\r\n fj = fjac[j:,lj]\r\n wj = wa4[j:]\r\n ## *** optimization wa4(j:*)\r\n wa4[j:] = wj - fj * numpy.sum(fj*wj) / temp3 \r\n fjac[j,lj] = wa1[j]\r\n qtf[j] = wa4[j]\r\n ## From this point on, only the square matrix, consisting of the\r\n ## triangle of R, is needed.\r\n fjac = fjac[0:n, 0:n]\r\n fjac.shape = [n, n]\r\n temp = fjac.copy()\r\n for i in range(n):\r\n temp[:,i] = fjac[:, ipvt[i]]\r\n fjac = temp.copy()\r\n\r\n ## Check for overflow. 
This should be a cheap test here since FJAC\r\n ## has been reduced to a (small) square matrix, and the test is\r\n ## O(N^2).\r\n #wh = where(finite(fjac) EQ 0, ct)\r\n #if ct GT 0 then goto, FAIL_OVERFLOW\r\n\r\n ## Compute the norm of the scaled gradient\r\n catch_msg = 'computing the scaled gradient'\r\n gnorm = 0.\r\n if (self.fnorm != 0):\r\n for j in range(n):\r\n l = ipvt[j]\r\n if (wa2[l] != 0):\r\n sum = numpy.sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm\r\n gnorm = max([gnorm,abs(sum/wa2[l])])\r\n\r\n ## Test for convergence of the gradient norm\r\n if (gnorm <= gtol):\r\n self.status = 4\r\n return\r\n\r\n ## Rescale if necessary\r\n if (rescale == 0):\r\n diag = numpy.choose(diag>wa2, (wa2, diag))\r\n\r\n ## Beginning of the inner loop\r\n while(1):\r\n\r\n ## Determine the levenberg-marquardt parameter\r\n catch_msg = 'calculating LM parameter (MPFIT_)'\r\n [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,\r\n delta, wa1, wa2, par=par)\r\n ## Store the direction p and x+p. Calculate the norm of p\r\n wa1 = -wa1\r\n\r\n if (qanylim == 0) and (qminmax == 0):\r\n ## No parameter limits, so just move to new position WA2\r\n alpha = 1.\r\n wa2 = x + wa1\r\n\r\n else:\r\n\r\n ## Respect the limits. If a step were to go out of bounds, then\r\n ## we should take a step in the same direction but shorter distance.\r\n ## The step should take us right to the limit in that case.\r\n alpha = 1.\r\n\r\n if (qanylim):\r\n ## Do not allow any steps out of bounds\r\n catch_msg = 'checking for a step out of bounds'\r\n if (nlpeg > 0):\r\n numpy.put(wa1, whlpeg, numpy.clip(\r\n numpy.take(wa1, whlpeg), 0., max(wa1)))\r\n if (nupeg > 0):\r\n numpy.put(wa1, whupeg, numpy.clip(\r\n numpy.take(wa1, whupeg), min(wa1), 0.))\r\n\r\n dwa1 = abs(wa1) > machep\r\n whl, = numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim))\r\n if (len(whl) > 0):\r\n t = ((numpy.take(llim, whl) - numpy.take(x, whl)) /\r\n numpy.take(wa1, whl))\r\n alpha = min(alpha, min(t))\r\n whu, = numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim))\r\n if (len(whu) > 0):\r\n t = ((numpy.take(ulim, whu) - numpy.take(x, whu)) /\r\n numpy.take(wa1, whu))\r\n alpha = min(alpha, min(t))\r\n\r\n ## Obey any max step values.\r\n if (qminmax):\r\n nwa1 = wa1 * alpha\r\n whmax, = numpy.nonzero((qmax != 0.) & (maxstep > 0))\r\n if (len(whmax) > 0):\r\n mrat = max(numpy.take(nwa1, whmax) /\r\n numpy.take(maxstep, whmax))\r\n if (mrat > 1): alpha = alpha / mrat\r\n\r\n ## Scale the resulting vector\r\n wa1 = wa1 * alpha\r\n wa2 = x + wa1\r\n\r\n ## Adjust the final output values. If the step put us exactly\r\n ## on a boundary, make sure it is exact.\r\n wh, = numpy.nonzero((qulim!=0.) & (wa2 >= ulim*(1-machep)))\r\n if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(ulim, wh))\r\n wh, = numpy.nonzero((qllim!=0.) 
& (wa2 <= llim*(1+machep)))\r\n if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(llim, wh))\r\n # endelse\r\n wa3 = diag * wa1\r\n pnorm = self.enorm(wa3)\r\n\r\n ## On the first iteration, adjust the initial step bound\r\n if (self.niter == 1): delta = min([delta,pnorm])\r\n\r\n numpy.put(self.params, ifree, wa2)\r\n\r\n ## Evaluate the function at x+p and calculate its norm\r\n mperr = 0\r\n catch_msg = 'calling '+str(fcn)\r\n [self.status, wa4] = self.call(fcn, self.params, functkw)\r\n if (self.status < 0):\r\n self.errmsg = 'WARNING: premature termination by \"'+fcn+'\"'\r\n return\r\n fnorm1 = self.enorm(wa4)\r\n\r\n ## Compute the scaled actual reduction\r\n catch_msg = 'computing convergence criteria'\r\n actred = -1.\r\n if ((0.1 * fnorm1) < self.fnorm): actred = - (fnorm1/self.fnorm)**2 + 1.\r\n\r\n ## Compute the scaled predicted reduction and the scaled directional\r\n ## derivative\r\n for j in range(n):\r\n wa3[j] = 0\r\n wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]\r\n\r\n ## Remember, alpha is the fraction of the full LM step actually\r\n ## taken\r\n temp1 = self.enorm(alpha*wa3)/self.fnorm\r\n temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm\r\n prered = temp1*temp1 + (temp2*temp2)/0.5\r\n dirder = -(temp1*temp1 + temp2*temp2)\r\n\r\n ## Compute the ratio of the actual to the predicted reduction.\r\n ratio = 0.\r\n if (prered != 0): ratio = actred/prered\r\n\r\n ## Update the step bound\r\n if (ratio <= 0.25):\r\n if (actred >= 0): temp = .5\r\n else: temp = .5*dirder/(dirder + .5*actred)\r\n if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1\r\n delta = temp*min([delta,pnorm/0.1])\r\n par = par/temp\r\n else: \r\n if (par == 0) or (ratio >= 0.75):\r\n delta = pnorm/.5\r\n par = .5*par\r\n\r\n ## Test for successful iteration\r\n if (ratio >= 0.0001): \r\n ## Successful iteration. Update x, fvec, and their norms\r\n x = wa2\r\n wa2 = diag * x\r\n fvec = wa4\r\n xnorm = self.enorm(wa2)\r\n self.fnorm = fnorm1\r\n self.niter = self.niter + 1\r\n\r\n ## Tests for convergence\r\n if ((abs(actred) <= ftol) and (prered <= ftol)\r\n and (0.5 * ratio <= 1)): self.status = 1\r\n if delta <= xtol*xnorm: self.status = 2\r\n if ((abs(actred) <= ftol) and (prered <= ftol)\r\n and (0.5 * ratio <= 1) and (self.status == 2)): self.status = 3\r\n if (self.status != 0): break\r\n\r\n ## Tests for termination and stringent tolerances\r\n if (self.niter >= maxiter): self.status = 5\r\n if ((abs(actred) <= machep) and (prered <= machep) \r\n and (0.5*ratio <= 1)): self.status = 6\r\n if delta <= machep*xnorm: self.status = 7\r\n if gnorm <= machep: self.status = 8\r\n if (self.status != 0): break\r\n\r\n ## End of inner loop. 
Repeat if iteration unsuccessful\r\n if (ratio >= 0.0001): break\r\n\r\n ## Check for over/underflow - SKIP FOR NOW\r\n ##wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)\r\n ##if ct GT 0 OR finite(ratio) EQ 0 then begin\r\n ## errmsg = ('ERROR: parameter or function value(s) have become '+$\r\n ## 'infinite# check model function for over- '+$\r\n ## 'and underflow')\r\n ## self.status = -16\r\n ## break\r\n if (self.status != 0): break;\r\n ## End of outer loop.\r\n\r\n catch_msg = 'in the termination phase'\r\n ## Termination, either normal or user imposed.\r\n if (len(self.params) == 0):\r\n return\r\n if (nfree == 0): self.params = xall.copy()\r\n else: numpy.put(self.params, ifree, x)\r\n if (nprint > 0) and (self.status > 0):\r\n catch_msg = 'calling ' + str(fcn)\r\n [status, fvec] = self.call(fcn, self.params, functkw)\r\n catch_msg = 'in the termination phase'\r\n self.fnorm = self.enorm(fvec)\r\n\r\n if ((self.fnorm != None) and (fnorm1 != None)):\r\n self.fnorm = max([self.fnorm, fnorm1])\r\n self.fnorm = self.fnorm**2.\r\n\r\n self.covar = None\r\n self.perror = None\r\n ## (very carefully) set the covariance matrix COVAR\r\n if ((self.status > 0) and (nocovar==0) and (n != None)\r\n and (fjac != None) and (ipvt != None)):\r\n sz = numpy.shape(fjac)\r\n if ((n > 0) and (sz[0] >= n) and (sz[1] >= n)\r\n and (len(ipvt) >= n)):\r\n catch_msg = 'computing the covariance matrix'\r\n cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])\r\n cv.shape = [n, n]\r\n nn = len(xall)\r\n\r\n ## Fill in actual covariance matrix, accounting for fixed\r\n ## parameters.\r\n self.covar = numpy.zeros([nn, nn], numpy.float)\r\n for i in range(n):\r\n indices = ifree+ifree[i]*n\r\n numpy.put(self.covar, indices, cv[:,i])\r\n\r\n ## Compute errors in parameters\r\n catch_msg = 'computing parameter errors'\r\n self.perror = numpy.zeros(nn, numpy.float)\r\n d = numpy.diagonal(self.covar).copy()\r\n wh, = numpy.nonzero(d >= 0)\r\n if len(wh) > 0:\r\n numpy.put(self.perror, wh, numpy.sqrt(numpy.take(d, wh)))\r\n return\r\n\r\n\f\r\n ## Default procedure to be called every iteration. It simply prints\r\n ## the parameter values.\r\n def defiter(self, fcn, x, iter, fnorm=None, functkw=None, \r\n quiet=0, iterstop=None, parinfo=None, \r\n format=None, pformat='%.10g', dof=1):\r\n\r\n if (self.debug): print('Entering defiter...')\r\n if (quiet): return\r\n if (fnorm == None):\r\n [status, fvec] = self.call(fcn, x, functkw)\r\n fnorm = self.enorm(fvec)**2\r\n\r\n ## Determine which parameters to print\r\n nprint = len(x)\r\n print(\"Iter \", ('%6i' % iter),\" CHI-SQUARE = \",('%.10g' % fnorm),\" DOF = \", ('%i' % dof))\r\n for i in range(nprint):\r\n if (parinfo != None) and ('parname' in parinfo[i]):\r\n p = ' ' + parinfo[i]['parname'] + ' = '\r\n else:\r\n p = ' P' + str(i) + ' = '\r\n if (parinfo != None) and ('mpprint' in parinfo[i]):\r\n iprint = parinfo[i]['mpprint']\r\n else:\r\n iprint = 1\r\n if (iprint):\r\n print(p + (pformat % x[i]) + ' ')\r\n return(0)\r\n\r\n ## DO_ITERSTOP:\r\n ## if keyword_set(iterstop) then begin\r\n ## k = get_kbrd(0)\r\n ## if k EQ string(byte(7)) then begin\r\n ## message, 'WARNING: minimization not complete', /info\r\n ## print, 'Do you want to terminate this procedure? 
(y/n)', $\r\n ## format='(A,$)'\r\n ## k = ''\r\n ## read, k\r\n ## if strupcase(strmid(k,0,1)) EQ 'Y' then begin\r\n ## message, 'WARNING: Procedure is terminating.', /info\r\n ## mperr = -1\r\n ## endif\r\n ## endif\r\n ## endif\r\n\r\n\f\r\n ## Procedure to parse the parameter values in PARINFO, which is a list of dictionaries\r\n def parinfo(self, parinfo=None, key='a', default=None, n=0):\r\n if (self.debug): print('Entering parinfo...')\r\n if (n == 0) and (parinfo != None): n = len(parinfo)\r\n if (n == 0):\r\n values = default\r\n return(values)\r\n\r\n values = []\r\n for i in range(n):\r\n if ((parinfo != None) and (key in parinfo[i])):\r\n values.append(parinfo[i][key])\r\n else:\r\n values.append(default)\r\n\r\n # Convert to numeric arrays if possible\r\n test = default\r\n if (type(default) == list): test=default[0]\r\n if isinstance(test, int):\r\n values = numpy.asarray(values, numpy.int)\r\n elif isinstance(test, float):\r\n values = numpy.asarray(values, numpy.float)\r\n return(values)\r\n\r\n\f\r\n ## Call user function or procedure, with _EXTRA or not, with\r\n ## derivatives or not.\r\n def call(self, fcn, x, functkw, fjac=None):\r\n if (self.debug): print('Entering call...')\r\n if (self.qanytied): x = self.tie(x, self.ptied)\r\n self.nfev = self.nfev + 1\r\n if (fjac == None):\r\n [status, f] = fcn(x, fjac=fjac, **functkw)\r\n if (self.damp > 0):\r\n ## Apply the damping if requested. This replaces the residuals\r\n ## with their hyperbolic tangent. Thus residuals larger than\r\n ## DAMP are essentially clipped.\r\n f = numpy.tanh(f/self.damp)\r\n return([status, f])\r\n else:\r\n return(fcn(x, fjac=fjac, **functkw))\r\n\r\n\f\r\n def enorm(self, vec):\r\n\r\n if (self.debug): print('Entering enorm...')\r\n ## NOTE: it turns out that, for systems that have a lot of data\r\n ## points, this routine is a big computing bottleneck. The extended\r\n ## computations that need to be done cannot be effectively\r\n ## vectorized. 
The introduction of the FASTNORM configuration\r\n ## parameter allows the user to select a faster routine, which is \r\n ## based on TOTAL() alone.\r\n\r\n # Very simple-minded sum-of-squares\r\n if (self.fastnorm):\r\n ans = numpy.sqrt(numpy.sum(vec*vec))\r\n else:\r\n agiant = self.machar.rgiant / len(vec)\r\n adwarf = self.machar.rdwarf * len(vec)\r\n\r\n ## This is hopefully a compromise between speed and robustness.\r\n ## Need to do this because of the possibility of over- or underflow.\r\n mx = max(vec)\r\n mn = min(vec)\r\n mx = max(abs(mx), abs(mn))\r\n if mx == 0: return(vec[0]*0.)\r\n if mx > agiant or mx < adwarf:\r\n ans = mx * numpy.sqrt(numpy.sum((vec/mx)*(vec/mx)))\r\n else:\r\n ans = numpy.sqrt(numpy.sum(vec*vec))\r\n\r\n return(ans)\r\n\r\n\f\r\n def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,\r\n epsfcn=None, autoderivative=1,\r\n functkw=None, xall=None, ifree=None, dstep=None):\r\n\r\n if (self.debug): print('Entering fdjac2...')\r\n machep = self.machar.machep\r\n if epsfcn == None: epsfcn = machep\r\n if xall == None: xall = x\r\n if ifree == None: ifree = numpy.arange(len(xall))\r\n if step == None: step = x * 0.\r\n nall = len(xall)\r\n\r\n eps = numpy.sqrt(max([epsfcn, machep]))\r\n m = len(fvec)\r\n n = len(x)\r\n\r\n ## Compute analytical derivative if requested\r\n if (autoderivative == 0):\r\n mperr = 0\r\n fjac = numpy.zeros(nall, numpy.float)\r\n numpy.put(fjac, ifree, 1.0) ## Specify which parameters need derivatives\r\n [status, fp, pderiv] = self.call(fcn, xall, functkw, fjac=fjac)\r\n\r\n fjac = pderiv\r\n\r\n if fjac.shape != (m, nall):\r\n print('ERROR: Derivative matrix was not computed properly.')\r\n return(None)\r\n\r\n ## This definition is c1onsistent with CURVEFIT\r\n ## Sign error found (thanks Jesus Fernandez <[email protected]>)\r\n fjac = -fjac\r\n\r\n ## Select only the free parameters\r\n if len(ifree) < nall:\r\n fjac = fjac[:,ifree]\r\n fjac.shape = [m, n]\r\n return(fjac)\r\n\r\n fjac = numpy.zeros([m, n], numpy.float)\r\n\r\n h = eps * abs(x)\r\n\r\n ## if STEP is given, use that\r\n if step != None:\r\n stepi = numpy.take(step, ifree)\r\n wh, = numpy.nonzero(stepi > 0)\r\n if (len(wh) > 0): numpy.put(h, wh, numpy.take(stepi, wh))\r\n\r\n ## if relative step is given, use that\r\n if (len(dstep) > 0):\r\n dstepi = numpy.take(dstep, ifree)\r\n wh, = numpy.nonzero(dstepi > 0)\r\n if len(wh) > 0: numpy.put(h, wh, abs(numpy.take(dstepi,wh)*numpy.take(x,wh)))\r\n\r\n ## In case any of the step values are zero\r\n wh, = numpy.nonzero(h == 0)\r\n if len(wh) > 0: numpy.put(h, wh, eps)\r\n\r\n ## Reverse the sign of the step if we are up against the parameter\r\n ## limit, or if the user requested it.\r\n mask = dside == -1\r\n if len(ulimited) > 0 and len(ulimit) > 0:\r\n mask = mask | (ulimited & (x > ulimit-h))\r\n wh, = numpy.nonzero(mask)\r\n if len(wh) > 0: numpy.put(h, wh, -numpy.take(h, wh))\r\n ## Loop through parameters, computing the derivative for each\r\n for j in range(n):\r\n xp = xall.copy()\r\n xp[ifree[j]] = xp[ifree[j]] + h[j]\r\n [status, fp] = self.call(fcn, xp, functkw)\r\n if (status < 0): return(None)\r\n\r\n if abs(dside[j]) <= 1:\r\n ## COMPUTE THE ONE-SIDED DERIVATIVE\r\n ## Note optimization fjac(0:*,j)\r\n fjac[0:,j] = (fp-fvec)/h[j]\r\n\r\n else:\r\n ## COMPUTE THE TWO-SIDED DERIVATIVE\r\n xp[ifree[j]] = xall[ifree[j]] - h[j]\r\n\r\n mperr = 0\r\n [status, fm] = self.call(fcn, xp, functkw)\r\n if (status < 0): return(None)\r\n\r\n ## Note optimization fjac(0:*,j)\r\n fjac[0:,j] = 
(fp-fm)/(2*h[j])\r\n return(fjac)\r\n\r\n\r\n\f\r\n # Original FORTRAN documentation\r\n # **********\r\n #\r\n # subroutine qrfac\r\n #\r\n # this subroutine uses householder transformations with column\r\n # pivoting (optional) to compute a qr factorization of the\r\n # m by n matrix a. that is, qrfac determines an orthogonal\r\n # matrix q, a permutation matrix p, and an upper trapezoidal\r\n # matrix r with diagonal elements of nonincreasing magnitude,\r\n # such that a*p = q*r. the householder transformation for\r\n # column k, k = 1,2,...,min(m,n), is of the form\r\n #\r\n # t\r\n # i - (1/u(k))*u*u\r\n #\r\n # where u has zeros in the first k-1 positions. the form of\r\n # this transformation and the method of pivoting first\r\n # appeared in the corresponding linpack subroutine.\r\n #\r\n # the subroutine statement is\r\n #\r\n # subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)\r\n #\r\n # where\r\n #\r\n # m is a positive integer input variable set to the number\r\n # of rows of a.\r\n #\r\n # n is a positive integer input variable set to the number\r\n # of columns of a.\r\n #\r\n # a is an m by n array. on input a contains the matrix for\r\n # which the qr factorization is to be computed. on output\r\n # the strict upper trapezoidal part of a contains the strict\r\n # upper trapezoidal part of r, and the lower trapezoidal\r\n # part of a contains a factored form of q (the non-trivial\r\n # elements of the u vectors described above).\r\n #\r\n # lda is a positive integer input variable not less than m\r\n # which specifies the leading dimension of the array a.\r\n #\r\n # pivot is a logical input variable. if pivot is set true,\r\n # then column pivoting is enforced. if pivot is set false,\r\n # then no column pivoting is done.\r\n #\r\n # ipvt is an integer output array of length lipvt. ipvt\r\n # defines the permutation matrix p such that a*p = q*r.\r\n # column j of p is column ipvt(j) of the identity matrix.\r\n # if pivot is false, ipvt is not referenced.\r\n #\r\n # lipvt is a positive integer input variable. if pivot is false,\r\n # then lipvt may be as small as 1. if pivot is true, then\r\n # lipvt must be at least n.\r\n #\r\n # rdiag is an output array of length n which contains the\r\n # diagonal elements of r.\r\n #\r\n # acnorm is an output array of length n which contains the\r\n # norms of the corresponding columns of the input matrix a.\r\n # if this information is not needed, then acnorm can coincide\r\n # with rdiag.\r\n #\r\n # wa is a work array of length n. if pivot is false, then wa\r\n # can coincide with rdiag.\r\n #\r\n # subprograms called\r\n #\r\n # minpack-supplied ... dpmpar,enorm\r\n #\r\n # fortran-supplied ... dmax1,dsqrt,min0\r\n #\r\n # argonne national laboratory. minpack project. march 1980.\r\n # burton s. garbow, kenneth e. hillstrom, jorge j. more\r\n #\r\n # **********\r\n\r\n # NOTE: in IDL the factors appear slightly differently than described\r\n # above. The matrix A is still m x n where m >= n. 
\r\n #\r\n # The \"upper\" triangular matrix R is actually stored in the strict\r\n # lower left triangle of A under the standard notation of IDL.\r\n #\r\n # The reflectors that generate Q are in the upper trapezoid of A upon\r\n # output.\r\n #\r\n # EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]\r\n # aa = [[9.,2.,6.],[4.,8.,7.]]\r\n # mpfit_qrfac, aa, aapvt, rdiag, aanorm\r\n # IDL> print, aa\r\n # 1.81818* 0.181818* 0.545455*\r\n # -8.54545+ 1.90160* 0.432573*\r\n # IDL> print, rdiag\r\n # -11.0000+ -7.48166+\r\n #\r\n # The components marked with a * are the components of the\r\n # reflectors, and those marked with a + are components of R.\r\n #\r\n # To reconstruct Q and R we proceed as follows. First R.\r\n # r = fltarr(m, n)\r\n # for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag\r\n # r(lindgen(n)*(m+1)) = rdiag\r\n #\r\n # Next, Q, which are composed from the reflectors. Each reflector v\r\n # is taken from the upper trapezoid of aa, and converted to a matrix\r\n # via (I - 2 vT . v / (v . vT)).\r\n #\r\n # hh = ident ## identity matrix\r\n # for i = 0, n-1 do begin\r\n # v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 ## extract reflector\r\n # hh = hh ## (ident - 2*(v # v)/total(v * v)) ## generate matrix\r\n # endfor\r\n #\r\n # Test the result:\r\n # IDL> print, hh ## transpose(r)\r\n # 9.00000 4.00000\r\n # 2.00000 8.00000\r\n # 6.00000 7.00000\r\n #\r\n # Note that it is usually never necessary to form the Q matrix\r\n # explicitly, and MPFIT does not.\r\n \f\r\n\r\n def qrfac(self, a, pivot=0):\r\n\r\n if (self.debug): print('Entering qrfac...')\r\n machep = self.machar.machep\r\n sz = numpy.shape(a)\r\n m = sz[0]\r\n n = sz[1]\r\n\r\n ## Compute the initial column norms and initialize arrays\r\n acnorm = numpy.zeros(n, numpy.float)\r\n for j in range(n):\r\n acnorm[j] = self.enorm(a[:,j])\r\n rdiag = acnorm.copy()\r\n wa = rdiag.copy()\r\n ipvt = numpy.arange(n)\r\n\r\n ## Reduce a to r with householder transformations\r\n minmn = min([m,n])\r\n for j in range(minmn):\r\n if (pivot != 0):\r\n ## Bring the column of largest norm into the pivot position\r\n rmax = max(rdiag[j:])\r\n kmax, = numpy.nonzero(rdiag[j:] == rmax)\r\n ct = len(kmax)\r\n kmax = kmax + j\r\n if ct > 0:\r\n kmax = kmax[0]\r\n\r\n ## Exchange rows via the pivot only. Avoid actually exchanging\r\n ## the rows, in case there is lots of memory transfer. The\r\n ## exchange occurs later, within the body of MPFIT, after the\r\n ## extraneous columns of the matrix have been shed.\r\n if kmax != j:\r\n temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp\r\n rdiag[kmax] = rdiag[j]\r\n wa[kmax] = wa[j]\r\n\r\n ## Compute the householder transformation to reduce the jth\r\n ## column of A to a multiple of the jth unit vector\r\n lj = ipvt[j]\r\n ajj = a[j:,lj]\r\n ajnorm = self.enorm(ajj)\r\n if ajnorm == 0: break\r\n if a[j,j] < 0: ajnorm = -ajnorm\r\n\r\n ajj = ajj / ajnorm\r\n ajj[0] = ajj[0] + 1\r\n ## *** Note optimization a(j:*,j)\r\n a[j:,lj] = ajj\r\n\r\n ## Apply the transformation to the remaining columns\r\n ## and update the norms\r\n\r\n ## NOTE to SELF: tried to optimize this by removing the loop,\r\n ## but it actually got slower. 
Reverted to \"for\" loop to keep\r\n ## it simple.\r\n if (j+1 < n):\r\n for k in range(j+1, n):\r\n lk = ipvt[k]\r\n ajk = a[j:,lk]\r\n ## *** Note optimization a(j:*,lk) \r\n ## (corrected 20 Jul 2000)\r\n if a[j,lj] != 0: \r\n a[j:,lk] = ajk - ajj * numpy.sum(ajk*ajj)/a[j,lj]\r\n if ((pivot != 0) and (rdiag[k] != 0)):\r\n temp = a[j,lk]/rdiag[k]\r\n rdiag[k] = rdiag[k] * numpy.sqrt(max((1.-temp**2), 0.))\r\n temp = rdiag[k]/wa[k]\r\n if ((0.05*temp*temp) <= machep):\r\n rdiag[k] = self.enorm(a[j+1:,lk])\r\n wa[k] = rdiag[k]\r\n rdiag[j] = -ajnorm\r\n return([a, ipvt, rdiag, acnorm])\r\n\r\n \f\r\n # Original FORTRAN documentation\r\n # **********\r\n #\r\n # subroutine qrsolv\r\n #\r\n # given an m by n matrix a, an n by n diagonal matrix d,\r\n # and an m-vector b, the problem is to determine an x which\r\n # solves the system\r\n #\r\n # a*x = b , d*x = 0 ,\r\n #\r\n # in the least squares sense.\r\n #\r\n # this subroutine completes the solution of the problem\r\n # if it is provided with the necessary information from the\r\n # factorization, with column pivoting, of a. that is, if\r\n # a*p = q*r, where p is a permutation matrix, q has orthogonal\r\n # columns, and r is an upper triangular matrix with diagonal\r\n # elements of nonincreasing magnitude, then qrsolv expects\r\n # the full upper triangle of r, the permutation matrix p,\r\n # and the first n components of (q transpose)*b. the system\r\n # a*x = b, d*x = 0, is then equivalent to\r\n #\r\n # t t\r\n # r*z = q *b , p *d*p*z = 0 ,\r\n #\r\n # where x = p*z. if this system does not have full rank,\r\n # then a least squares solution is obtained. on output qrsolv\r\n # also provides an upper triangular matrix s such that\r\n #\r\n # t t t\r\n # p *(a *a + d*d)*p = s *s .\r\n #\r\n # s is computed within qrsolv and may be of separate interest.\r\n #\r\n # the subroutine statement is\r\n #\r\n # subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)\r\n #\r\n # where\r\n #\r\n # n is a positive integer input variable set to the order of r.\r\n #\r\n # r is an n by n array. on input the full upper triangle\r\n # must contain the full upper triangle of the matrix r.\r\n # on output the full upper triangle is unaltered, and the\r\n # strict lower triangle contains the strict upper triangle\r\n # (transposed) of the upper triangular matrix s.\r\n #\r\n # ldr is a positive integer input variable not less than n\r\n # which specifies the leading dimension of the array r.\r\n #\r\n # ipvt is an integer input array of length n which defines the\r\n # permutation matrix p such that a*p = q*r. column j of p\r\n # is column ipvt(j) of the identity matrix.\r\n #\r\n # diag is an input array of length n which must contain the\r\n # diagonal elements of the matrix d.\r\n #\r\n # qtb is an input array of length n which must contain the first\r\n # n elements of the vector (q transpose)*b.\r\n #\r\n # x is an output array of length n which contains the least\r\n # squares solution of the system a*x = b, d*x = 0.\r\n #\r\n # sdiag is an output array of length n which contains the\r\n # diagonal elements of the upper triangular matrix s.\r\n #\r\n # wa is a work array of length n.\r\n #\r\n # subprograms called\r\n #\r\n # fortran-supplied ... dabs,dsqrt\r\n #\r\n # argonne national laboratory. minpack project. march 1980.\r\n # burton s. garbow, kenneth e. hillstrom, jorge j. 
more\r\n #\r\n \f\r\n def qrsolv(self, r, ipvt, diag, qtb, sdiag):\r\n if (self.debug): print('Entering qrsolv...')\r\n sz = numpy.shape(r)\r\n m = sz[0]\r\n n = sz[1]\r\n\r\n ## copy r and (q transpose)*b to preserve input and initialize s.\r\n ## in particular, save the diagonal elements of r in x.\r\n\r\n for j in range(n):\r\n r[j:n,j] = r[j,j:n]\r\n x = numpy.diagonal(r).copy()\r\n wa = qtb.copy()\r\n\r\n ## Eliminate the diagonal matrix d using a givens rotation\r\n for j in range(n):\r\n l = ipvt[j]\r\n if (diag[l] == 0): break\r\n sdiag[j:] = 0\r\n sdiag[j] = diag[l]\r\n\r\n ## The transformations to eliminate the row of d modify only a\r\n ## single element of (q transpose)*b beyond the first n, which\r\n ## is initially zero.\r\n\r\n qtbpj = 0.\r\n for k in range(j,n):\r\n if (sdiag[k] == 0): break\r\n if (abs(r[k,k]) < abs(sdiag[k])):\r\n cotan = r[k,k]/sdiag[k]\r\n sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)\r\n cosine = sine*cotan\r\n else:\r\n tang = sdiag[k]/r[k,k]\r\n cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)\r\n sine = cosine*tang\r\n\r\n ## Compute the modified diagonal element of r and the\r\n ## modified element of ((q transpose)*b,0).\r\n r[k,k] = cosine*r[k,k] + sine*sdiag[k]\r\n temp = cosine*wa[k] + sine*qtbpj\r\n qtbpj = -sine*wa[k] + cosine*qtbpj\r\n wa[k] = temp\r\n\r\n ## Accumulate the transformation in the row of s\r\n if (n > k+1):\r\n temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]\r\n sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]\r\n r[k+1:n,k] = temp\r\n sdiag[j] = r[j,j]\r\n r[j,j] = x[j]\r\n\r\n ## Solve the triangular system for z. If the system is singular\r\n ## then obtain a least squares solution\r\n nsing = n\r\n wh, = numpy.nonzero(sdiag == 0)\r\n if (len(wh) > 0):\r\n nsing = wh[0]\r\n wa[nsing:] = 0\r\n\r\n if (nsing >= 1):\r\n wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] ## Degenerate case\r\n ## *** Reverse loop ***\r\n for j in range(nsing-2,-1,-1): \r\n sum = numpy.sum(r[j+1:nsing,j]*wa[j+1:nsing])\r\n wa[j] = (wa[j]-sum)/sdiag[j]\r\n\r\n ## Permute the components of z back to components of x\r\n #print \"wa = \",wa, ipvt\r\n numpy.put(x, ipvt, wa)\r\n return(r, x, sdiag)\r\n\r\n\r\n\r\n \f\r\n # Original FORTRAN documentation\r\n #\r\n # subroutine lmpar\r\n #\r\n # given an m by n matrix a, an n by n nonsingular diagonal\r\n # matrix d, an m-vector b, and a positive number delta,\r\n # the problem is to determine a value for the parameter\r\n # par such that if x solves the system\r\n #\r\n # a*x = b , sqrt(par)*d*x = 0 ,\r\n #\r\n # in the least squares sense, and dxnorm is the euclidean\r\n # norm of d*x, then either par is zero and\r\n #\r\n # (dxnorm-delta) .le. 0.1*delta ,\r\n #\r\n # or par is positive and\r\n #\r\n # abs(dxnorm-delta) .le. 0.1*delta .\r\n #\r\n # this subroutine completes the solution of the problem\r\n # if it is provided with the necessary information from the\r\n # qr factorization, with column pivoting, of a. that is, if\r\n # a*p = q*r, where p is a permutation matrix, q has orthogonal\r\n # columns, and r is an upper triangular matrix with diagonal\r\n # elements of nonincreasing magnitude, then lmpar expects\r\n # the full upper triangle of r, the permutation matrix p,\r\n # and the first n components of (q transpose)*b. 
on output\r\n # lmpar also provides an upper triangular matrix s such that\r\n #\r\n # t t t\r\n # p *(a *a + par*d*d)*p = s *s .\r\n #\r\n # s is employed within lmpar and may be of separate interest.\r\n #\r\n # only a few iterations are generally needed for convergence\r\n # of the algorithm. if, however, the limit of 10 iterations\r\n # is reached, then the output par will contain the best\r\n # value obtained so far.\r\n #\r\n # the subroutine statement is\r\n #\r\n # subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,\r\n # wa1,wa2)\r\n #\r\n # where\r\n #\r\n # n is a positive integer input variable set to the order of r.\r\n #\r\n # r is an n by n array. on input the full upper triangle\r\n # must contain the full upper triangle of the matrix r.\r\n # on output the full upper triangle is unaltered, and the\r\n # strict lower triangle contains the strict upper triangle\r\n # (transposed) of the upper triangular matrix s.\r\n #\r\n # ldr is a positive integer input variable not less than n\r\n # which specifies the leading dimension of the array r.\r\n #\r\n # ipvt is an integer input array of length n which defines the\r\n # permutation matrix p such that a*p = q*r. column j of p\r\n # is column ipvt(j) of the identity matrix.\r\n #\r\n # diag is an input array of length n which must contain the\r\n # diagonal elements of the matrix d.\r\n #\r\n # qtb is an input array of length n which must contain the first\r\n # n elements of the vector (q transpose)*b.\r\n #\r\n # delta is a positive input variable which specifies an upper\r\n # bound on the euclidean norm of d*x.\r\n #\r\n # par is a nonnegative variable. on input par contains an\r\n # initial estimate of the levenberg-marquardt parameter.\r\n # on output par contains the final estimate.\r\n #\r\n # x is an output array of length n which contains the least\r\n # squares solution of the system a*x = b, sqrt(par)*d*x = 0,\r\n # for the output par.\r\n #\r\n # sdiag is an output array of length n which contains the\r\n # diagonal elements of the upper triangular matrix s.\r\n #\r\n # wa1 and wa2 are work arrays of length n.\r\n #\r\n # subprograms called\r\n #\r\n # minpack-supplied ... dpmpar,enorm,qrsolv\r\n #\r\n # fortran-supplied ... dabs,dmax1,dmin1,dsqrt\r\n #\r\n # argonne national laboratory. minpack project. march 1980.\r\n # burton s. garbow, kenneth e. hillstrom, jorge j. more\r\n #\r\n \f\r\n def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):\r\n\r\n if (self.debug): print('Entering lmpar...')\r\n dwarf = self.machar.minnum\r\n sz = numpy.shape(r)\r\n m = sz[0]\r\n n = sz[1]\r\n\r\n ## Compute and store in x the gauss-newton direction. If the\r\n ## jacobian is rank-deficient, obtain a least-squares solution\r\n nsing = n\r\n wa1 = qtb.copy()\r\n wh, = numpy.nonzero(numpy.diagonal(r) == 0)\r\n if len(wh) > 0:\r\n nsing = wh[0]\r\n wa1[wh[0]:] = 0\r\n if nsing > 1:\r\n ## *** Reverse loop ***\r\n for j in range(nsing-1,-1,-1): \r\n wa1[j] = wa1[j]/r[j,j]\r\n if (j-1 >= 0):\r\n wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]\r\n\r\n ## Note: ipvt here is a permutation array\r\n numpy.put(x, ipvt, wa1)\r\n\r\n ## Initialize the iteration counter. Evaluate the function at the\r\n ## origin, and test for acceptance of the gauss-newton direction\r\n iter = 0\r\n wa2 = diag * x\r\n dxnorm = self.enorm(wa2)\r\n fp = dxnorm - delta\r\n if (fp <= 0.1*delta):\r\n return[r, 0., x, sdiag]\r\n\r\n ## If the jacobian is not rank deficient, the newton step provides a\r\n ## lower bound, parl, for the zero of the function. 
Otherwise set\r\n ## this bound to zero.\r\n\r\n parl = 0.\r\n if nsing >= n:\r\n wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm\r\n wa1[0] = wa1[0] / r[0,0] ## Degenerate case \r\n for j in range(1,n): ## Note \"1\" here, not zero\r\n sum = numpy.sum(r[0:j,j]*wa1[0:j])\r\n wa1[j] = (wa1[j] - sum)/r[j,j]\r\n\r\n temp = self.enorm(wa1)\r\n parl = ((fp/delta)/temp)/temp\r\n\r\n ## Calculate an upper bound, paru, for the zero of the function\r\n for j in range(n):\r\n sum = numpy.sum(r[0:j+1,j]*qtb[0:j+1])\r\n wa1[j] = sum/diag[ipvt[j]]\r\n gnorm = self.enorm(wa1)\r\n paru = gnorm/delta\r\n if paru == 0: paru = dwarf/min([delta,0.1])\r\n\r\n ## If the input par lies outside of the interval (parl,paru), set\r\n ## par to the closer endpoint\r\n\r\n par = max([par,parl])\r\n par = min([par,paru])\r\n if par == 0: par = gnorm/dxnorm\r\n\r\n ## Beginning of an interation\r\n while(1):\r\n iter = iter + 1\r\n\r\n ## Evaluate the function at the current value of par\r\n if par == 0: par = max([dwarf, paru*0.001])\r\n temp = numpy.sqrt(par)\r\n wa1 = temp * diag\r\n [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)\r\n wa2 = diag*x\r\n dxnorm = self.enorm(wa2)\r\n temp = fp\r\n fp = dxnorm - delta\r\n\r\n if ((abs(fp) <= 0.1*delta) or\r\n ((parl == 0) and (fp <= temp) and (temp < 0)) or\r\n (iter == 10)): break;\r\n\r\n ## Compute the newton correction\r\n wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm\r\n\r\n for j in range(n-1):\r\n wa1[j] = wa1[j]/sdiag[j]\r\n wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]\r\n wa1[n-1] = wa1[n-1]/sdiag[n-1] ## Degenerate case\r\n\r\n temp = self.enorm(wa1)\r\n parc = ((fp/delta)/temp)/temp\r\n\r\n ## Depending on the sign of the function, update parl or paru\r\n if fp > 0: parl = max([parl,par])\r\n if fp < 0: paru = min([paru,par])\r\n\r\n ## Compute an improved estimate for par\r\n par = max([parl, par+parc])\r\n\r\n ## End of an iteration\r\n\r\n ## Termination\r\n return[r, par, x, sdiag]\r\n\r\n \f\r\n ## Procedure to tie one parameter to another.\r\n def tie(self, p, ptied=None):\r\n if (self.debug): print('Entering tie...')\r\n if (ptied == None): return\r\n for i in range(len(ptied)):\r\n if ptied[i] == '': continue\r\n cmd = 'p[' + str(i) + '] = ' + ptied[i]\r\n exec(cmd)\r\n return(p)\r\n\r\n \f\r\n # Original FORTRAN documentation\r\n # **********\r\n #\r\n # subroutine covar\r\n #\r\n # given an m by n matrix a, the problem is to determine\r\n # the covariance matrix corresponding to a, defined as\r\n #\r\n # t\r\n # inverse(a *a) .\r\n #\r\n # this subroutine completes the solution of the problem\r\n # if it is provided with the necessary information from the\r\n # qr factorization, with column pivoting, of a. that is, if\r\n # a*p = q*r, where p is a permutation matrix, q has orthogonal\r\n # columns, and r is an upper triangular matrix with diagonal\r\n # elements of nonincreasing magnitude, then covar expects\r\n # the full upper triangle of r and the permutation matrix p.\r\n # the covariance matrix is then computed as\r\n #\r\n # t t\r\n # p*inverse(r *r)*p .\r\n #\r\n # if a is nearly rank deficient, it may be desirable to compute\r\n # the covariance matrix corresponding to the linearly independent\r\n # columns of a. to define the numerical rank of a, covar uses\r\n # the tolerance tol. if l is the largest integer such that\r\n #\r\n # abs(r(l,l)) .gt. tol*abs(r(1,1)) ,\r\n #\r\n # then covar computes the covariance matrix corresponding to\r\n # the first l columns of r. 
for k greater than l, column\r\n # and row ipvt(k) of the covariance matrix are set to zero.\r\n #\r\n # the subroutine statement is\r\n #\r\n # subroutine covar(n,r,ldr,ipvt,tol,wa)\r\n #\r\n # where\r\n #\r\n # n is a positive integer input variable set to the order of r.\r\n #\r\n # r is an n by n array. on input the full upper triangle must\r\n # contain the full upper triangle of the matrix r. on output\r\n # r contains the square symmetric covariance matrix.\r\n #\r\n # ldr is a positive integer input variable not less than n\r\n # which specifies the leading dimension of the array r.\r\n #\r\n # ipvt is an integer input array of length n which defines the\r\n # permutation matrix p such that a*p = q*r. column j of p\r\n # is column ipvt(j) of the identity matrix.\r\n #\r\n # tol is a nonnegative input variable used to define the\r\n # numerical rank of a in the manner described above.\r\n #\r\n # wa is a work array of length n.\r\n #\r\n # subprograms called\r\n #\r\n # fortran-supplied ... dabs\r\n #\r\n # argonne national laboratory. minpack project. august 1980.\r\n # burton s. garbow, kenneth e. hillstrom, jorge j. more\r\n #\r\n # **********\r\n \f\r\n def calc_covar(self, rr, ipvt=None, tol=1.e-14):\r\n\r\n if (self.debug): print('Entering calc_covar...')\r\n if numpy.rank(rr) != 2:\r\n print('ERROR: r must be a two-dimensional matrix')\r\n return(-1)\r\n s = numpy.shape(rr)\r\n n = s[0]\r\n if s[0] != s[1]:\r\n print('ERROR: r must be a square matrix')\r\n return(-1)\r\n\r\n if (ipvt == None): ipvt = numpy.arange(n)\r\n r = rr.copy()\r\n r.shape = [n,n]\r\n\r\n ## For the inverse of r in the full upper triangle of r\r\n l = -1\r\n tolr = tol * abs(r[0,0])\r\n for k in range(n):\r\n if (abs(r[k,k]) <= tolr): break\r\n r[k,k] = 1./r[k,k]\r\n for j in range(k):\r\n temp = r[k,k] * r[j,k]\r\n r[j,k] = 0.\r\n r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]\r\n l = k\r\n\r\n ## Form the full upper triangle of the inverse of (r transpose)*r\r\n ## in the full upper triangle of r\r\n if l >= 0:\r\n for k in range(l+1):\r\n for j in range(k):\r\n temp = r[j,k]\r\n r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]\r\n temp = r[k,k]\r\n r[0:k+1,k] = temp * r[0:k+1,k]\r\n\r\n ## For the full lower triangle of the covariance matrix\r\n ## in the strict lower triangle or and in wa\r\n wa = numpy.repeat([r[0,0]], n)\r\n for j in range(n):\r\n jj = ipvt[j]\r\n sing = j > l\r\n for i in range(j+1):\r\n if sing: r[i,j] = 0.\r\n ii = ipvt[i]\r\n if ii > jj: r[ii,jj] = r[i,j]\r\n if ii < jj: r[jj,ii] = r[i,j]\r\n wa[jj] = r[j,j]\r\n\r\n ## Symmetrize the covariance matrix in r\r\n for j in range(n):\r\n r[0:j+1,j] = r[j,0:j+1]\r\n r[j,j] = wa[j]\r\n\r\n return(r)\r\n\r\nclass machar:\r\n def __init__(self, double=1):\r\n if (double == 0):\r\n self.machep = 1.19209e-007\r\n self.maxnum = 3.40282e+038\r\n self.minnum = 1.17549e-038\r\n self.maxgam = 171.624376956302725\r\n else:\r\n self.machep = 2.2204460e-016\r\n self.maxnum = 1.7976931e+308\r\n self.minnum = 2.2250739e-308\r\n self.maxgam = 171.624376956302725\r\n\r\n self.maxlog = numpy.log(self.maxnum)\r\n self.minlog = numpy.log(self.minnum)\r\n self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10\r\n self.rgiant = numpy.sqrt(self.maxnum) * 0.1\r\n\f\r\n\r\n" ]
[ [ "numpy.sum", "numpy.rank", "numpy.zeros", "numpy.take", "numpy.put", "numpy.asarray", "numpy.repeat", "numpy.arange", "numpy.log", "numpy.shape", "numpy.sqrt", "numpy.diagonal", "numpy.nonzero", "numpy.tanh", "numpy.choose" ] ]
natteruw/worms
[ "6530505d4fca3229bb93738a0bae0463a17229b8" ]
[ "worms/search.py" ]
[ "'search stuff'\n\nimport sys\nimport os\nimport pickle\nimport itertools as it\nimport numpy as np\nfrom collections import defaultdict\nfrom xbin import XformBinner\nfrom homog import hinv, hrot\nfrom concurrent.futures import ProcessPoolExecutor\nfrom .worms import Segment, Segments, Worms\nfrom .criteria import CriteriaList, Cyclic, WormCriteria\nfrom . import util\n# import numba\n\n\nclass SimpleAccumulator:\n\n def __init__(self, max_results=1000000, max_tmp_size=1024):\n self.max_tmp_size = max_tmp_size\n self.max_results = max_results\n self.temporary = []\n\n def checkpoint(self):\n if len(self.temporary) is 0: return\n if hasattr(self, 'scores'):\n sc, li, lp = [self.scores], [self.lowidx], [self.lowpos]\n else:\n sc, li, lp = [], [], []\n scores = np.concatenate([x[0] for x in self.temporary] + sc)\n lowidx = np.concatenate([x[1] for x in self.temporary] + li)\n lowpos = np.concatenate([x[2] for x in self.temporary] + lp)\n order = np.argsort(scores)\n self.scores = scores[order[:self.max_results]]\n self.lowidx = lowidx[order[:self.max_results]]\n self.lowpos = lowpos[order[:self.max_results]]\n self.temporary = []\n\n def accumulate(self, gen):\n for future in gen:\n result = future.result()\n if result is not None:\n self.temporary.append(result)\n if len(self.temporary) >= self.max_tmp_size:\n self.checkpoint()\n yield None\n\n def final_result(self):\n self.checkpoint()\n try:\n return self.scores, self.lowidx, self.lowpos\n except AttributeError:\n return None\n\n\nclass MakeXIndexAccumulator:\n\n def __init__(self, sizes, thresh=1, from_seg=0, to_seg=-1,\n max_tmp_size=1024, cart_resl=2.0, ori_resl=15.0):\n self.sizes = sizes\n self.thresh = thresh\n self.max_tmp_size = max_tmp_size\n self.from_seg = from_seg\n self.to_seg = to_seg\n self.tmp = []\n self.binner = XformBinner(cart_resl, ori_resl)\n\n self.xindex = defaultdict(list)\n # self.xindex = dict()\n\n def checkpoint(self):\n print('MakeXIndexAccumulator checkpoint', end='')\n sys.stdout.flush()\n if len(self.tmp) is 0: return\n sc = np.concatenate([x[0] for x in self.tmp])\n indices = np.concatenate([x[1] for x in self.tmp])[sc <= self.thresh]\n assert np.all(indices < self.sizes)\n positions = np.concatenate([x[2] for x in self.tmp])[sc <= self.thresh]\n from_pos = positions[:, self.from_seg]\n to_pos = positions[:, self.to_seg]\n xtgt = hinv(from_pos) @ to_pos\n bin_idx = self.binner.get_bin_index(xtgt)\n\n for k, v in zip(bin_idx, indices):\n self.xindex[k].append(v)\n # self.xindex = {**{k: v for k, v in zip(bin_idx, indices)},\n # **self.xindex}\n\n # print('IndexAcculator checkpoint, xindex size:', len(self.xindex))\n self.tmp = []\n print('done, xindex size:', len(self.xindex))\n sys.stdout.flush()\n\n def accumulate(self, gen):\n for future in gen:\n result = future.result()\n if result is not None:\n self.tmp.append(result)\n if len(self.tmp) >= self.max_tmp_size:\n self.checkpoint()\n yield None\n\n def final_result(self):\n self.checkpoint()\n return self.xindex, self.binner\n\n\n# GLOBAL_xindex_set = set([0])\n\n\n# @numba.vectorize([numba.float64(numba.int64)])\n# def is_in_xindex_set_numba(idx):\n# global GLOBAL_xindex_set\n# if idx in GLOBAL_xindex_set:\n# return 0.\n# else:\n# return 999999.\n\n\nclass XIndexedCriteria(WormCriteria):\n\n def __init__(self, xindex, binner, nfold, from_seg=-1):\n self.xindex_set = set(xindex.keys())\n self.binner = binner\n self.from_seg = from_seg\n self.cyclic_xform = hrot([0, 0, 1], 360 / nfold)\n # global GLOBAL_xindex_set\n # GLOBAL_xindex_set = 
self.xindex_set\n\n def get_xform_commutator(self, from_pos, to_pos):\n return np.linalg.inv(from_pos) @ to_pos\n\n def is_in_xindex_set(self, idxary):\n is_in = np.ones(idxary.size, dtype='f') * 999999.\n for i, idx in enumerate(idxary.flat):\n if idx in self.xindex_set:\n is_in[i] = 0\n return is_in.reshape(idxary.shape)\n\n def score(self, segpos, **kw):\n from_pos = segpos[self.from_seg]\n to_pos = self.cyclic_xform @ from_pos\n xtgt = self.get_xform_commutator(from_pos, to_pos)\n bin_idx = self.binner.get_bin_index(xtgt)\n return self.is_in_xindex_set(bin_idx)\n\n def alignment(self, segpos, **kw):\n return np.eye(4)\n\n\nclass XIndexedAccumulator:\n\n def __init__(self, segments, tail, splitpoint, head, xindex, binner,\n nfold, from_seg, to_seg,\n max_tmp_size=1024, max_results=100000):\n self.segments = segments\n self.tail = tail\n self.splitpoint = splitpoint\n self.head = head\n self.xindex = xindex\n self.binner = binner\n self.from_seg = from_seg\n self.to_seg = to_seg\n self.cyclic_xform = hrot([0, 0, 1], 360 / nfold)\n self.max_tmp_size = max_tmp_size\n self.max_results = max_results\n self.temporary = []\n\n def checkpoint(self):\n if len(self.temporary) is 0: return\n ntmp = sum(len(tmp[0]) for tmp in self.temporary)\n print('XIndexedAccumulator checkpoint... ncandidates:', ntmp, end=' ')\n sys.stdout.flush()\n if hasattr(self, 'scores'):\n sc, li, lp = [self.scores], [self.lowidx], [self.lowpos]\n else:\n sc, li, lp = [], [], []\n scores = np.concatenate([x[0] for x in self.temporary])\n if scores.shape[0] is 0: return\n assert np.all(scores == 0)\n lowidx = np.concatenate([x[1] for x in self.temporary])\n lowpos = np.concatenate([x[2] for x in self.temporary])\n scores = scores[:self.max_results]\n lowpos = lowpos[:self.max_results]\n lowidx = lowidx[:self.max_results]\n from_pos = lowpos[:, -1]\n to_pos = self.cyclic_xform @ from_pos\n xtgt = hinv(from_pos) @ to_pos\n bin_idx = self.binner.get_bin_index(xtgt)\n\n # head_idx = np.stack([self.xindex[i] for i in bin_idx])\n lowidxtmp, headidxtmp = [], []\n for i, b in enumerate(bin_idx):\n for headidx in self.xindex[b]:\n lowidxtmp.append(lowidx[i])\n headidxtmp.append(headidx)\n lowidx = np.stack(lowidxtmp)\n head_idx = np.stack(headidxtmp)\n\n join_idx, valid = self.segments[self.splitpoint].merge_idx(\n self.tail[-1], lowidx[:, -1],\n self.head[0], head_idx[:, 0])\n lowidx = lowidx[valid][join_idx >= 0]\n head_idx = head_idx[valid][join_idx >= 0]\n join_idx = join_idx[join_idx >= 0]\n # join_idx = self.segments[self.splitpoint].merge_idx_slow(\n # self.tail[-1], lowidx[:, -1],\n # self.head[0], head_idx[:, 0])\n # lowidx = lowidx[join_idx >= 0]\n # head_idx = head_idx[join_idx >= 0]\n # join_idx = join_idx[join_idx >= 0]\n lowidx = np.concatenate(\n [lowidx[:, :-1], join_idx[:, None], head_idx[:, 1:]], axis=1)\n\n ifrom, ito = lowidx[:, self.from_seg], lowidx[:, self.to_seg]\n site1 = self.segments[self.from_seg].entrysiteid[ifrom]\n site2 = self.segments[self.from_seg].exitsiteid[ifrom]\n site3 = self.segments[self.to_seg].entrysiteid[ito]\n ok = (site1 != site2) * (site1 != site3) * (site2 != site3)\n # print('!!!!!!!!', np.sum(ok), ok.shape)\n # print('site1', *['%6i' % np.sum(site1 == i) for i in range(10)])\n # print('site2', *['%6i' % np.sum(site2 == i) for i in range(10)])\n # print('site3', *['%6i' % np.sum(site3 == i) for i in range(10)])\n lowidx = lowidx[ok]\n\n if hasattr(self, 'lowidx'):\n self.lowidx = np.concatenate([self.lowidx, lowidx])\n else:\n self.lowidx = lowidx\n self.temporary = []\n print('done, 
total pre-err =', len(self.lowidx))\n sys.stdout.flush()\n\n def accumulate(self, gen):\n for future in gen:\n result = future.result()\n if result is not None:\n self.temporary.append(result)\n if len(self.temporary) >= self.max_tmp_size:\n self.checkpoint()\n yield None\n\n def final_result(self):\n self.checkpoint()\n try:\n return self.lowidx\n except AttributeError:\n return None\n\n\ndef _get_chunk_end_seg(sizes, max_workers, memsize):\n end = len(sizes) - 1\n while end > 1 and (util.bigprod(sizes[end:]) < max_workers or\n memsize <= 64 * util.bigprod(sizes[:end])): end -= 1\n return end\n\n\ndef grow(\n segments,\n criteria,\n *,\n thresh=2,\n expert=0,\n memsize=1e6,\n executor=None,\n executor_args=None,\n max_workers=None,\n verbosity=2,\n chunklim=None,\n max_samples=int(1e12),\n max_results=int(1e4),\n cart_resl=2.0,\n ori_resl=15.0,\n xindex_cache_file=None\n):\n if True: # setup\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n os.environ['NUMEXPR_NUM_THREADS'] = '1'\n if isinstance(segments, list):\n segments = Segments(segments)\n # if isinstance(executor, (ProcessPoolExecutor, ThreadPoolExecutor)):\n # raise ValueError('please use dask.distributed executor')\n if verbosity > 0:\n print('grow, from', criteria.from_seg, 'to', criteria.to_seg)\n for i, seg in enumerate(segments):\n print(' segment', i,\n 'enter:', seg.entrypol,\n 'exit:', seg.exitpol)\n for sp in seg.spliceables: print(' ', sp)\n elif verbosity == 0:\n print('grow, nseg:', len(segments))\n if verbosity > 2:\n global __print_best\n __print_best = True\n if not isinstance(criteria, CriteriaList):\n criteria = CriteriaList(criteria)\n if max_workers is not None and max_workers <= 0:\n max_workers = util.cpu_count()\n if executor_args is None and max_workers is None:\n executor_args = dict()\n elif executor_args is None:\n executor_args = dict(max_workers=max_workers)\n elif executor_args is not None and max_workers is not None:\n raise ValueError('executor_args incompatible with max_workers')\n\n if executor is None:\n executor = util.InProcessExecutor\n max_workers = 1\n if max_workers is None: max_workers = util.cpu_count()\n nworker = max_workers or util.cpu_count()\n\n if criteria.origin_seg is None:\n\n matchlast = _check_topology(segments, criteria, expert)\n sizes = [len(s) for s in segments]\n end = _get_chunk_end_seg(sizes, max_workers, memsize)\n ntot, chunksize, nchunks = (util.bigprod(x)\n for x in (sizes, sizes[:end], sizes[end:]))\n if max_samples is not None:\n max_samples = np.clip(chunksize * max_workers, max_samples, ntot)\n every_other = max(1, int(ntot / max_samples)) if max_samples else 1\n njob = int(np.sqrt(nchunks / every_other) / 32) * nworker\n njob = np.clip(nworker, njob, nchunks)\n\n actual_ntot = int(ntot / every_other)\n actual_nchunk = int(nchunks / every_other)\n actual_perjob = int(ntot / every_other / njob)\n actual_chunkperjob = int(nchunks / every_other / njob)\n if verbosity >= 0:\n print('tot: {:,} chunksize: {:,} nchunks: {:,} nworker: {} njob: {}'.format(\n ntot, chunksize, nchunks, nworker, njob))\n print('worm/job: {:,} chunk/job: {} sizes={} every_other={}'.format(\n int(ntot / njob), int(nchunks / njob), sizes, every_other))\n print('max_samples: {:,} max_results: {:,}'.format(\n max_samples, max_results))\n print('actual tot: {:,}'.format(int(actual_ntot)))\n print('actual nchunks: {:,}'.format(int(actual_nchunk)))\n print('actual worms/job: {:,}'.format(int(actual_perjob)))\n print('actual chunks/job: 
{:,}'.format(int(actual_chunkperjob)))\n _grow_args = dict(executor=executor, executor_args=executor_args,\n njob=njob, end=end, thresh=thresh,\n matchlast=matchlast, every_other=every_other,\n max_results=max_results, nworker=nworker,\n verbosity=verbosity)\n if njob > 1e9 or nchunks >= 2**63 or every_other >= 2**63:\n print('too big?!?')\n print(' njob', njob)\n print(' nchunks', nchunks, nchunks / 2**63)\n print(' every_other', every_other, every_other / 2**63)\n raise ValueError('system too big')\n accum = SimpleAccumulator(max_results=max_results, max_tmp_size=1e5)\n _grow(segments, criteria, accum, **_grow_args)\n result = accum.final_result()\n if result is None: return None\n scores, lowidx, lowpos = result\n lowposlist = [lowpos[:, i] for i in range(len(segments))]\n score_check = criteria.score(segpos=lowposlist, verbosity=verbosity)\n assert np.allclose(score_check, scores)\n detail = dict(ntot=ntot, chunksize=chunksize, nchunks=nchunks,\n nworker=nworker, njob=njob, sizes=sizes, end=end)\n\n else: # hash-based protocol...\n\n assert len(criteria) is 1\n matchlast = _check_topology(segments, criteria, expert)\n\n splitpoint = criteria.from_seg\n tail, head = segments.split_at(splitpoint)\n\n print('HASH PROTOCOL splitting at segment', splitpoint)\n print(' full:', [len(s) for s in segments])\n\n headsizes = [len(s) for s in head]\n headend = _get_chunk_end_seg(headsizes, max_workers, memsize)\n ntot, chunksize, nchunks = (util.bigprod(x)\n for x in (headsizes, headsizes[:headend],\n headsizes[headend:]))\n if max_samples is not None:\n max_samples = np.clip(chunksize * max_workers, max_samples, ntot)\n every_other = max(1, int(ntot / max_samples)) if max_samples else 1\n njob = int(np.sqrt(nchunks / every_other) / 8 / nworker) * nworker\n njob = np.clip(nworker, njob, nchunks)\n _grow_args = dict(executor=executor, executor_args=executor_args,\n njob=njob, end=headend, thresh=thresh,\n matchlast=0, every_other=every_other,\n max_results=max_results, nworker=nworker,\n verbosity=verbosity)\n t1 = 0\n if xindex_cache_file and os.path.exists(xindex_cache_file):\n print('!' * 100)\n print('reading xindex, xbinner from', xindex_cache_file)\n xindex, binner = pickle.load(open(xindex_cache_file, 'rb'))\n else:\n # if 1:\n accum1 = MakeXIndexAccumulator(headsizes, from_seg=0, to_seg=-1,\n cart_resl=cart_resl, ori_resl=ori_resl)\n headcriteria = Cyclic(criteria[0].nfold, from_seg=0, to_seg=-1,\n tol=criteria[0].tol * 1.25,\n lever=criteria[0].lever)\n print('STEP ONE: growing head into xindex')\n print(' ntot {:,}'.format(ntot))\n print(' headsizes {}'.format(headsizes))\n print(' headend {:,}'.format(headend))\n print(' njob {:,}'.format(njob))\n print(' nchunks {:,}'.format(nchunks))\n print(' chunksize {:,}'.format(chunksize))\n print(' thresh {:,}'.format(thresh))\n print(' matchlast {:,}'.format(0))\n print(' every_other {:,}'.format(every_other))\n print(' max_results {:,}'.format(max_results))\n print(' nworker {:,}'.format(nworker))\n print(' act. ntot {:,}'.format(int(ntot / every_other)))\n print(' act. nchunks {:,}'.format(\n int(nchunks / every_other)))\n print(' act. worms/job {:,}'.format(\n int(ntot / every_other / njob)))\n print(' act. chunks/job {:,}'.format(\n int(nchunks / every_other / njob)))\n\n import time\n t1 = time.time()\n _grow(head, headcriteria, accum1, **_grow_args)\n xindex, binner = accum1.final_result()\n t1 = time.time() - t1\n print('!' * 100)\n print(\"TIME PHASE ONE\", t1)\n print('!' * 100)\n\n if xindex_cache_file:\n print('!' 
* 100)\n print('dumping xindex to', xindex_cache_file)\n print('!' * 100)\n pickle.dump((xindex, binner), open(xindex_cache_file, 'wb'))\n\n ################### PHASE TWO ####################\n\n tailcriteria = XIndexedCriteria(xindex, binner,\n criteria[0].nfold, from_seg=-1)\n accum2 = XIndexedAccumulator(segments, tail, splitpoint, head, xindex,\n binner, criteria[0].nfold,\n from_seg=criteria.from_seg,\n to_seg=criteria.to_seg,\n max_results=max_results * 20)\n\n tailsizes = [len(s) for s in tail]\n tailend = _get_chunk_end_seg(tailsizes, max_workers, memsize)\n ntot, chunksize, nchunks = (util.bigprod(x)\n for x in (tailsizes, tailsizes[:tailend],\n tailsizes[tailend:]))\n if max_samples is not None:\n max_samples = np.clip(chunksize * max_workers, max_samples, ntot)\n every_other = max(1, int(ntot / max_samples * 20)\n ) if max_samples else 1\n njob = int(np.ceil(np.sqrt(nchunks / every_other) / 32 / nworker))\n njob = np.clip(nworker, njob * nworker, nchunks)\n\n _grow_args = dict(\n executor=executor,\n executor_args=executor_args,\n njob=njob, end=tailend, thresh=thresh,\n matchlast=None, every_other=every_other,\n max_results=max_results, nworker=nworker,\n verbosity=verbosity)\n\n print('STEP TWO: using xindex, nentries {:,}'.format(len(xindex)))\n print(' ntot {:,}'.format(ntot))\n print(' tailsizes {}'.format(tailsizes))\n print(' tailend {:,}'.format(tailend))\n print(' njob {:,}'.format(njob))\n print(' nchunks {:,}'.format(nchunks))\n print(' chunksize {:,}'.format(chunksize))\n print(' thresh {:,}'.format(thresh))\n print(' matchlast None')\n print(' every_other {:,}'.format(every_other))\n print(' max_results {:,}'.format(max_results))\n print(' nworker {:,}'.format(nworker))\n print(' act. ntot {:,}'.format(int(ntot / every_other)))\n print(' act. nchunks {:,}'.format(\n int(nchunks / every_other)))\n print(' act. worms/job {:,}'.format(\n int(ntot / every_other / njob)))\n print(' act. chunks/job {:,}'.format(\n int(nchunks / every_other / njob)))\n print(' executor ', type(executor()))\n\n import time\n t2 = time.time()\n\n _grow(tail, tailcriteria, accum2, **_grow_args)\n # import cProfile\n # cProfile.runctx('_grow(tail, tailcriteria, accum2, **_grow_args)',\n # locals(), globals(), 'grow2.stats')\n # import pstats\n # pst = pstats.Stats('grow2.stats')\n # pst.strip_dirs().sort_stats('time').print_stats(20)\n lowidx = accum2.final_result()\n t2 = time.time() - t2\n\n print('!' * 100)\n print(\"TIME PHASE ONE\", t1)\n print(\"TIME PHASE TWO\", t2)\n # print(' best 28 cores 1608.94K/s small 1min job 681k/.s')\n print('!' 
* 100)\n\n if lowidx is None:\n print('grow: no results')\n return\n\n print('refold segments')\n lowpos = _refold_segments(segments, lowidx)\n lowposlist = [lowpos[:, i] for i in range(len(segments))]\n print('score refolded segments')\n scores = criteria.score(segpos=lowposlist, verbosity=verbosity)\n print('organize results')\n nlow = sum(scores <= thresh)\n order = np.argsort(scores)[:nlow]\n scores = scores[order]\n lowpos = lowpos[order]\n lowidx = lowidx[order]\n detail = dict(ntot=ntot, chunksize=chunksize, nchunks=nchunks,\n nworker=nworker, njob=njob, sizes=tailsizes, end=tailend)\n\n return Worms(segments, scores, lowidx, lowpos, criteria, detail)\n\n\ndef _refold_segments(segments, lowidx):\n pos = np.zeros_like(lowidx, dtype='(4,4)f') + np.eye(4)\n end = np.eye(4)\n for i, seg in enumerate(segments):\n pos[:, i] = end @ seg.x2orgn[lowidx[:, i]]\n end = end @ seg.x2exit[lowidx[:, i]]\n return pos\n\n\ndef _chain_xforms(segments):\n x2exit = [s.x2exit for s in segments]\n x2orgn = [s.x2orgn for s in segments]\n fullaxes = (np.newaxis,) * (len(x2exit) - 1)\n xconn = [x2exit[0][fullaxes], ]\n xbody = [x2orgn[0][fullaxes], ]\n for iseg in range(1, len(x2exit)):\n fullaxes = (slice(None),) + (np.newaxis,) * iseg\n xconn.append(xconn[iseg - 1] @ x2exit[iseg][fullaxes])\n xbody.append(xconn[iseg - 1] @ x2orgn[iseg][fullaxes])\n perm = list(range(len(xbody) - 1, -1, -1)) + [len(xbody), len(xbody) + 1]\n xbody = [np.transpose(x, perm) for x in xbody]\n xconn = [np.transpose(x, perm) for x in xconn]\n return xbody, xconn\n\n\n__print_best = False\n__best_score = 9e9\n\n\ndef _grow_chunk(samp, segpos, conpos, context):\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n os.environ['NUMEXPR_NUM_THREADS'] = '1'\n _, _, segs, end, criteria, thresh, matchlast, _, max_results = context\n # print('_grow_chunk', samp, end, thresh, matchlast, max_results)\n ML = matchlast\n # body must match, and splice sites must be distinct\n if ML is not None:\n # print(' ML')\n ndimchunk = segpos[0].ndim - 2\n bidB = segs[-1].bodyid[samp[-1]]\n site3 = segs[-1].entrysiteid[samp[-1]]\n if ML < ndimchunk:\n bidA = segs[ML].bodyid\n site1 = segs[ML].entrysiteid\n site2 = segs[ML].exitsiteid\n allowed = (bidA == bidB) * (site1 != site3) * (site2 != site3)\n idx = (slice(None),) * ML + (allowed,)\n segpos = segpos[: ML] + [x[idx] for x in segpos[ML:]]\n conpos = conpos[: ML] + [x[idx] for x in conpos[ML:]]\n idxmap = np.where(allowed)[0]\n else:\n bidA = segs[ML].bodyid[samp[ML - ndimchunk]]\n site1 = segs[ML].entrysiteid[samp[ML - ndimchunk]]\n site2 = segs[ML].exitsiteid[samp[ML - ndimchunk]]\n if bidA != bidB or site3 == site2 or site3 == site1:\n return\n segpos, conpos = segpos[:end], conpos[:end]\n # print(' do geom')\n for iseg, seg in enumerate(segs[end:]):\n segpos.append(conpos[-1] @ seg.x2orgn[samp[iseg]])\n if seg is not segs[-1]:\n conpos.append(conpos[-1] @ seg.x2exit[samp[iseg]])\n # print(' scoring')\n score = criteria.score(segpos=segpos)\n # print(' scores shape', score.shape)\n if __print_best:\n global __best_score\n min_score = np.min(score)\n if min_score < __best_score:\n __best_score = min_score\n if __best_score < thresh * 5:\n print('best for pid %6i %7.3f' % (os.getpid(), __best_score))\n # print(' trimming max_results')\n ilow0 = np.where(score < thresh)\n if len(ilow0) > max_results:\n order = np.argsort(score[ilow0])\n ilow0 = ilow0[order[:max_results]]\n sampidx = tuple(np.repeat(i, len(ilow0[0])) for i in samp)\n lowpostmp = []\n # print(' make lowpos')\n 
for iseg in range(len(segpos)):\n ilow = ilow0[: iseg + 1] + (0,) * (segpos[0].ndim - 2 - (iseg + 1))\n lowpostmp.append(segpos[iseg][ilow])\n ilow1 = (ilow0 if (ML is None or ML >= ndimchunk) else\n ilow0[:ML] + (idxmap[ilow0[ML]],) + ilow0[ML + 1:])\n return score[ilow0], np.array(ilow1 + sampidx).T, np.stack(lowpostmp, 1)\n\n\ndef _grow_chunks(ijob, context):\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n os.environ['NUMEXPR_NUM_THREADS'] = '1'\n sampsizes, njob, segments, end, _, _, _, every_other, max_results = context\n samples = list(util.MultiRange(sampsizes)[ijob::njob * every_other])\n segpos, connpos = _chain_xforms(segments[:end]) # common data\n args = [samples, it.repeat(segpos),\n it.repeat(connpos), it.repeat(context)]\n chunks = list(map(_grow_chunk, *args))\n chunks = [c for c in chunks if c is not None]\n if not chunks: return None\n scores = np.concatenate([x[0] for x in chunks])\n lowidx = np.concatenate([x[1] for x in chunks])\n lowpos = np.concatenate([x[2] for x in chunks])\n order = np.argsort(scores)\n return [scores[order[:max_results]],\n lowidx[order[:max_results]],\n lowpos[order[:max_results]]]\n\n\ndef _check_topology(segments, criteria, expert=False):\n if segments[0].entrypol is not None:\n raise ValueError('beginning of worm can\'t have entry')\n if segments[-1].exitpol is not None:\n raise ValueError('end of worm can\'t have exit')\n for a, b in zip(segments[:-1], segments[1:]):\n if not (a.exitpol and b.entrypol and a.exitpol != b.entrypol):\n raise ValueError('incompatible exit->entry polarity: '\n + str(a.exitpol) + '->'\n + str(b.entrypol) + ' on segment pair: '\n + str((segments.index(a), segments.index(b))))\n matchlast = criteria.last_body_same_as\n if matchlast is not None and not expert and (\n not segments[matchlast].same_bodies_as(segments[-1])):\n raise ValueError(\"segments[matchlast] not same as segments[-1], \"\n + \"if you're sure, pass expert=True\")\n if criteria.is_cyclic and not expert and criteria.to_seg not in (-1, len(segments) - 1):\n raise ValueError('Cyclic and to_seg is not last segment, '\n 'if you\'re sure, pass expert=True')\n if criteria.is_cyclic:\n beg, end = segments[criteria.from_seg], segments[criteria.to_seg]\n sites_required = {'N': 0, 'C': 0, None: 0}\n sites_required[beg.entrypol] += 1\n sites_required[beg.exitpol] += 1\n sites_required[end.entrypol] += 1\n # print('pols', beg.entrypol, beg.exitpol, end.entrypol)\n for pol in 'NC':\n # print(pol, beg.max_sites[pol], sites_required[pol])\n if beg.max_sites[pol] < sites_required[pol]:\n msg = 'Not enough %s sites in any of segment %i Spliceables, %i required, at most %i available' % (\n pol, criteria.from_seg, sites_required[pol],\n beg.max_sites[pol])\n raise ValueError(msg)\n if beg.min_sites[pol] < sites_required[pol]:\n msg = 'Not enough %s sites in all of segment %i Spliceables, %i required, some have only %i available (pass expert=True if you really want to run anyway)' % (\n pol, criteria.from_seg, sites_required[pol],\n beg.min_sites[pol])\n if not expert: raise ValueError(msg)\n print(\"WARNING:\", msg)\n return matchlast\n\n\ndef _grow(segments, criteria, accumulator, **kw):\n # terrible hack... 
xfering the poses too expensive\n tmp = {spl: (spl.body, spl.chains)\n for seg in segments for spl in seg.spliceables}\n for seg in segments:\n for spl in seg.spliceables:\n spl.body, spl.chains = None, None # poses not pickleable...\n\n sizes = [len(s) for s in segments]\n ntot = util.bigprod(sizes)\n with kw['executor'](**kw['executor_args']) as pool:\n context = (sizes[kw['end']:], kw['njob'], segments, kw['end'],\n criteria, kw['thresh'], kw['matchlast'], kw['every_other'],\n kw['max_results'])\n args = [range(kw['njob'])] + [it.repeat(context)]\n util.tqdm_parallel_map(\n pool=pool,\n function=_grow_chunks,\n accumulator=accumulator,\n map_func_args=args,\n batch_size=kw['nworker'] * 8,\n unit='K worms',\n ascii=0,\n desc='growing worms',\n unit_scale=int(ntot / kw['njob'] / 1000 / kw['every_other']),\n disable=kw['verbosity'] < 0\n )\n\n # put the poses back...\n for seg in segments:\n for spl in seg.spliceables:\n spl.body, spl.chains = tmp[spl]\n" ]
[ [ "numpy.sqrt", "numpy.eye", "numpy.allclose", "numpy.zeros_like", "numpy.transpose", "numpy.ones", "numpy.linalg.inv", "numpy.argsort", "numpy.where", "numpy.all", "numpy.clip", "numpy.min", "numpy.stack", "numpy.concatenate", "numpy.array" ] ]
barbagroup/pygbe_lspr
[ "8653771cac5d650dc85b6ecb4a8d9bbe52402b05" ]
[ "paper/convergence_analysis/convergence_helper.py" ]
[ "'''This file contains functions that help to analyze and plot data related\nto the convergence analysis.\n'''\n\nimport numpy\nimport pickle\nfrom matplotlib import pyplot, rcParams\n\ndef pickleload(pickle_file):\n '''Loads a pickle file and assins it to a variable.\n '''\n with open(pickle_file, 'rb') as f:\n dict_res = pickle.load(f)\n return dict_res\n\ndef ord_convergence(array, rate):\n '''Computes the order of convergence given 3 scalar outputs of 3 different\n mesh refinments, saved in an array. The rate is how much the mesh is\n refined. In our case is 4.\n '''\n\n ord_conv = numpy.log((array[-3] - array[-2])/(array[-2] - array[-1]))/numpy.log(rate)\n\n return ord_conv\n\ndef richardson_extrapolation(array):\n '''Performs an estimate of the exact solution using Richardson \n extrapolation, given by\n\n f_ex = (f_1 * f_3 - f_2^2) / (f_3 - 2*f_2+f_1)\n\n where f_1 is a result from the finest grid and f_3 is from the coarsest.\n The grids f_1, f_2, f_3 should have the same refinement ratio (e.g. 4 -> 8 -> 16)\n\n Arguments:\n ----------\n array: contains C_ext values of the sensor for the different meshes.\n\n Returns:\n --------\n f_ex : float, richardson_extrapolation estimated exact solution. \n '''\n \n f1 = array[-1] \n f2 = array[-2]\n f3 = array[-3]\n\n f_ex = (f1 * f3 - f2**2) / (f3 - 2 * f2 + f1)\n\n return f_ex \n\n\ndef perc_error(Cext, rich_ext):\n '''Computes the relative and percentage error respect to the richardson\n extrapolation of a scalar quantity, in this case the different meshes \n values for the extinction cross section of the sensor. \n \n Arguments:\n ----------\n Cext: array, extinction cross section values of the sensor for the \n different meshes.\n rich_ext: float, richardson_extrapolation estimated exact solution.\n \n Returns:\n --------\n rel_err :array, relative error values respect to the richardson\n extrapolation. \n perc_err: array, percentage error values respect to the richardson\n extrapolation. 
\n '''\n \n rel_err = abs((Cext - rich_ext)/rich_ext)\n perc_err = rel_err*100\n \n return rel_err, perc_err\n\ndef plot_sph_complex_convergence(N, error, file_name=None, file_ext=None, paper=False):\n\n if paper:\n file_ext = 'pdf'\n pyplot.switch_backend('agg')\n fig = pyplot.figure(figsize=(3, 2))\n ms = 5\n lw = 1\n fs = 10\n else:\n pyplot.figure(figsize=(6, 4))\n ms = 10\n lw = 2\n fs = 12\n\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = fs\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 1\n\n asymp = N[-3]*error[-3]/N\n\n pyplot.loglog(N, error, ls='',marker='o', c='k', mew=1, mfc='w', ms=ms, label='BSA_sensor')\n pyplot.loglog(N, asymp, c='k', marker='None', ls=':', lw=lw, label=None)\n\n\n loc = (3*N[-2]+N[-1])/4\n\n tex_loc = numpy.array((loc,N[-3]*error[-3]/loc))\n\n \n pyplot.text(tex_loc[0], tex_loc[1],'N$^{-1}$', fontsize=fs,\n rotation=-35,rotation_mode='anchor')\n \n pyplot.xlabel('N')\n pyplot.ylabel('Relative error')\n pyplot.tick_params(axis='both', length=10, width=0.8, which='major', direction='in')\n pyplot.tick_params(axis='both', length=5, width=0.8, which='minor', direction='in')\n pyplot.ylim(1e-3,1)\n pyplot.xlim(1e2,1e5)\n pyplot.legend(loc='upper right', fontsize=fs, numpoints=1, handlelength=0.1).get_frame().set_lw(0.3)\n pyplot.grid(True, which=\"both\")\n \n if (file_name and file_ext):\n #fig.subplots_adjust(left=0.235, bottom=0.25, right=0.965, top=0.95)\n fig.savefig(file_name+'.'+file_ext, format=file_ext, dpi=80, bbox_inches='tight', pad_inches=0.04)\n\n if paper :\n pyplot.close(fig)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.grid", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.figure", "matplotlib.pyplot.xlim", "numpy.log", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.text", "matplotlib.pyplot.ylim", "matplotlib.pyplot.loglog", "numpy.array", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel" ] ]
SeitaBV/flexmeasures
[ "f715012c9c35d38d3382bd88d36ef86ce9728d10" ]
[ "flexmeasures/data/models/planning/solver.py" ]
[ "from typing import List, Tuple, Union\n\nfrom flask import current_app\nimport pandas as pd\nimport numpy as np\nfrom pandas.tseries.frequencies import to_offset\nfrom pyomo.core import (\n ConcreteModel,\n Var,\n RangeSet,\n Param,\n Reals,\n Constraint,\n Objective,\n minimize,\n)\nfrom pyomo.environ import UnknownSolver # noqa F401\nfrom pyomo.environ import value\nfrom pyomo.opt import SolverFactory\n\nfrom flexmeasures.data.models.planning.utils import initialize_series\n\ninfinity = float(\"inf\")\n\n\ndef device_scheduler( # noqa C901\n device_constraints: List[pd.DataFrame],\n ems_constraints: pd.DataFrame,\n commitment_quantities: List[pd.Series],\n commitment_downwards_deviation_price: Union[List[pd.Series], List[float]],\n commitment_upwards_deviation_price: Union[List[pd.Series], List[float]],\n) -> Tuple[List[pd.Series], float]:\n \"\"\"Schedule devices given constraints on a device and EMS level, and given a list of commitments by the EMS.\n The commitments are assumed to be with regards to the flow of energy to the device (positive for consumption,\n negative for production). The solver minimises the costs of deviating from the commitments.\n\n Device constraints are on a device level. Handled constraints (listed by column name):\n max: maximum stock assuming an initial stock of zero (e.g. in MWh or boxes)\n min: minimum stock assuming an initial stock of zero\n equal: exact amount of stock (we do this by clamping min and max)\n derivative max: maximum flow (e.g. in MW or boxes/h)\n derivative min: minimum flow\n derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max)\n EMS constraints are on an EMS level. Handled constraints (listed by column name):\n derivative max: maximum flow\n derivative min: minimum flow\n Commitments are on an EMS level. Parameter explanations:\n commitment_quantities: amounts of flow specified in commitments (both previously ordered and newly requested)\n - e.g. in MW or boxes/h\n commitment_downwards_deviation_price: penalty for downwards deviations of the flow\n - e.g. in EUR/MW or EUR/(boxes/h)\n - either a single value (same value for each flow value) or a Series (different value for each flow value)\n commitment_upwards_deviation_price: penalty for upwards deviations of the flow\n\n All Series and DataFrames should have the same resolution.\n\n For now we pass in the various constraints and prices as separate variables, from which we make a MultiIndex\n DataFrame. 
Later we could pass in a MultiIndex DataFrame directly.\n \"\"\"\n\n # If the EMS has no devices, don't bother\n if len(device_constraints) == 0:\n return [], 0\n\n # Check if commitments have the same time window and resolution as the constraints\n start = device_constraints[0].index.to_pydatetime()[0]\n resolution = pd.to_timedelta(device_constraints[0].index.freq)\n end = device_constraints[0].index.to_pydatetime()[-1] + resolution\n if len(commitment_quantities) != 0:\n start_c = commitment_quantities[0].index.to_pydatetime()[0]\n resolution_c = pd.to_timedelta(commitment_quantities[0].index.freq)\n end_c = commitment_quantities[0].index.to_pydatetime()[-1] + resolution\n if not (start_c == start and end_c == end):\n raise Exception(\n \"Not implemented for different time windows.\\n(%s,%s)\\n(%s,%s)\"\n % (start, end, start_c, end_c)\n )\n if resolution_c != resolution:\n raise Exception(\n \"Not implemented for different resolutions.\\n%s\\n%s\"\n % (resolution, resolution_c)\n )\n\n # Turn prices per commitment into prices per commitment flow\n if len(commitment_downwards_deviation_price) != 0:\n if all(\n isinstance(price, float) for price in commitment_downwards_deviation_price\n ):\n commitment_downwards_deviation_price = [\n initialize_series(price, start, end, resolution)\n for price in commitment_downwards_deviation_price\n ]\n if len(commitment_upwards_deviation_price) != 0:\n if all(\n isinstance(price, float) for price in commitment_upwards_deviation_price\n ):\n commitment_upwards_deviation_price = [\n initialize_series(price, start, end, resolution)\n for price in commitment_upwards_deviation_price\n ]\n\n model = ConcreteModel()\n\n # Add indices for devices (d), datetimes (j) and commitments (c)\n model.d = RangeSet(0, len(device_constraints) - 1, doc=\"Set of devices\")\n model.j = RangeSet(\n 0, len(device_constraints[0].index.to_pydatetime()) - 1, doc=\"Set of datetimes\"\n )\n model.c = RangeSet(0, len(commitment_quantities) - 1, doc=\"Set of commitments\")\n\n # Add parameters\n def price_down_select(m, c, j):\n return commitment_downwards_deviation_price[c].iloc[j]\n\n def price_up_select(m, c, j):\n return commitment_upwards_deviation_price[c].iloc[j]\n\n def commitment_quantity_select(m, c, j):\n return commitment_quantities[c].iloc[j]\n\n def device_max_select(m, d, j):\n max_v = device_constraints[d][\"max\"].iloc[j]\n equal_v = device_constraints[d][\"equals\"].iloc[j]\n if np.isnan(max_v) and np.isnan(equal_v):\n return infinity\n else:\n return np.nanmin([max_v, equal_v])\n\n def device_min_select(m, d, j):\n min_v = device_constraints[d][\"min\"].iloc[j]\n equal_v = device_constraints[d][\"equals\"].iloc[j]\n if np.isnan(min_v) and np.isnan(equal_v):\n return -infinity\n else:\n return np.nanmax([min_v, equal_v])\n\n def device_derivative_max_select(m, d, j):\n max_v = device_constraints[d][\"derivative max\"].iloc[j]\n equal_v = device_constraints[d][\"derivative equals\"].iloc[j]\n if np.isnan(max_v) and np.isnan(equal_v):\n return infinity\n else:\n return np.nanmin([max_v, equal_v])\n\n def device_derivative_min_select(m, d, j):\n min_v = device_constraints[d][\"derivative min\"].iloc[j]\n equal_v = device_constraints[d][\"derivative equals\"].iloc[j]\n if np.isnan(min_v) and np.isnan(equal_v):\n return -infinity\n else:\n return np.nanmax([min_v, equal_v])\n\n def ems_derivative_max_select(m, j):\n v = ems_constraints[\"derivative max\"].iloc[j]\n if np.isnan(v):\n return infinity\n else:\n return v\n\n def ems_derivative_min_select(m, j):\n v = 
ems_constraints[\"derivative min\"].iloc[j]\n if np.isnan(v):\n return -infinity\n else:\n return v\n\n model.up_price = Param(model.c, model.j, initialize=price_up_select)\n model.down_price = Param(model.c, model.j, initialize=price_down_select)\n model.commitment_quantity = Param(\n model.c, model.j, initialize=commitment_quantity_select\n )\n model.device_max = Param(model.d, model.j, initialize=device_max_select)\n model.device_min = Param(model.d, model.j, initialize=device_min_select)\n model.device_derivative_max = Param(\n model.d, model.j, initialize=device_derivative_max_select\n )\n model.device_derivative_min = Param(\n model.d, model.j, initialize=device_derivative_min_select\n )\n model.ems_derivative_max = Param(model.j, initialize=ems_derivative_max_select)\n model.ems_derivative_min = Param(model.j, initialize=ems_derivative_min_select)\n\n # Add variables\n model.power = Var(model.d, model.j, domain=Reals, initialize=0)\n\n # Add constraints as a tuple of (lower bound, value, upper bound)\n def device_bounds(m, d, j):\n return (\n m.device_min[d, j],\n sum(m.power[d, k] for k in range(0, j + 1)),\n m.device_max[d, j],\n )\n\n def device_derivative_bounds(m, d, j):\n return (\n m.device_derivative_min[d, j],\n m.power[d, j],\n m.device_derivative_max[d, j],\n )\n\n def ems_derivative_bounds(m, j):\n return m.ems_derivative_min[j], sum(m.power[:, j]), m.ems_derivative_max[j]\n\n model.device_energy_bounds = Constraint(model.d, model.j, rule=device_bounds)\n model.device_power_bounds = Constraint(\n model.d, model.j, rule=device_derivative_bounds\n )\n model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds)\n\n # Add objective\n def cost_function(m):\n costs = 0\n for c in m.c:\n for j in m.j:\n ems_power_in_j = sum(m.power[d, j] for d in m.d)\n ems_power_deviation = ems_power_in_j - m.commitment_quantity[c, j]\n if value(ems_power_deviation) >= 0:\n costs += ems_power_deviation * m.up_price[c, j]\n else:\n costs += ems_power_deviation * m.down_price[c, j]\n return costs\n\n model.costs = Objective(rule=cost_function, sense=minimize)\n\n # Solve\n SolverFactory(current_app.config.get(\"FLEXMEASURES_LP_SOLVER\")).solve(model)\n\n planned_costs = value(model.costs)\n planned_power_per_device = []\n for d in model.d:\n planned_device_power = [model.power[d, j].value for j in model.j]\n planned_power_per_device.append(\n pd.Series(\n index=pd.date_range(\n start=start, end=end, freq=to_offset(resolution), closed=\"left\"\n ),\n data=planned_device_power,\n )\n )\n\n # model.pprint()\n # print(planned_costs)\n # input()\n return planned_power_per_device, planned_costs\n" ]
[ [ "numpy.nanmax", "pandas.tseries.frequencies.to_offset", "numpy.nanmin", "numpy.isnan", "pandas.to_timedelta" ] ]
16umm001/pandas
[ "a2e599499667b256bc5b8b13a75f0601eccfd432" ]
[ "pandas/tests/extension/base/methods.py" ]
[ "import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\nfrom .base import BaseExtensionTests\n\n\nclass BaseMethodsTests(BaseExtensionTests):\n \"\"\"Various Series and DataFrame methods.\"\"\"\n\n @pytest.mark.parametrize('dropna', [True, False])\n def test_value_counts(self, all_data, dropna):\n all_data = all_data[:10]\n if dropna:\n other = np.array(all_data[~all_data.isna()])\n else:\n other = all_data\n\n result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()\n expected = pd.Series(other).value_counts(\n dropna=dropna).sort_index()\n\n self.assert_series_equal(result, expected)\n\n def test_count(self, data_missing):\n df = pd.DataFrame({\"A\": data_missing})\n result = df.count(axis='columns')\n expected = pd.Series([0, 1])\n self.assert_series_equal(result, expected)\n\n def test_apply_simple_series(self, data):\n result = pd.Series(data).apply(id)\n assert isinstance(result, pd.Series)\n\n def test_argsort(self, data_for_sorting):\n result = pd.Series(data_for_sorting).argsort()\n expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))\n self.assert_series_equal(result, expected)\n\n def test_argsort_missing(self, data_missing_for_sorting):\n result = pd.Series(data_missing_for_sorting).argsort()\n expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))\n self.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('ascending', [True, False])\n def test_sort_values(self, data_for_sorting, ascending):\n ser = pd.Series(data_for_sorting)\n result = ser.sort_values(ascending=ascending)\n expected = ser.iloc[[2, 0, 1]]\n if not ascending:\n expected = expected[::-1]\n\n self.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('ascending', [True, False])\n def test_sort_values_missing(self, data_missing_for_sorting, ascending):\n ser = pd.Series(data_missing_for_sorting)\n result = ser.sort_values(ascending=ascending)\n if ascending:\n expected = ser.iloc[[2, 0, 1]]\n else:\n expected = ser.iloc[[0, 2, 1]]\n self.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('ascending', [True, False])\n def test_sort_values_frame(self, data_for_sorting, ascending):\n df = pd.DataFrame({\"A\": [1, 2, 1],\n \"B\": data_for_sorting})\n result = df.sort_values(['A', 'B'])\n expected = pd.DataFrame({\"A\": [1, 1, 2],\n 'B': data_for_sorting.take([2, 0, 1])},\n index=[2, 0, 1])\n self.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('box', [pd.Series, lambda x: x])\n @pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])\n def test_unique(self, data, box, method):\n duplicated = box(data._from_sequence([data[0], data[0]]))\n\n result = method(duplicated)\n\n assert len(result) == 1\n assert isinstance(result, type(data))\n assert result[0] == duplicated[0]\n\n @pytest.mark.parametrize('na_sentinel', [-1, -2])\n def test_factorize(self, data_for_grouping, na_sentinel):\n labels, uniques = pd.factorize(data_for_grouping,\n na_sentinel=na_sentinel)\n expected_labels = np.array([0, 0, na_sentinel,\n na_sentinel, 1, 1, 0, 2],\n dtype=np.intp)\n expected_uniques = data_for_grouping.take([0, 4, 7])\n\n tm.assert_numpy_array_equal(labels, expected_labels)\n self.assert_extension_array_equal(uniques, expected_uniques)\n\n @pytest.mark.parametrize('na_sentinel', [-1, -2])\n def test_factorize_equivalence(self, data_for_grouping, na_sentinel):\n l1, u1 = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)\n l2, u2 = data_for_grouping.factorize(na_sentinel=na_sentinel)\n\n 
tm.assert_numpy_array_equal(l1, l2)\n self.assert_extension_array_equal(u1, u2)\n\n def test_combine_le(self, data_repeated):\n # GH 20825\n # Test that combine works when doing a <= (le) comparison\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 <= x2)\n expected = pd.Series([a <= b for (a, b) in\n zip(list(orig_data1), list(orig_data2))])\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 <= x2)\n expected = pd.Series([a <= val for a in list(orig_data1)])\n self.assert_series_equal(result, expected)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n with np.errstate(over='ignore'):\n expected = pd.Series(\n orig_data1._from_sequence([a + b for (a, b) in\n zip(list(orig_data1),\n list(orig_data2))]))\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n orig_data1._from_sequence([a + val for a in list(orig_data1)]))\n self.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('frame', [True, False])\n @pytest.mark.parametrize('periods, indices', [\n (-2, [2, 3, 4, -1, -1]),\n (0, [0, 1, 2, 3, 4]),\n (2, [-1, -1, 0, 1, 2]),\n ])\n def test_container_shift(self, data, frame, periods, indices):\n # https://github.com/pandas-dev/pandas/issues/22386\n subset = data[:5]\n data = pd.Series(subset, name='A')\n expected = pd.Series(subset.take(indices, allow_fill=True), name='A')\n\n if frame:\n result = data.to_frame(name='A').assign(B=1).shift(periods)\n expected = pd.concat([\n expected,\n pd.Series([1] * 5, name='B').shift(periods)\n ], axis=1)\n compare = self.assert_frame_equal\n else:\n result = data.shift(periods)\n compare = self.assert_series_equal\n\n compare(result, expected)\n\n @pytest.mark.parametrize(\"as_frame\", [True, False])\n def test_hash_pandas_object_works(self, data, as_frame):\n # https://github.com/pandas-dev/pandas/issues/23066\n data = pd.Series(data)\n if as_frame:\n data = data.to_frame()\n a = pd.util.hash_pandas_object(data)\n b = pd.util.hash_pandas_object(data)\n self.assert_equal(a, b)\n" ]
[ [ "pandas.util.hash_pandas_object", "pandas.Series", "pandas.util.testing.assert_numpy_array_equal", "pandas.DataFrame", "numpy.errstate", "numpy.array", "pandas.factorize" ] ]
Tommy-Liu/MovieQA_Contest
[ "4281bf4a731aa14a0d19f18adda31d59a4a297cb", "4281bf4a731aa14a0d19f18adda31d59a4a297cb" ]
[ "model/model_se_spec/6.py", "model/model_se_spec/basic.py" ]
[ "import tensorflow as tf\nfrom tensorflow.contrib import layers\n\nfrom config import MovieQAPath\nfrom raw_input import Input\n\n_mp = MovieQAPath()\nhp = {'emb_dim': 300, 'feat_dim': 512, 'dropout_rate': 0.1}\n\n\ndef dropout(x, training):\n return tf.layers.dropout(x, hp['dropout_rate'], training=training)\n\n\ndef l2_norm(x, axis=1):\n return tf.nn.l2_normalize(x, axis=axis)\n\n\ndef unit_norm(x, dim=2):\n return layers.unit_norm(x, dim=dim, epsilon=1e-12)\n\n\ndef l1_norm(x, axis=None, epsilon=1e-6, name=None):\n with tf.name_scope(name, \"l1_normalize\", [x]) as name:\n x = tf.convert_to_tensor(x, name=\"x\")\n square_sum = tf.reduce_sum(x, axis, keepdims=True)\n x_inv_norm = tf.reciprocal(tf.maximum(square_sum, epsilon))\n return tf.multiply(x, x_inv_norm, name=name)\n\n\ndef bhattacharyya_norm(x, axis=None, epsilon=1e-6, name=None):\n with tf.name_scope(name, \"l1_normalize\", [x]) as name:\n x = tf.convert_to_tensor(x, name=\"x\")\n x = tf.sqrt(x)\n square_sum = tf.reduce_sum(x, axis, keepdims=True)\n x_inv_norm = tf.reciprocal(tf.maximum(square_sum, epsilon))\n return tf.multiply(x, x_inv_norm, name=name)\n\n\nclass Model(object):\n def __init__(self, data, beta=0.0, training=False):\n self.data = data\n reg = layers.l2_regularizer(beta)\n # reg = layers.l1_regularizer(beta)\n initializer = tf.glorot_normal_initializer(seed=0)\n # constraint = tf.keras.constraints.NonNeg()\n group = 32\n skip = True\n norm = True\n drop = True\n with tf.variable_scope('Embedding_Linear'):\n self.raw_ques = self.data.ques\n self.raw_ans = self.data.ans\n # self.raw_subt = self.data.subt\n self.raw_subt = tf.boolean_mask(self.data.subt, tf.cast(self.data.spec, tf.bool))\n self.raw_ques = l2_norm(self.raw_ques)\n self.raw_ans = l2_norm(self.raw_ans)\n self.raw_subt = l2_norm(self.raw_subt)\n\n self.q_pass = tf.layers.dense(self.raw_ques, group, tf.nn.relu, True, initializer,\n kernel_regularizer=reg)\n self.q_pass = tf.reshape(self.q_pass, [group, 1, 1])\n self.q_pass = dropout(self.q_pass, training)\n self.a_pass = tf.layers.dense(self.raw_ques, group, tf.nn.relu, True, initializer,\n kernel_regularizer=reg)\n self.a_pass = tf.reshape(self.a_pass, [group, 1, 1])\n self.a_pass = dropout(self.a_pass, training)\n self.s_pass = tf.layers.dense(self.raw_ques, group, tf.nn.relu, True, initializer,\n kernel_regularizer=reg)\n self.s_pass = tf.reshape(self.s_pass, [group, 1, 1])\n self.s_pass = dropout(self.s_pass, training)\n # for i in range(5):\n # self.group_pass = self.group_pass + \\\n # tf.layers.dense(self.raw_ans[i], tf.nn.relu, True, initializer,\n # kernel_regularizer=reg)\n\n self.ques = tf.layers.dense(self.raw_ques, hp['emb_dim'] * group, tf.nn.tanh, True, initializer,\n kernel_regularizer=reg)\n self.ques = tf.split(self.ques, group, axis=1)\n self.ques = tf.stack(self.ques)\n self.ques = dropout(self.ques, training)\n self.ques = tf.reduce_sum(self.ques * self.q_pass, axis=0)\n\n self.ans = tf.layers.dense(self.raw_ans, hp['emb_dim'] * group, tf.nn.tanh, True, initializer,\n kernel_regularizer=reg)\n self.ans = tf.split(self.ans, group, axis=1)\n self.ans = tf.stack(self.ans)\n self.ans = dropout(self.ans, training)\n self.ans = tf.reduce_sum(self.ans * self.a_pass, axis=0)\n\n self.subt = tf.layers.dense(self.raw_subt, hp['emb_dim'] * group, tf.nn.tanh, True, initializer,\n kernel_regularizer=reg)\n self.subt = tf.split(self.subt, group, axis=1)\n self.subt = tf.stack(self.subt)\n self.subt = dropout(self.subt, training)\n self.subt = tf.reduce_sum(self.subt * self.s_pass, axis=0)\n\n if 
skip:\n self.ques = self.raw_ques + self.ques\n self.ans = self.raw_ans + self.ans\n self.subt = self.raw_subt + self.subt\n\n if norm:\n self.ques = l2_norm(self.ques)\n self.ans = l2_norm(self.ans)\n self.subt = l2_norm(self.subt)\n\n if drop:\n self.ques = dropout(self.ques, training)\n self.ans = dropout(self.ans, training)\n self.subt = dropout(self.subt, training)\n\n with tf.variable_scope('Response'):\n # (N, 1)\n self.sq = tf.matmul(self.subt, self.ques, transpose_b=True)\n self.sq = tf.nn.relu(self.sq)\n # self.sq = tf.nn.softmax(self.sq, 0)\n self.sq = dropout(self.sq, training)\n # self.sq = l2_norm(self.sq, 0)\n # self.sq = l1_norm(self.sq, 0)\n\n # (N, 5)\n self.sa = tf.matmul(self.subt, self.ans, transpose_b=True)\n self.sa = tf.nn.relu(self.sa)\n # self.sa = tf.nn.softmax(self.sa)\n self.sa = dropout(self.sa, training)\n # self.sa = l2_norm(self.sa, 0)\n # self.sa = l1_norm(self.sa, 0)\n\n # (N, 5)\n self.attn = self.sq + self.sa\n # self.attn = l1_norm(self.attn, axis=0)\n # (1, N, 5)\n self.attn = tf.expand_dims(self.attn, 0)\n # (5, N, 1)\n self.attn = tf.transpose(self.attn, [2, 1, 0])\n # (5, N, E_t)\n self.abs = tf.expand_dims(self.subt, 0) * self.attn\n # (5, E_t)\n self.abs = tf.reduce_sum(self.abs, axis=1)\n self.abs = l2_norm(self.abs, 1)\n # (5, 1)\n self.output = tf.reduce_sum(self.abs * self.ans, axis=1, keepdims=True)\n # (1, 5)\n self.output = tf.transpose(self.output)\n\n\ndef main():\n data = Input(split='train', mode='subt')\n model = Model(data)\n\n for v in tf.global_variables():\n print(v)\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n # config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n with tf.Session(config=config) as sess:\n sess.run([model.data.initializer, tf.global_variables_initializer()],\n feed_dict=data.feed_dict)\n\n # q, a, s = sess.run([model.ques_enc, model.ans_enc, model.subt_enc])\n # print(q.shape, a.shape, s.shape)\n # a, b, c, d = sess.run(model.tri_word_encodes)\n # print(a, b, c, d)\n # print(a.shape, b.shape, c.shape, d.shape)\n a, b = sess.run([model.subt, model.output])\n print(a, b)\n print(a.shape, b.shape)\n\n\nif __name__ == '__main__':\n main()\n", "import tensorflow as tf\n\nimport utils.model_utils as mu\nfrom model.basic_model import BasicModel\nfrom raw_input import Input\n\n\nclass SetupModel(BasicModel):\n def __init__(self, attn, checkpoint):\n super(SetupModel, self).__init__()\n self.data = Input()\n self.output = None\n self.saver = None\n self.best_saver = None\n self.train_gv_summaries_op = None\n self.val_init_op_list = None\n self.train_init_op_list = None\n self.train_op_list = None\n self.train_summaries_op = None\n self.val_op_list = None\n self.attn = attn\n self.checkpoint = checkpoint\n\n def eval_setup(self):\n val_answer = tf.argmax(self.output, axis=1)\n val_accuracy, val_accuracy_update, val_accuracy_initializer \\\n = mu.get_acc(self.data.gt, tf.argmax(self.output, axis=1), name='val_accuracy')\n\n val_summaries_op = tf.summary.scalar('val_accuracy', val_accuracy)\n self.val_init_op_list = [self.data.initializer, val_accuracy_initializer]\n self.val_op_list = [val_accuracy, val_accuracy_update, val_summaries_op]\n\n def train_setup(self):\n main_loss = mu.get_loss(self._hp['loss'], self.data.gt, self.output)\n regu_loss = tf.losses.get_regularization_loss()\n\n loss = main_loss + regu_loss\n\n train_answer = tf.argmax(self.output, axis=1)\n train_accuracy, train_accuracy_update, train_accuracy_initializer \\\n = 
mu.get_acc(self.data.gt, train_answer, name='train_accuracy')\n\n global_step = tf.train.get_or_create_global_step()\n\n decay_step = int(self._hp['decay_epoch'] * len(self.data))\n learning_rate = mu.get_lr(self._hp['decay_type'], self._hp['learning_rate'], global_step,\n decay_step, self._hp['decay_rate'])\n\n optimizer = mu.get_opt(self._hp['opt'], learning_rate, decay_step)\n\n grads_and_vars = optimizer.compute_gradients(loss)\n # grads_and_vars = [(tf.clip_by_norm(grad, 0.01, axes=[0]), var) if grad is not None else (grad, var)\n # for grad, var in grads_and_vars ]\n gradients, variables = list(zip(*grads_and_vars))\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = tf.group(optimizer.apply_gradients(grads_and_vars, global_step),\n train_accuracy_update)\n\n self.saver = tf.train.Saver(tf.global_variables())\n self.best_saver = tf.train.Saver(tf.global_variables())\n\n # Summary\n train_gv_summaries = []\n for idx, grad in enumerate(gradients):\n if grad is not None:\n train_gv_summaries.append(tf.summary.histogram('gradients/' + variables[idx].name, grad))\n train_gv_summaries.append(tf.summary.histogram(variables[idx].name, variables[idx]))\n\n train_summaries = [\n tf.summary.scalar('train_loss', loss),\n tf.summary.scalar('train_accuracy', train_accuracy),\n tf.summary.scalar('learning_rate', learning_rate)\n ]\n self.train_summaries_op = tf.summary.merge(train_summaries)\n self.train_gv_summaries_op = tf.summary.merge(train_gv_summaries + train_summaries)\n\n self.train_init_op_list = [self.data.initializer, train_accuracy_initializer]\n\n self.train_op_list = [train_op, loss, train_accuracy, global_step]\n\n # if attn:\n # self.train_op_list += [self.train_model.sq, self.train_model.sa,\n # self.train_data.gt, self.train_answer]\n # self.val_op_list += [self.val_model.sq, self.val_model.sa,\n # self.val_data.gt, self.val_answer]\n" ]
[ [ "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.name_scope", "tensorflow.convert_to_tensor", "tensorflow.split", "tensorflow.reduce_sum", "tensorflow.global_variables_initializer", "tensorflow.multiply", "tensorflow.contrib.layers.unit_norm", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.transpose", "tensorflow.nn.relu", "tensorflow.stack", "tensorflow.expand_dims", "tensorflow.global_variables", "tensorflow.cast", "tensorflow.Session", "tensorflow.layers.dense", "tensorflow.layers.dropout", "tensorflow.ConfigProto", "tensorflow.nn.l2_normalize", "tensorflow.sqrt", "tensorflow.glorot_normal_initializer", "tensorflow.maximum" ], [ "tensorflow.summary.scalar", "tensorflow.summary.histogram", "tensorflow.get_collection", "tensorflow.losses.get_regularization_loss", "tensorflow.global_variables", "tensorflow.train.get_or_create_global_step", "tensorflow.argmax", "tensorflow.summary.merge", "tensorflow.control_dependencies" ] ]
michaelyeah7/magics_sim
[ "3e5b7116769ebd175b170c92caff6e3c079b6382" ]
[ "examples/quadrupedal_NN.py" ]
[ "import jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport jax\nfrom jax import lax\nfrom envs import Qaudrupedal\nfrom agents import Deep_Qaudrupedal\nimport copy\nimport pickle\nfrom time import gmtime, strftime \nfrom jaxRBDL.Dynamics.ForwardDynamics import ForwardDynamics, ForwardDynamicsCore\nimport numpy as np\nfrom jax.api import jit\nfrom functools import partial\n\n\n\n\n\ndef loop(context, x):\n env, agent, params = context\n control = agent(env.state, params)\n prev_state = copy.deepcopy(env.state)\n _, reward, done, _ = env.step(env.state,control)\n\n return (env, agent), reward, done\n\n# @partial(jit, static_argnums=(0, 1))\ndef roll_out(env, agent, params):\n losses = 0.0\n for i in range(100):\n (env, agent), r, done= loop((env, agent,params), i)\n losses += r \n if done:\n print(\"end this episode because out of threshhold, total %d steps \" % i)\n env.past_reward = 0\n break\n \n return losses\n\n# f_grad = jax.grad(forward,argnums=1)\n\n\nf_grad = jax.grad(roll_out,argnums=2)\n\ndef loop_for_render(context, x):\n env, agent, params = context\n if(render==True):\n env.osim_render()\n control = agent(env.state, params)\n prev_state = copy.deepcopy(env.state)\n _, reward, done, _ = env.step(env.state,control)\n\n return (env, agent), reward, done\n\ndef roll_out_for_render(env, agent, params):\n gamma = 0.9\n losses = 0.0\n for i in range(100):\n (env, agent), r, done= loop_for_render((env, agent,params), i)\n losses = losses * gamma + r \n if done:\n print(\"end this episode because out of threshhold\")\n env.past_reward = 0\n break\n \n return losses\n\n\n\n# Deep\nenv = Qaudrupedal()\n#Quadrupedal has 14 joints\nagent = Deep_Qaudrupedal(\n env_state_size = 28,\n action_space = jnp.zeros(14),\n learning_rate = 0.1,\n gamma = 0.99,\n max_episode_length = 500,\n seed = 0\n )\n\n# load_params = False\n# update_params = True\n# render = False\n\nload_params = False\nupdate_params = True\nrender = True\n\nif load_params == True:\n loaded_params = pickle.load( open( \"examples/qudrupedal_params_episode_270_2021-03-21 16:41:06.txt\", \"rb\" ) )\n agent.params = loaded_params\n\nreward = 0\nloss = 0\nepisode_loss = []\nepisodes_num = 500\nT = 100\nfor j in range(episodes_num):\n\n loss = 0\n env.reset() \n print(\"episode:{%d}\" % j)\n loss = roll_out_for_render(env, agent, agent.params)\n\n #update the parameter\n if (update_params==True):\n # grads = f_grad(prev_state, agent.params, env, agent)\n grads = f_grad(env, agent, agent.params)\n print(\"grads\",grads)\n #get norm square\n total_norm_sqr = 0 \n for (dw,db) in grads:\n # print(\"previous dw\",dw)\n # dw = normalize(dw)\n # db = normalize(db[:,np.newaxis],axis =0).ravel()\n total_norm_sqr += np.linalg.norm(dw) ** 2\n total_norm_sqr += np.linalg.norm(db) ** 2\n # print(\"grads\",grads)\n #scale the gradient\n gradient_clip = 0.2\n scale = min(\n 1.0, gradient_clip / (total_norm_sqr**0.5 + 1e-4))\n\n agent.params = [(w - agent.lr * scale * dw, b - agent.lr * scale * db)\n for (w, b), (dw, db) in zip(agent.params, grads)]\n\n\n episode_loss.append(loss)\n print(\"loss is %f \" % loss)\n if (j%10==0 and j!=0 and update_params==True):\n with open(\"examples/qudrupedal_params\"+ \"_episode_%d_\" % j + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) +\".txt\", \"wb\") as fp: #Pickling\n pickle.dump(agent.params, fp)\n# reward_forloop = reward\n# print('reward_forloop = ' + str(reward_forloop))\nplt.plot(episode_loss[1:])\n\n#save plot and params\nplt.savefig('quadrupedal_loss'+ strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) + 
'.png')\n\n# fp.close()" ]
[ [ "matplotlib.pyplot.plot", "numpy.linalg.norm" ] ]
suliuzh/transformers
[ "f34372a9ff99f6bc8619ac83dc07f7afe6b92141", "f34372a9ff99f6bc8619ac83dc07f7afe6b92141" ]
[ "src/transformers/convert_pegasus_tf_to_pytorch.py", "src/transformers/modeling_mmbt.py" ]
[ "# coding=utf-8\n# Copyright 2020 Google and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Dict\n\nimport tensorflow as tf\nimport torch\nfrom tqdm import tqdm\n\nfrom transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer\nfrom transformers.configuration_pegasus import DEFAULTS, task_specific_params\n\n\nPATTERNS = [\n # replace left string with right string to get the relevant state_dict key (identical state dict to bart)\n [\"memory_attention\", \"encoder_attn\"],\n [\"attention\", \"attn\"],\n [\"/\", \".\"],\n [\".LayerNorm.gamma\", \"_layer_norm.weight\"],\n [\".LayerNorm.beta\", \"_layer_norm.bias\"],\n [\"r.layer_\", \"r.layers.\"],\n [\"output_proj\", \"out_proj\"],\n [\"ffn.dense_1.\", \"fc2.\"],\n [\"ffn.dense.\", \"fc1.\"],\n [\"ffn_layer_norm\", \"final_layer_norm\"],\n [\"kernel\", \"weight\"],\n [\"encoder_layer_norm.\", \"encoder.layer_norm.\"],\n [\"decoder_layer_norm.\", \"decoder.layer_norm.\"],\n [\"embeddings.weights\", \"shared.weight\"],\n]\n\n\ndef rename_state_dict_key(k):\n\n for pegasus_name, hf_name in PATTERNS:\n k = k.replace(pegasus_name, hf_name)\n return k\n\n\n# See appendix C of paper for all hyperparams\n\n\ndef convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:\n cfg_kwargs = DEFAULTS.copy()\n cfg_kwargs.update(cfg_updates)\n cfg = PegasusConfig(**cfg_kwargs)\n torch_model = PegasusForConditionalGeneration(cfg)\n sd = torch_model.model.state_dict()\n mapping = {}\n for k, v in tf_weights.items():\n new_k = rename_state_dict_key(k)\n if new_k not in sd:\n raise ValueError(f\"could not find new key {new_k} in state dict. 
(converted from {k})\")\n\n if \"dense\" in k or \"proj\" in new_k:\n v = v.T\n mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)\n assert v.shape == sd[new_k].shape, f\"{new_k}, {k}, {v.shape}, {sd[new_k].shape}\"\n # make sure embedding.padding_idx is respected\n mapping[\"shared.weight\"][cfg.pad_token_id] = torch.zeros_like(mapping[\"shared.weight\"][cfg.pad_token_id + 1])\n mapping[\"encoder.embed_tokens.weight\"] = mapping[\"shared.weight\"]\n mapping[\"decoder.embed_tokens.weight\"] = mapping[\"shared.weight\"]\n empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith(\"bias\") and k not in mapping}\n mapping.update(**empty_biases)\n missing, extra = torch_model.model.load_state_dict(mapping, strict=False)\n unexpected_missing = [\n k for k in missing if k not in [\"encoder.embed_positions.weight\", \"decoder.embed_positions.weight\"]\n ]\n assert unexpected_missing == [], f\"no matches found for the following torch keys {unexpected_missing}\"\n assert extra == [], f\"no matches found for the following tf keys {extra}\"\n return torch_model\n\n\ndef get_tf_weights_as_numpy(path=\"./ckpt/aeslc/model.ckpt-32000\") -> Dict:\n init_vars = tf.train.list_variables(path)\n tf_weights = {}\n ignore_name = [\"Adafactor\", \"global_step\"]\n for name, shape in tqdm(init_vars, desc=\"converting tf checkpoint to dict\"):\n skip_key = any([pat in name for pat in ignore_name])\n if skip_key:\n continue\n array = tf.train.load_variable(path, name)\n tf_weights[name] = array\n return tf_weights\n\n\ndef convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):\n # save tokenizer first\n dataset = Path(ckpt_path).parent.name\n desired_max_model_length = task_specific_params[f\"summarization_{dataset}\"][\"max_position_embeddings\"]\n tok = PegasusTokenizer.from_pretrained(\"sshleifer/pegasus\", model_max_length=desired_max_model_length)\n assert tok.model_max_length == desired_max_model_length\n tok.save_pretrained(save_dir)\n\n # convert model\n tf_weights = get_tf_weights_as_numpy(ckpt_path)\n cfg_updates = task_specific_params[f\"summarization_{dataset}\"]\n if dataset == \"large\":\n cfg_updates[\"task_specific_params\"] = task_specific_params\n torch_model = convert_pegasus(tf_weights, cfg_updates)\n torch_model.save_pretrained(save_dir)\n sd = torch_model.state_dict()\n sd.pop(\"model.decoder.embed_positions.weight\")\n sd.pop(\"model.encoder.embed_positions.weight\")\n torch.save(sd, Path(save_dir) / \"pytorch_model.bin\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\"tf_ckpt_path\", type=str, help=\"passed to tf.train.list_variables\")\n parser.add_argument(\"save_dir\", default=None, type=str, help=\"Path to the output PyTorch model.\")\n args = parser.parse_args()\n if args.save_dir is None:\n dataset = Path(args.tf_ckpt_path).parent.name\n args.save_dir = os.path.join(\"pegasus\", dataset)\n convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)\n", "# coding=utf-8\n# Copyright (c) Facebook, Inc. and its affiliates.\n# Copyright (c) HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch MMBT model. \"\"\"\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings\nfrom .modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput\nfrom .modeling_utils import ModuleUtilsMixin\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"MMBTConfig\"\n\n\nclass ModalEmbeddings(nn.Module):\n \"\"\"Generic Modal Embeddings which takes in an encoder, and a transformer embedding.\"\"\"\n\n def __init__(self, config, encoder, embeddings):\n super().__init__()\n self.config = config\n self.encoder = encoder\n self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)\n self.position_embeddings = embeddings.position_embeddings\n self.token_type_embeddings = embeddings.token_type_embeddings\n self.word_embeddings = embeddings.word_embeddings\n self.LayerNorm = embeddings.LayerNorm\n self.dropout = nn.Dropout(p=config.hidden_dropout_prob)\n\n def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):\n token_embeddings = self.proj_embeddings(self.encoder(input_modal))\n seq_length = token_embeddings.size(1)\n\n if start_token is not None:\n start_token_embeds = self.word_embeddings(start_token)\n seq_length += 1\n token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)\n\n if end_token is not None:\n end_token_embeds = self.word_embeddings(end_token)\n seq_length += 1\n token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)\n\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)\n position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(\n (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device\n )\n\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = token_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nMMBT_START_DOCSTRING = r\"\"\"\n MMBT model was proposed in\n `Supervised Multimodal Bitransformers for Classifying Images and Text <https://github.com/facebookresearch/mmbt>`__\n by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.\n It's a supervised multimodal bitransformer model that fuses information from text and other image encoders,\n and obtain state-of-the-art performance on various multimodal classification benchmark tasks.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.MMBTConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n transformer (:class: `~nn.Module`): A text transformer that is used by MMBT.\n It should have embeddings, encoder, and pooler attributes.\n encoder (:class: `~nn.Module`): Encoder for the second modality.\n It should take in a batch of modal inputs and return k, n dimension embeddings.\n\"\"\"\n\nMMBT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_modal (``torch.FloatTensor`` of shape ``(batch_size, ***)``):\n The other modality data. It will be the shape that the encoder for that type expects.\n e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width)\n input_ids (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``):\n Indices of input sequence tokens in the vocabulary.\n It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings.\n Indices can be obtained using :class:`~transformers.BertTokenizer`.\n See :meth:`transformers.PreTrainedTokenizer.encode` and\n :meth:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n modal_start_tokens (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification\n tasks.\n modal_end_tokens (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.\n attention_mask (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n modal_token_type_ids (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``:\n Segment token indices to indicate different portions of the non-text modality.\n The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality.\n position_ids (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? 
<../glossary.html#position-ids>`__\n modal_position_ids (``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`__\n head_mask (``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``, `optional`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n encoder_hidden_states (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare MMBT Model outputting raw hidden-states without any specific head on top.\",\n MMBT_START_DOCSTRING,\n)\nclass MMBTModel(nn.Module, ModuleUtilsMixin):\n def __init__(self, config, transformer, encoder):\n super().__init__()\n self.config = config\n self.transformer = transformer\n self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)\n\n @add_start_docstrings_to_callable(MMBT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_modal,\n input_ids=None,\n modal_start_tokens=None,\n modal_end_tokens=None,\n attention_mask=None,\n token_type_ids=None,\n modal_token_type_ids=None,\n position_ids=None,\n modal_position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Returns:\n\n Examples::\n\n # For example purposes. 
Not runnable.\n transformer = BertModel.from_pretrained('bert-base-uncased')\n encoder = ImageEncoder(args)\n mmbt = MMBTModel(config, transformer, encoder)\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_txt_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_txt_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n modal_embeddings = self.modal_encoder(\n input_modal,\n start_token=modal_start_tokens,\n end_token=modal_end_tokens,\n position_ids=modal_position_ids,\n token_type_ids=modal_token_type_ids,\n )\n\n input_modal_shape = modal_embeddings.size()[:-1]\n\n if token_type_ids is None:\n token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)\n\n txt_embeddings = self.transformer.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n\n embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)\n\n input_shape = embedding_output.size()[:-1]\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n else:\n attention_mask = torch.cat(\n [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1\n )\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(input_shape, device=device)\n else:\n encoder_attention_mask = torch.cat(\n [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1\n )\n\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n encoder_outputs = self.transformer.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.transformer.pooler(sequence_output)\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n\n@add_start_docstrings(\n \"\"\"MMBT Model with a sequence classification/regression head on top (a linear layer on top of\n the pooled output)\"\"\",\n MMBT_START_DOCSTRING,\n MMBT_INPUTS_DOCSTRING,\n)\nclass MMBTForClassification(nn.Module):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape 
``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n # For example purposes. Not runnable.\n transformer = BertModel.from_pretrained('bert-base-uncased')\n encoder = ImageEncoder(args)\n model = MMBTForClassification(config, transformer, encoder)\n outputs = model(input_modal, input_ids, labels=labels)\n loss, logits = outputs[:2]\n \"\"\"\n\n def __init__(self, config, transformer, encoder):\n super().__init__()\n self.num_labels = config.num_labels\n\n self.mmbt = MMBTModel(config, transformer, encoder)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(\n self,\n input_modal,\n input_ids=None,\n modal_start_tokens=None,\n modal_end_tokens=None,\n attention_mask=None,\n token_type_ids=None,\n modal_token_type_ids=None,\n position_ids=None,\n modal_position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n return_dict=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.mmbt(\n input_modal=input_modal,\n input_ids=input_ids,\n modal_start_tokens=modal_start_tokens,\n modal_end_tokens=modal_end_tokens,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n modal_token_type_ids=modal_token_type_ids,\n position_ids=position_ids,\n modal_position_ids=modal_position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "tensorflow.train.list_variables", "torch.zeros_like", "torch.tensor", "tensorflow.train.load_variable" ], [ "torch.ones", "torch.nn.Linear", "torch.nn.MSELoss", "torch.nn.CrossEntropyLoss", "torch.arange", "torch.cat", "torch.nn.Dropout" ] ]
smressle/yt
[ "29869405f139d9341101793a6e849497bd85f5ea" ]
[ "yt/frontends/boxlib/tests/test_outputs.py" ]
[ "\"\"\"\nBoxlib frontend tests\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2017, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom yt.testing import \\\n assert_equal, \\\n requires_file, \\\n units_override_check\nfrom yt.utilities.answer_testing.framework import \\\n requires_ds, \\\n small_patch_amr, \\\n data_dir_load, \\\n GridValuesTest\nfrom yt.frontends.boxlib.api import \\\n OrionDataset, \\\n NyxDataset, \\\n WarpXDataset, \\\n CastroDataset, \\\n MaestroDataset\nimport numpy as np \n\n# We don't do anything needing ghost zone generation right now, because these\n# are non-periodic datasets.\n_orion_fields = (\"temperature\", \"density\", \"velocity_magnitude\")\n_nyx_fields = (\"Ne\", \"Temp\", \"particle_mass_density\")\n_warpx_fields = (\"Ex\", \"By\", \"jz\")\n_castro_fields = (\"Temp\", \"density\", \"particle_count\")\n\nradadvect = \"RadAdvect/plt00000\"\n@requires_ds(radadvect)\ndef test_radadvect():\n ds = data_dir_load(radadvect)\n assert_equal(str(ds), \"plt00000\")\n for test in small_patch_amr(ds, _orion_fields):\n test_radadvect.__name__ = test.description\n yield test\n\nrt = \"RadTube/plt00500\"\n@requires_ds(rt)\ndef test_radtube():\n ds = data_dir_load(rt)\n assert_equal(str(ds), \"plt00500\")\n for test in small_patch_amr(ds, _orion_fields):\n test_radtube.__name__ = test.description\n yield test\n\nstar = \"StarParticles/plrd01000\"\n@requires_ds(star)\ndef test_star():\n ds = data_dir_load(star)\n assert_equal(str(ds), \"plrd01000\")\n for test in small_patch_amr(ds, _orion_fields):\n test_star.__name__ = test.description\n yield test\n\nLyA = \"Nyx_LyA/plt00000\"\n@requires_ds(LyA)\ndef test_LyA():\n ds = data_dir_load(LyA)\n assert_equal(str(ds), \"plt00000\")\n for test in small_patch_amr(ds, _nyx_fields,\n input_center=\"c\",\n input_weight=\"Ne\"):\n test_LyA.__name__ = test.description\n yield test\n\n@requires_file(LyA)\ndef test_nyx_particle_io():\n ds = data_dir_load(LyA)\n\n grid = ds.index.grids[0]\n npart_grid_0 = 7908 # read directly from the header\n assert_equal(grid['particle_position_x'].size, npart_grid_0)\n assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)\n assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)\n\n ad = ds.all_data()\n npart = 32768 # read directly from the header\n assert_equal(ad['particle_velocity_x'].size, npart)\n assert_equal(ad['DM', 'particle_velocity_y'].size, npart)\n assert_equal(ad['all', 'particle_velocity_z'].size, npart)\n\n assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))\n\n left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')\n right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')\n center = 0.5*(left_edge + right_edge)\n \n reg = ds.region(center, left_edge, right_edge)\n\n assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], \n reg['particle_position_x'] >= left_edge[0])))\n\n assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], \n reg['particle_position_y'] >= left_edge[1])))\n\n assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], \n reg['particle_position_z'] >= left_edge[2])))\n\nRT_particles = \"RT_particles/plt00050\"\n@requires_ds(RT_particles)\ndef test_RT_particles():\n ds = data_dir_load(RT_particles)\n 
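# the dataset should stringify to the plotfile name before the answer tests run\n 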
assert_equal(str(ds), \"plt00050\")\n for test in small_patch_amr(ds, _castro_fields):\n test_RT_particles.__name__ = test.description\n yield test\n\n\n@requires_file(RT_particles)\ndef test_castro_particle_io():\n ds = data_dir_load(RT_particles)\n\n grid = ds.index.grids[2]\n npart_grid_2 = 49 # read directly from the header\n assert_equal(grid['particle_position_x'].size, npart_grid_2)\n assert_equal(grid['Tracer', 'particle_position_y'].size, npart_grid_2)\n assert_equal(grid['all', 'particle_position_y'].size, npart_grid_2)\n\n ad = ds.all_data()\n npart = 49 # read directly from the header\n assert_equal(ad['particle_velocity_x'].size, npart)\n assert_equal(ad['Tracer', 'particle_velocity_y'].size, npart)\n assert_equal(ad['all', 'particle_velocity_y'].size, npart)\n\n left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')\n right_edge = ds.arr([0.25, 1.0, 1.0], 'code_length')\n center = 0.5*(left_edge + right_edge)\n \n reg = ds.region(center, left_edge, right_edge)\n\n assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], \n reg['particle_position_x'] >= left_edge[0])))\n\n assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], \n reg['particle_position_y'] >= left_edge[1])))\n\nlangmuir = \"LangmuirWave/plt00020_v2\"\n@requires_ds(langmuir)\ndef test_langmuir():\n ds = data_dir_load(langmuir)\n assert_equal(str(ds), \"plt00020_v2\")\n for test in small_patch_amr(ds, _warpx_fields, \n input_center=\"c\",\n input_weight=\"Ex\"):\n test_langmuir.__name__ = test.description\n yield test\n\nplasma = \"PlasmaAcceleration/plt00030_v2\"\n@requires_ds(plasma)\ndef test_plasma():\n ds = data_dir_load(plasma)\n assert_equal(str(ds), \"plt00030_v2\")\n for test in small_patch_amr(ds, _warpx_fields,\n input_center=\"c\",\n input_weight=\"Ex\"):\n test_plasma.__name__ = test.description\n yield test\n\nbeam = \"GaussianBeam/plt03008\"\n@requires_ds(beam)\ndef test_beam():\n ds = data_dir_load(beam)\n assert_equal(str(ds), \"plt03008\")\n for test in small_patch_amr(ds, _warpx_fields,\n input_center=\"c\",\n input_weight=\"Ex\"):\n test_beam.__name__ = test.description\n yield test\n\n@requires_file(plasma)\ndef test_warpx_particle_io():\n ds = data_dir_load(plasma)\n grid = ds.index.grids[0]\n\n # read directly from the header\n npart0_grid_0 = 344 \n npart1_grid_0 = 69632\n\n assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)\n assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)\n assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)\n\n # read directly from the header\n npart0 = 1360 \n npart1 = 802816 \n ad = ds.all_data()\n assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)\n assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)\n assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)\n\n np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0])\n np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0])\n\n left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')\n right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')\n center = 0.5*(left_edge + right_edge)\n \n reg = ds.region(center, left_edge, right_edge)\n\n assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], \n reg['particle_position_x'] >= left_edge[0])))\n\n assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], \n reg['particle_position_y'] >= left_edge[1])))\n\n 
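# the z coordinates must lie inside the selection region as well\n 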
assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], \n reg['particle_position_z'] >= left_edge[2])))\n\n\n_raw_fields = [('raw', 'Bx'), ('raw', 'Ey'), ('raw', 'jz')]\n\nraw_fields = \"Laser/plt00015\"\n@requires_ds(raw_fields)\ndef test_raw_fields():\n ds_fn = raw_fields\n for field in _raw_fields:\n yield GridValuesTest(ds_fn, field)\n\n\n@requires_file(rt)\ndef test_OrionDataset():\n assert isinstance(data_dir_load(rt), OrionDataset)\n\n@requires_file(LyA)\ndef test_NyxDataset():\n assert isinstance(data_dir_load(LyA), NyxDataset)\n\n@requires_file(RT_particles)\ndef test_CastroDataset():\n assert isinstance(data_dir_load(RT_particles), CastroDataset)\n\n@requires_file(plasma)\ndef test_WarpXDataset():\n assert isinstance(data_dir_load(plasma), WarpXDataset)\n\n@requires_file(rt)\ndef test_units_override():\n units_override_check(rt)\n\nnyx_no_particles = \"nyx_sedov_plt00086\"\n@requires_file(nyx_no_particles)\ndef test_nyx_no_part():\n assert isinstance(data_dir_load(nyx_no_particles), NyxDataset)\n\n fields = sorted(\n [('boxlib', 'H'), ('boxlib', 'He'), ('boxlib', 'MachNumber'),\n ('boxlib', 'Ne'), ('boxlib', 'Rank'), ('boxlib', 'StateErr'),\n ('boxlib', 'Temp'), ('boxlib', 'X(H)'), ('boxlib', 'X(He)'),\n ('boxlib', 'density'), ('boxlib', 'divu'), ('boxlib', 'eint_E'),\n ('boxlib', 'eint_e'), ('boxlib', 'entropy'), ('boxlib', 'forcex'),\n ('boxlib', 'forcey'), ('boxlib', 'forcez'), ('boxlib', 'kineng'),\n ('boxlib', 'logden'), ('boxlib', 'magmom'), ('boxlib', 'magvel'),\n ('boxlib', 'magvort'), ('boxlib', 'pressure'), ('boxlib', 'rho_E'),\n ('boxlib', 'rho_H'), ('boxlib', 'rho_He'), ('boxlib', 'rho_e'),\n ('boxlib', 'soundspeed'), ('boxlib', 'x_velocity'), ('boxlib', 'xmom'),\n ('boxlib', 'y_velocity'), ('boxlib', 'ymom'), ('boxlib', 'z_velocity'),\n ('boxlib', 'zmom')])\n\n ds = data_dir_load(nyx_no_particles)\n assert_equal(sorted(ds.field_list), fields)\n\nmsubch = 'maestro_subCh_plt00248'\n@requires_file(msubch)\ndef test_maestro_parameters():\n assert isinstance(data_dir_load(msubch), MaestroDataset)\n ds = data_dir_load(msubch)\n\n # Check a string parameter\n assert(ds.parameters['plot_base_name']==\"subCh_hot_baserun_plt\")\n assert(type(ds.parameters['plot_base_name']) is str)\n\n # Check boolean parameters: T or F\n assert(not ds.parameters['use_thermal_diffusion'])\n assert(type(ds.parameters['use_thermal_diffusion']) is bool)\n\n assert(ds.parameters['do_burning'])\n assert(type(ds.parameters['do_burning']) is bool)\n\n # Check a float parameter with a decimal point\n assert(ds.parameters['sponge_kappa']==float('10.00000000'))\n assert(type(ds.parameters['sponge_kappa']) is float)\n\n # Check a float parameter with E exponent notation\n assert(ds.parameters['small_dt']==float('0.1000000000E-09'))\n\n # Check an int parameter\n assert(ds.parameters['s0_interp_type']==3)\n assert(type(ds.parameters['s0_interp_type']) is int)\n" ]
[ [ "numpy.logical_and", "numpy.all" ] ]
xianjian-xie/pose-generation
[ "ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32" ]
[ "models/Global-Flow-Local-Attention/util/task.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom random import randint\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport random\n\n###################################################################\n# random mask generation\n###################################################################\n\n\ndef random_regular_mask(img):\n \"\"\"Generates a random regular hole\"\"\"\n mask = torch.ones_like(img)\n s = img.size()\n N_mask = random.randint(1, 5)\n limx = s[1] - s[1] / (N_mask + 1)\n limy = s[2] - s[2] / (N_mask + 1)\n for _ in range(N_mask):\n x = random.randint(0, int(limx))\n y = random.randint(0, int(limy))\n range_x = x + random.randint(int(s[1] / (N_mask + 7)), int(s[1] - x))\n range_y = y + random.randint(int(s[2] / (N_mask + 7)), int(s[2] - y))\n mask[:, int(x):int(range_x), int(y):int(range_y)] = 0\n\n if mask.size(0) == 3:\n mask = mask.chunk(3, dim=0)[0] \n return 1-mask\n\n\ndef center_mask(img):\n \"\"\"Generates a center hole with 1/4*W and 1/4*H\"\"\"\n mask = torch.ones_like(img)\n size = img.size()\n x = int(size[1] / 4)\n y = int(size[2] / 4)\n range_x = int(size[1] * 3 / 4)\n range_y = int(size[2] * 3 / 4)\n mask[:, x:range_x, y:range_y] = 0\n\n if mask.size(0) == 3:\n mask = mask.chunk(3, dim=0)[0]\n return 1-mask\n\n\ndef random_irregular_mask(img):\n \"\"\"Generates a random irregular mask with lines, circles and elipses\"\"\"\n transform = transforms.Compose([transforms.ToTensor()])\n mask = torch.ones_like(img)\n size = img.size()\n img = np.zeros((size[1], size[2], 1), np.uint8)\n\n # Set size scale\n max_width = 20\n if size[1] < 64 or size[2] < 64:\n raise Exception(\"Width and Height of mask must be at least 64!\")\n\n number = random.randint(16, 64)\n for _ in range(number):\n model = random.random()\n if model < 0.6:\n # Draw random lines\n x1, x2 = randint(1, size[1]), randint(1, size[1])\n y1, y2 = randint(1, size[2]), randint(1, size[2])\n thickness = randint(4, max_width)\n cv2.line(img, (x1, y1), (x2, y2), (1, 1, 1), thickness)\n\n elif model > 0.6 and model < 0.8:\n # Draw random circles\n x1, y1 = randint(1, size[1]), randint(1, size[2])\n radius = randint(4, max_width)\n cv2.circle(img, (x1, y1), radius, (1, 1, 1), -1)\n\n elif model > 0.8:\n # Draw random ellipses\n x1, y1 = randint(1, size[1]), randint(1, size[2])\n s1, s2 = randint(1, size[1]), randint(1, size[2])\n a1, a2, a3 = randint(3, 180), randint(3, 180), randint(3, 180)\n thickness = randint(4, max_width)\n cv2.ellipse(img, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)\n\n img = img.reshape(size[2], size[1])\n img = Image.fromarray(img*255)\n\n img_mask = transform(img)\n for j in range(size[0]):\n mask[j, :, :] = img_mask < 1\n\n if mask.size(0) == 3:\n mask = mask.chunk(3, dim=0)[0]\n return 1-mask\n\n###################################################################\n# multi scale for image generation\n###################################################################\n\n\ndef scale_img(img, size):\n scaled_img = F.interpolate(img, size=size, mode='bilinear', align_corners=True)\n return scaled_img\n\n\ndef scale_pyramid(img, num_scales):\n scaled_imgs = [img]\n\n s = img.size()\n\n h = s[2]\n w = s[3]\n\n for i in range(1, num_scales):\n ratio = 2**i\n nh = h // ratio\n nw = w // ratio\n scaled_img = scale_img(img, size=[nh, nw])\n scaled_imgs.append(scaled_img)\n\n scaled_imgs.reverse()\n return scaled_imgs\n\n" ]
[ [ "torch.ones_like", "torch.nn.functional.interpolate", "numpy.zeros" ] ]
milankl/misc
[ "40c74d927e6d18b44a6edb51bffda85cafb347e1" ]
[ "num/wave.py" ]
[ "## WAVE EQUATION\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## constants and grid\nH = 10\nL = 1e5\ng = 9.8\nF = 0.01/1e3/H #tau/rho0/H\n\ndx = 5e3\ndt = 300\n\ncfl = np.sqrt(g*H)*dt/dx\nprint('cfl = %1.3f' % cfl)\n\nT = 48*3600\nN = int(T/dt)+1\n\n## staggered grid\nxu = np.arange(0,L+dx,dx)\nxe = xu[:-1]+dx/2\n\nnxu = xu.shape[0] - 2\nnxe = xe.shape[0]\n\nt = np.arange(0,T+dt,dt)\nxxu,ttu = np.meshgrid(xu,t)\nxxe,tte = np.meshgrid(xe,t)\n\n## dx gradients\nGxu = (np.diag(np.ones(nxu+1),0) - np.diag(np.ones(nxe-1),-1))[:,:-1]/dx\nGxe = -Gxu.T\n\n## preallocate\n\nu = np.zeros((N,nxu))\neta = np.zeros((N,nxe))\n\nfor i in range(N-1):\n eta[i+1,:] = eta[i,:] - dt*H*Gxu.dot(u[i,:])\n u[i+1,:] = u[i,:] - g*dt*Gxe.dot(eta[i+1,:]) + dt*F\n\n#pad u with zeros\nu = np.concatenate((np.zeros((N,1)),u,np.zeros((N,1))),axis=1)\n\n## plotting\n\nfig,(ax1,ax2) = plt.subplots(1,2,sharex=True,sharey=True)\n\nc1 = ax1.contourf(xxu/1e3,ttu/3600,u*1e2)\nax1.set_xlabel(r'$x$ [km]')\nax1.set_ylabel(r'$t$ [h]')\nax1.set_title(r'$u$ [cm/s]')\nplt.colorbar(c1,ax=ax1)\n\nc2 = ax2.contourf(xxe/1e3,tte/3600,eta*1e2)\nax2.set_xlabel(r'$x$ [km]')\nax2.set_title(r'$\\eta$ [cm]')\nplt.colorbar(c2,ax=ax2)\n\nplt.show()\n\n\n\n\n" ]
[ [ "numpy.ones", "numpy.zeros", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "numpy.sqrt", "matplotlib.pyplot.colorbar", "numpy.meshgrid" ] ]
GiorgosNikitopoulos/Mine_Vulnerable_Code
[ "e8770698b501a3681b1cf1a978a4cc409d359b3c" ]
[ "models/cnn_model.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCNN model for text classification implemented in TensorFlow 2.\nThis implementation is based on the original paper of Yoon Kim [1] for classification using words.\nBesides, I add character-level input [2].\n\n# References\n- [1] [Convolutional Neural Networks for Sentence Classification](https://arxiv.org/abs/1408.5882)\n- [2] [Character-level Convolutional Networks for Text Classification](https://arxiv.org/abs/1509.01626)\n\n@author: Christopher Masch\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\nclass CNN(tf.keras.Model):\n __version__ = '0.2.0'\n\n def __init__(self, embedding_layer=None, num_words=None, embedding_dim=None,\n max_seq_length=100, kernel_sizes=[3, 4, 5], feature_maps=[100, 100, 100],\n use_char=False, char_max_length=200, alphabet_size=None, char_kernel_sizes=[3, 10, 20],\n char_feature_maps=[100, 100, 100], hidden_units=100, dropout_rate=None, nb_classes=None):\n \"\"\"\n Arguments:\n embedding_layer : If not defined with pre-trained embeddings it will be created from scratch (default: None)\n num_words : Maximal amount of words in the vocabulary (default: None)\n embedding_dim : Dimension of word representation (default: None)\n max_seq_length : Max length of word sequence (default: 100)\n kernel_sizes : An array of kernel sizes per channel (default: [3,4,5])\n feature_maps : Defines the feature maps per channel (default: [100,100,100])\n use_char : If True, char-based model will be added to word-based model\n char_max_length : Max length of char sequence (default: 200)\n alphabet_size : Amount of different chars used for creating embeddings (default: None)\n hidden_units : Hidden units per convolution channel (default: 100)\n dropout_rate : If defined, dropout will be added after embedding layer & concatenation (default: None)\n nb_classes : Number of classes which can be predicted\n \"\"\"\n super(CNN, self).__init__()\n\n\n # WORD-level\n self.embedding_layer = embedding_layer\n self.num_words = num_words\n self.max_seq_length = max_seq_length\n self.embedding_dim = embedding_dim\n self.kernel_sizes = kernel_sizes\n self.feature_maps = feature_maps\n # CHAR-level\n self.use_char = use_char\n self.char_max_length = char_max_length\n self.alphabet_size = alphabet_size\n self.char_kernel_sizes = char_kernel_sizes\n self.char_feature_maps = char_feature_maps\n # General\n self.hidden_units = hidden_units\n self.dropout_rate = dropout_rate\n self.nb_classes = nb_classes\n\n def build_model(self):\n \"\"\"\n Build the model\n\n Returns:\n Model : Keras model instance\n \"\"\"\n\n # Checks\n if len(self.kernel_sizes) != len(self.feature_maps):\n raise Exception('Please define `kernel_sizes` and `feature_maps` with the same amount.')\n if not self.embedding_layer and (not self.num_words or not self.embedding_dim):\n raise Exception('Please define `num_words` and `embedding_dim` if you are not using a pre-trained embedding.')\n if self.use_char and (not self.char_max_length or not self.alphabet_size):\n raise Exception('Please define `char_max_length` and `alphabet_size` if you are using char.')\n\n # Building word-embeddings from scratch\n if self.embedding_layer is None:\n self.embedding_layer = layers.Embedding(\n input_dim=self.num_words,\n output_dim=self.embedding_dim,\n input_length=self.max_seq_length,\n weights=None, trainable=True,\n name=\"word_embedding\"\n )\n\n # WORD-level\n word_input = layers.Input(shape=(self.max_seq_length,), dtype='int32', name='word_input')\n x = 
self.embedding_layer(word_input)\n if self.dropout_rate:\n x = layers.Dropout(self.dropout_rate)(x)\n x = self.building_block(x, self.kernel_sizes, self.feature_maps)\n x = layers.Activation('relu')(x)\n prediction = layers.Dense(self.nb_classes, activation='softmax')(x)\n #prediction2 = layers.Dense(self.nb_classes, activation='sigmoid')(x)\n\n\n # CHAR-level\n if self.use_char:\n char_input = layers.Input(shape=(self.char_max_length,), dtype='int32', name='char_input')\n x_char = layers.Embedding(\n input_dim=self.alphabet_size + 1,\n output_dim=50,\n input_length=self.char_max_length,\n name='char_embedding'\n )(char_input)\n x_char = self.building_block(x_char, self.char_kernel_sizes, self.char_feature_maps)\n x_char = layers.Activation('relu')(x_char)\n x_char = layers.Dense(self.nb_classes, activation='softmax')(x_char)\n\n prediction = layers.Average()([prediction, x_char])\n return tf.keras.Model(inputs=[word_input, char_input], outputs=prediction, name='CNN_Word_Char')\n\n return tf.keras.Model(inputs=word_input, outputs=prediction, name='CNN_Word')\n\n def building_block(self, input_layer, kernel_sizes, feature_maps):\n \"\"\"\n Creates several CNN channels in parallel and concatenates them\n\n Arguments:\n input_layer : Layer which will be the input for all convolutional blocks\n kernel_sizes: Array of kernel sizes (working as n-gram filter)\n feature_maps: Array of feature maps\n\n Returns:\n x : Building block with one or several channels\n \"\"\"\n channels = []\n for ix in range(len(kernel_sizes)):\n x = self.create_channel(input_layer, kernel_sizes[ix], feature_maps[ix])\n channels.append(x)\n\n # Check how many channels, one channel doesn't need a concatenation\n if (len(channels) > 1):\n x = layers.concatenate(channels)\n return x\n\n def create_channel(self, x, kernel_size, feature_map):\n \"\"\"\n Creates a layer, working channel wise\n\n Arguments:\n x : Input for convolutional channel\n kernel_size : Kernel size for creating Conv1D\n feature_map : Feature map\n\n Returns:\n x : Channel including (Conv1D + {GlobalMaxPooling & GlobalAveragePooling} + Dense [+ Dropout])\n \"\"\"\n x = layers.SeparableConv1D(feature_map, kernel_size=kernel_size, activation='relu',\n strides=1, padding='valid', depth_multiplier=4)(x)\n\n x1 = layers.GlobalMaxPooling1D()(x)\n x2 = layers.GlobalAveragePooling1D()(x)\n x = layers.concatenate([x1, x2])\n\n x = layers.Dense(self.hidden_units)(x)\n if self.dropout_rate:\n x = layers.Dropout(self.dropout_rate)(x)\n return x\n" ]
[ [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.GlobalAveragePooling1D", "tensorflow.keras.layers.concatenate", "tensorflow.keras.Model", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.GlobalMaxPooling1D", "tensorflow.keras.layers.SeparableConv1D", "tensorflow.keras.layers.Average", "tensorflow.keras.layers.Input" ] ]
dczifra/TWIST
[ "3b5461c33e7c3a83ae1028e0c4ba71b74be787f6" ]
[ "detection/convert-pretrain-to-detectron2.py" ]
[ "#!/usr/bin/env python\n\nimport pickle as pkl\nimport sys\nimport torch\nfrom lars import *\n\nif __name__ == \"__main__\":\n input = sys.argv[1]\n\n obj = torch.load(input, map_location=\"cpu\")\n if 'backbone' in obj:\n obj = obj[\"backbone\"]\n elif 'state_dict' in obj:\n obj = obj[\"state_dict\"]\n\n newmodel = {}\n for k, v in obj.items():\n old_k = k\n if \"layer\" not in k:\n k = \"stem.\" + k\n for t in [1, 2, 3, 4]:\n k = k.replace(\"layer{}\".format(t), \"res{}\".format(t + 1))\n for t in [1, 2, 3]:\n k = k.replace(\"bn{}\".format(t), \"conv{}.norm\".format(t))\n k = k.replace(\"downsample.0\", \"shortcut\")\n k = k.replace(\"downsample.1\", \"shortcut.norm\")\n print(old_k, \"->\", k)\n newmodel[k] = v.numpy()\n\n res = {\n \"model\": newmodel,\n \"__author__\": \"TWIST\",\n \"matching_heuristics\": True\n }\n\n assert sys.argv[2].endswith('.pkl')\n with open(sys.argv[2], \"wb\") as f:\n pkl.dump(res, f)\n" ]
[ [ "torch.load" ] ]
mehrdad-shokri/cs375
[ "00554ac497b4a2608ae475099f94ab9635d67b9e" ]
[ "2018/tutorials/neural_data.py" ]
[ "\"\"\"\nProvide a dataset_func which builds a tensorflow dataset object for neural data\nSee below for an example about how to use this function\n\"\"\"\nimport tensorflow as tf\nfrom tfutils.imagenet_data import color_normalize\n\nimport os, sys\nimport numpy as np\nimport pdb\nimport h5py\n\n\nclass Generator(object):\n \"\"\"\n Callable generator loading from hdf5\n \"\"\"\n NUM_IMGS = 5760\n\n def __init__(\n self, data_path, \n index_list=None,\n filter_func=None):\n assert os.path.isfile(data_path), \"%s does not exist!\" % data_path\n self.data_path = data_path\n\n # index_list specifies image indexes that will be looped over\n # if it's not provided, will loop over all images \n if not index_list:\n index_list = range(self.NUM_IMGS)\n self.index_list = index_list\n\n # filter_func is supposed to be a function which receives image index\n # and hdf5 data and then returns True of False\n self.filter_func = filter_func\n\n def __call__(self):\n with h5py.File(self.data_path, 'r') as hf:\n for im_indx in self.index_list:\n if not self.filter_func or self.filter_func(im_indx, hf):\n yield hf['images'][im_indx], \\\n hf['image_meta']['category'][im_indx], \\\n hf['image_meta']['object_name'][im_indx], \\\n hf['image_meta']['rotation_xy'][im_indx], \\\n hf['image_meta']['rotation_xz'][im_indx], \\\n hf['image_meta']['rotation_yz'][im_indx], \\\n hf['image_meta']['size'][im_indx], \\\n hf['image_meta']['translation_y'][im_indx], \\\n hf['image_meta']['translation_z'][im_indx], \\\n hf['image_meta']['variation_level'][im_indx], \\\n im_indx\n\n\ndef dataset_func(\n batch_size,\n crop_size=224,\n **generator_kwargs\n ):\n gen = Generator(**generator_kwargs)\n ds = tf.data.Dataset.from_generator(\n gen, \n (tf.uint8, tf.string, tf.string, \n tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32,\n tf.string, tf.int64\n ), \n )\n\n # Change content in dataset to a dict format\n def _tuple_to_dict(*tuple_value):\n dict_value = {\n 'images': tuple_value[0],\n 'category': tuple_value[1],\n 'object_name': tuple_value[2],\n 'rotation_xy': tuple_value[3],\n 'rotation_xz': tuple_value[4],\n 'rotation_yz': tuple_value[5],\n 'size': tuple_value[6],\n 'translation_y': tuple_value[7],\n 'translation_z': tuple_value[8],\n 'variation_level': tuple_value[9],\n 'index': tuple_value[10]\n }\n return dict_value\n ds = ds.map(\n _tuple_to_dict,\n num_parallel_calls=48)\n\n # Resize the image to 224*224, and color normalize it\n def _resize_normalize_image(value):\n image = value['images']\n image.set_shape([256, 256])\n image = tf.tile(\n tf.expand_dims(image, axis=2),\n [1, 1, 3]\n )\n image = tf.image.resize_bilinear(\n [image], \n [crop_size, crop_size])[0]\n image.set_shape([crop_size, crop_size, 3])\n image = color_normalize(image)\n value['images'] = image\n return value\n ds = ds.map(\n _resize_normalize_image,\n num_parallel_calls=48)\n\n # Make the iterator\n ds = ds.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size))\n #TODO: make this NOT drop the remainder if that data is important\n value = ds.make_one_shot_iterator().get_next()\n return value\n\n\nif __name__==\"__main__\":\n # Example codes about how to use this data provider\n ## Example of filter_func\n def _filter_func(idx, hf):\n return hf['image_meta']['variation_level'][idx] in ['V3', 'V6']\n\n # here data_path should be the path to your neural data hdf5 file\n data_iter = dataset_func(\n batch_size=64, \n data_path='/data/chengxuz/class/ventral_neural_data.hdf5',\n filter_func=_filter_func,\n )\n 
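# hide all GPUs so this example runs on the CPU\n 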
os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n sess = tf.Session()\n test_image = sess.run(data_iter)\n print(test_image.keys())\n print(test_image['images'].shape)\n print(test_image['variation_level'])\n" ]
[ [ "tensorflow.contrib.data.batch_and_drop_remainder", "tensorflow.expand_dims", "tensorflow.image.resize_bilinear", "tensorflow.Session", "tensorflow.data.Dataset.from_generator" ] ]
neurips2021vat/Variance-Aware-Training
[ "2dcd017ef06e81e299448bdd9da65fa682835127" ]
[ "models/segmentation/adv_unet_train_val_late/structure.py" ]
[ "import torch\nimport torch.nn as nn\nfrom time import time\nimport numpy as np\nfrom models.pytorch_revgrad import RevGrad\n\n\nclass DoubleConvBN(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, dropout):\n super().__init__()\n\n self.conv1 = nn.Conv2d(\n in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)\n )\n self.bn1 = nn.BatchNorm2d(out_channels)\n\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n self.conv3 = nn.Conv2d(\n out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)\n )\n self.bn3 = nn.BatchNorm2d(out_channels)\n self.drop1 = nn.Dropout(dropout)\n self.drop2 = nn.Dropout(dropout)\n self.drop3 = nn.Dropout(dropout)\n\n def forward(self, x):\n\n x = self.bn1(torch.relu(self.conv1(x)))\n x = x = self.drop1(x)\n identity_full = x\n\n x = self.bn2(torch.relu(self.conv2(x)))\n x = self.drop2(x)\n x += identity_full\n identity_1 = x\n\n x = self.bn3(torch.relu(self.conv3(x)))\n x = x = self.drop3(x)\n x += identity_full\n x += identity_1\n\n return x\n\n\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, dropout):\n super().__init__()\n\n self.conv1 = nn.Conv2d(\n in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)\n )\n\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)\n\n self.conv3 = nn.Conv2d(\n out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)\n )\n self.drop1 = nn.Dropout(dropout)\n self.drop2 = nn.Dropout(dropout)\n self.drop3 = nn.Dropout(dropout)\n\n def forward(self, x):\n\n x = self.drop1(torch.relu(self.conv1(x)))\n identity_full = x\n\n x = self.drop2(torch.relu(self.conv2(x)))\n x += identity_full\n identity_1 = x\n\n x = self.drop3(torch.relu(self.conv3(x)))\n x += identity_full\n x += identity_1\n\n return x\n\n\nclass Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, dropout):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2), DoubleConvBN(in_channels, out_channels, kernel_size, dropout)\n )\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n\nclass Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, dropout, bilinear=False):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2)\n self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout) # , in_channels // 2)\n else:\n self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout)\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = torch.nn.functional.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n\nclass 
OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass UNet(nn.Module):\n def __init__(self, hparams, bilinear=False):\n super(UNet, self).__init__()\n\n self.hparams = hparams\n self.n_channels = self.hparams['in_channels']\n self.n_classes = self.hparams['n_classes']\n self.bilinear = bilinear\n\n factor = 2 if bilinear else 1\n\n self.inc = DoubleConv(\n self.n_channels,\n self.hparams['n_filters_input'],\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n self.down1 = Down(\n self.hparams['n_filters_input'],\n self.hparams['n_filters_input'] * 2,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n self.down2 = Down(\n self.hparams['n_filters_input'] * 2,\n self.hparams['n_filters_input'] * 4,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n self.down3 = Down(\n self.hparams['n_filters_input'] * 4,\n self.hparams['n_filters_input'] * 8,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n\n self.down4 = Down(\n self.hparams['n_filters_input'] * 8,\n self.hparams['n_filters_input'] * 16 // factor,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n self.down5 = Down(\n self.hparams['n_filters_input'] * 16,\n self.hparams['n_filters_input'] * 32 // factor,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n )\n self.up1 = Up(\n self.hparams['n_filters_input'] * 32,\n self.hparams['n_filters_input'] * 16 // factor,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n bilinear,\n )\n self.up2 = Up(\n self.hparams['n_filters_input'] * 16,\n self.hparams['n_filters_input'] * 8 // factor,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n bilinear,\n )\n self.up3 = Up(\n self.hparams['n_filters_input'] * 8,\n self.hparams['n_filters_input'] * 4 // factor,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n bilinear,\n )\n self.up4 = Up(\n self.hparams['n_filters_input'] * 4,\n self.hparams['n_filters_input'] * 2,\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n bilinear,\n )\n self.up5 = Up(\n self.hparams['n_filters_input'] * 2,\n self.hparams['n_filters_input'],\n self.hparams['kernel_size'],\n self.hparams['dropout_rate'],\n bilinear,\n )\n self.outc = OutConv(self.hparams['n_filters_input'], self.n_classes)\n\n # gradient reversal layer\n self.rever1_6 = RevGrad()\n self.rever1_7 = RevGrad()\n self.rever2_6 = RevGrad()\n self.rever2_7 = RevGrad()\n\n n_filt = self.hparams['n_filters_input'] * (2 ** 5) * 4\n\n self.adv_fc1 = nn.Linear(n_filt, 300)\n self.adv_fc2 = nn.Linear(300, 300)\n self.adv_fc3 = nn.Linear(300, 300)\n self.adv_fc4 = nn.Linear(300, 1)\n\n def forward(self, x1, x2=None, train=False):\n\n if train:\n # main head (predictive)\n out, decoder_x = self.predictive_network(x1)\n # additional head (adversarial)\n out_s = self.adversarial_network(decoder_x, x2)\n return out, out_s\n else:\n # main head (predictive)\n out, _ = self.predictive_network(x1)\n return out\n\n def encoder(self, x):\n\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x6 = self.down5(x5)\n\n return x1, x2, x3, x4, x5, x6\n\n def decoder(self, x1, x2, x3, x4, x5, x6):\n\n # x = self.up1(x5, x4)\n # x = self.up2(x, x3)\n # x = self.up3(x, x2)\n # x = self.up4(x, x1)\n\n x = self.up1(x6, x5)\n x = self.up2(x, x4)\n x = 
self.up3(x, x3)\n x = self.up4(x, x2)\n x = self.up5(x, x1)\n\n return x\n\n def adversarial_network(self, x, x_s):\n x1, x2, x3, x4, x5, x6 = self.encoder(x_s)\n # x_s = self.decoder(x1, x2, x3, x4, x5)\n\n x6_s = self.rever1_6(x6).mean(dim=2).mean(dim=2)\n x7_s = self.rever1_6(x6).std(dim=2).std(dim=2)\n\n x6_p = self.rever2_6(x[5]).mean(dim=2).mean(dim=2)\n x7_p = self.rever2_6(x[5]).std(dim=(2)).std(dim=2)\n\n x = torch.cat([x6_s, x7_s, x6_p, x7_p], dim=1)\n\n x = torch.relu(self.adv_fc1(x))\n # x = torch.relu(self.adv_fc2(x))\n # x = torch.relu(self.adv_fc3(x))\n x = torch.sigmoid(self.adv_fc4(x))\n\n return x\n\n def predictive_network(self, x):\n x1, x2, x3, x4, x5, x6 = self.encoder(x)\n x = self.decoder(x1, x2, x3, x4, x5, x6)\n logits = self.outc(x)\n logits = torch.nn.functional.softmax(logits, dim=1)\n return logits, [x1, x2, x3, x4, x5, x6]\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.functional.softmax", "torch.nn.functional.pad", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.cat", "torch.nn.Dropout", "torch.nn.ConvTranspose2d" ] ]
jaymessina3/model-analysis
[ "1617375dd35e72447653e54330484c3a2950e4c6" ]
[ "tensorflow_model_analysis/extractors/extractor_test.py" ]
[ "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for extractor.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport tensorflow as tf\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.extractors import extractor\n\n\nclass ExtractorTest(testutil.TensorflowModelAnalysisTest):\n\n def testFilterRaisesValueError(self):\n with self.assertRaises(ValueError):\n with beam.Pipeline() as pipeline:\n _ = (\n pipeline\n | 'Create' >> beam.Create([])\n | 'Filter' >> extractor.Filter(include=['a'], exclude=['b']))\n\n def testIncludeFilter(self):\n with beam.Pipeline() as pipeline:\n got = (\n pipeline\n | 'Create' >> beam.Create([{\n 'a': 1,\n 'b': 2,\n 'c': 3,\n 'd': 4\n }])\n | 'Filter' >> extractor.Filter(include=['a', 'c']))\n\n def check_result(got):\n try:\n self.assertEqual(got, [{'a': 1, 'c': 3}])\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(got, check_result)\n\n def testIncludeFilterWithDict(self):\n with beam.Pipeline() as pipeline:\n got = (\n pipeline\n | 'Create' >> beam.Create([{\n 'a': 1,\n 'b': {\n 'b2': 2\n },\n 'c': {\n 'c2': {\n 'c21': 3,\n 'c22': 4\n }\n },\n 'd': {\n 'd2': 4\n }\n }])\n | 'Filter' >> extractor.Filter(include={\n 'b': {},\n 'c': {\n 'c2': {\n 'c21': {}\n }\n }\n }))\n\n def check_result(got):\n try:\n self.assertEqual(got, [{'b': {'b2': 2}, 'c': {'c2': {'c21': 3}}}])\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(got, check_result)\n\n def testExludeFilter(self):\n with beam.Pipeline() as pipeline:\n got = (\n pipeline\n | 'Create' >> beam.Create([{\n 'a': 1,\n 'b': 2,\n 'c': 3,\n 'd': 4\n }])\n | 'Filter' >> extractor.Filter(exclude=['b', 'd']))\n\n def check_result(got):\n try:\n self.assertEqual(got, [{'a': 1, 'c': 3}])\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(got, check_result)\n\n def testExcludeFilterWithDict(self):\n with beam.Pipeline() as pipeline:\n got = (\n pipeline\n | 'Create' >> beam.Create([{\n 'a': 1,\n 'b': {\n 'b2': 2\n },\n 'c': {\n 'c2': {\n 'c21': 3,\n 'c22': 4\n }\n },\n 'd': {\n 'd2': 4\n }\n }])\n | 'Filter' >> extractor.Filter(exclude={\n 'b': {},\n 'c': {\n 'c2': {\n 'c21': {}\n }\n }\n }))\n\n def check_result(got):\n try:\n self.assertEqual(got, [{\n 'a': 1,\n 'c': {\n 'c2': {\n 'c22': 4\n }\n },\n 'd': {\n 'd2': 4\n }\n }])\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(got, check_result)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
thangvubk/SoftGroup
[ "737123a7ee5a6b994c2ba82927677a800b87e242" ]
[ "softgroup/evaluation/instance_eval_util.py" ]
[ "import json\nimport os\n\nimport numpy as np\nfrom plyfile import PlyData\n\n\n# matrix: 4x4 np array\n# points Nx3 np array\ndef transform_points(matrix, points):\n assert len(points.shape) == 2 and points.shape[1] == 3\n num_points = points.shape[0]\n p = np.concatenate([points, np.ones((num_points, 1))], axis=1)\n p = np.matmul(matrix, np.transpose(p))\n p = np.transpose(p)\n p[:, :3] /= p[:, 3, None]\n return p[:, :3]\n\n\ndef export_ids(filename, ids):\n with open(filename, 'w') as f:\n for id in ids:\n f.write('%d\\n' % id)\n\n\ndef load_ids(filename):\n ids = open(filename).read().splitlines()\n ids = np.array(ids, dtype=np.int64)\n return ids\n\n\ndef read_mesh_vertices(filename):\n assert os.path.isfile(filename)\n with open(filename, 'rb') as f:\n plydata = PlyData.read(f)\n num_verts = plydata['vertex'].count\n vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)\n vertices[:, 0] = plydata['vertex'].data['x']\n vertices[:, 1] = plydata['vertex'].data['y']\n vertices[:, 2] = plydata['vertex'].data['z']\n return vertices\n\n\n# export 3d instance labels for instance evaluation\ndef export_instance_ids_for_eval(filename, label_ids, instance_ids):\n assert label_ids.shape[0] == instance_ids.shape[0]\n output_mask_path_relative = 'pred_mask'\n name = os.path.splitext(os.path.basename(filename))[0]\n output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)\n if not os.path.isdir(output_mask_path):\n os.mkdir(output_mask_path)\n insts = np.unique(instance_ids)\n zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)\n with open(filename, 'w') as f:\n for idx, inst_id in enumerate(insts):\n if inst_id == 0: # 0 -> no instance for this vertex\n continue\n output_mask_file = os.path.join(output_mask_path_relative,\n name + '_' + str(idx) + '.txt')\n loc = np.where(instance_ids == inst_id)\n label_id = label_ids[loc[0][0]]\n f.write('%s %d %f\\n' % (output_mask_file, label_id, 1.0))\n # write mask\n mask = np.copy(zero_mask)\n mask[loc[0]] = 1\n export_ids(output_mask_file, mask)\n\n\n# ------------ Instance Utils ------------ #\n\n\nclass Instance(object):\n instance_id = 0\n label_id = 0\n vert_count = 0\n med_dist = -1\n dist_conf = 0.0\n\n def __init__(self, mesh_vert_instances, instance_id):\n if (instance_id == -1):\n return\n self.instance_id = int(instance_id)\n self.label_id = int(self.get_label_id(instance_id))\n self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))\n\n def get_label_id(self, instance_id):\n return int(instance_id // 1000)\n\n def get_instance_verts(self, mesh_vert_instances, instance_id):\n return (mesh_vert_instances == instance_id).sum()\n\n def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n\n def to_dict(self):\n dict = {}\n dict['instance_id'] = self.instance_id\n dict['label_id'] = self.label_id\n dict['vert_count'] = self.vert_count\n dict['med_dist'] = self.med_dist\n dict['dist_conf'] = self.dist_conf\n return dict\n\n def from_json(self, data):\n self.instance_id = int(data['instance_id'])\n self.label_id = int(data['label_id'])\n self.vert_count = int(data['vert_count'])\n if ('med_dist' in data):\n self.med_dist = float(data['med_dist'])\n self.dist_conf = float(data['dist_conf'])\n\n def __str__(self):\n return '(' + str(self.instance_id) + ')'\n\n\ndef read_instance_prediction_file(filename, pred_path):\n lines = open(filename).read().splitlines()\n instance_info = {}\n abs_pred_path = os.path.abspath(pred_path)\n for 
line in lines:\n parts = line.split(' ')\n if len(parts) != 3:\n print('invalid instance prediction file. Expected (per line): \\\n [rel path prediction] [label id prediction] \\\n [confidence prediction]')\n if os.path.isabs(parts[0]):\n print('invalid instance prediction file. \\\n First entry in line must be a relative path')\n mask_file = os.path.join(os.path.dirname(filename), parts[0])\n mask_file = os.path.abspath(mask_file)\n # check that mask_file lives inside prediction path\n if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:\n print(('predicted mask {} in prediction text file {}' +\n 'points outside of prediction path.').format(mask_file, filename))\n\n info = {}\n info['label_id'] = int(float(parts[1]))\n info['conf'] = float(parts[2])\n instance_info[mask_file] = info\n return instance_info\n\n\ndef get_instances(ids, class_ids, class_labels, id2label):\n instances = {}\n for label in class_labels:\n instances[label] = []\n instance_ids = np.unique(ids)\n for id in instance_ids:\n if id == 0:\n continue\n inst = Instance(ids, id)\n if inst.label_id in class_ids:\n instances[id2label[inst.label_id]].append(inst.to_dict())\n return instances\n" ]
[ [ "numpy.ones", "numpy.transpose", "numpy.zeros", "numpy.copy", "numpy.array", "numpy.where", "numpy.unique" ] ]
Muetdhiver-lab/KerbolNavigator
[ "6b62cd0feb02390b393cfd96a16e0ff0162820e2" ]
[ "CaveManJool5.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 26 22:12:16 2019\r\n\r\n@author: vince\r\n\"\"\"\r\n\r\nfrom pykep import planet, DEG2RAD, epoch, AU\r\nfrom math import sqrt, pi\r\n\r\n\r\nfrom _Kerbol_System import Moho, Eve, Kerbin, Duna, Jool\r\n\r\nKAU = 13599840256 #m\r\n\r\n\r\ndef plot_innerKerbol(epoch = epoch(0)):\r\n \r\n \"\"\"\r\n Plots the inner Kerbol planets at epoch\r\n\r\n USAGE: plot_innerKerbol(epoch = epoch(34654, epoch.epoch_type.MJD)):\r\n * epoch: the epoch one wants the planets to be plotted at\r\n\t\r\n \"\"\"\r\n from pykep.orbit_plots import plot_planet\r\n from mpl_toolkits.mplot3d import Axes3D\r\n import matplotlib.pyplot as plt\r\n print(epoch)\r\n fig = plt.figure()\r\n ax1 = fig.gca(projection='3d')\r\n\r\n\t#plot_planet(ax,Moho,color = 'y', units = 1.0, t0 = epoch, legend=True)\r\n\t#plot_planet(ax,Eve,color = 'm', units = 1.0, t0 = epoch, legend=True)\r\n\t#lot_planet(ax=ax1,Kerbin,color = 'c', units = 1.0, t0, legend=True)\r\n #plot_planet(ax = ax1,Duna,color = 'r', units = 1.0, t0 = epoch, legend=True)\r\n plot_planet(Moho, t0=epoch, color = 'y', legend=True, units=KAU, ax=ax1)\r\n plot_planet(Eve, t0=epoch, color = 'm', legend=True, units=KAU, ax=ax1)\r\n plot_planet(Kerbin, t0=epoch, color = 'c', legend=True, units=KAU, ax=ax1)\r\n plot_planet(Duna, t0=epoch, color = 'r', legend=True, units=KAU, ax=ax1) \r\n plot_planet(Jool, t0=epoch, color = 'g', legend=True, units=KAU, ax=ax1) \r\n ax1.set_xlim3d(-3,3)\r\n ax1.set_ylim3d(-3,3)\r\n ax1.set_zlim3d(-3,3)\r\n plt.show()\r\n \r\n \"\"\"\r\n Toying with basic physics and limited input data\r\n \r\n What we got :\r\n > Ap\r\n > Pe\r\n > SMA\r\n > mu_c\r\n > altitude |r|\r\n > speed |v|\r\n > times\r\n \r\n And that's pretty much it.\r\n \r\n e = (Rp*Vp^2)/(mu_c) - 1\r\n \r\n e = 1 - 2*Pe/(Pe+Ap)\r\n \r\n Kepler : M - Mo = n (t - to)\r\n \r\n n = sqrt(mu_c/a^3)\r\n \r\n cos E = (e+cos(nu))/(1+e*cos(nu)) nu = true anomaly, E excentric anomaly\r\n \r\n if e small, nu ~ M + 2e*sin(M) + 1.25e^2*sin(2M)\r\n \r\n We don't have M, but we have t and to and mu_c\r\n \r\n \"\"\"\r\n \r\n\r\n \r\nclass CavemanNavigator:\r\n \r\n ''' \r\n goal\r\n From caveman data predict r2,v2, at t2 from available data.\r\n '''\r\n \r\n def __init__(self):\r\n self.Pe = 10157\r\n self.Ap = 10496\r\n self.SMA = (self.Pe+self.Ap)/2\r\n self.e = 1 - 2*self.Pe/(self.Pe+self.Ap)\r\n self.t0 = 0\r\n self.t1 = 0\r\n self.r0 = 0\r\n self.r1 = 0\r\n self.v0 = 0\r\n self.v1 = 0\r\n self.N = sqrt(Kerbin.mu_c/(self.SMA**3))\r\n \r\n def setT0(self, year, day, hour, minute, second):\r\n h2s = 3600\r\n d2s = 6*h2s\r\n y2s = 2556.5*h2s\r\n self.t0 = year*y2s + day*d2s + hour*h2s + minute*60 + second \r\n \r\n def setT1(self, year, day, hour, minute, second):\r\n h2s = 3600\r\n d2s = 6*h2s\r\n y2s = 2556.5*h2s\r\n self.t1 = year*y2s + day*d2s + hour*h2s + minute*60 + second \r\n \r\n def EccAnom(self):\r\n K = pi/180\r\n max_i = 50\r\n i = 0\r\n delta = 0.0001\r\n \r\n \r\n \r\nplot_innerKerbol(epoch(100))" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
lumbric/cvxpy
[ "bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93" ]
[ "cvxpy/interface/numpy_interface/sparse_matrix_interface.py" ]
[ "\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cvxpy.interface.numpy_interface.ndarray_interface import NDArrayInterface\nimport scipy.sparse as sp\nimport numpy as np\n\n\nclass SparseMatrixInterface(NDArrayInterface):\n \"\"\"\n An interface to convert constant values to the scipy sparse CSC class.\n \"\"\"\n TARGET_MATRIX = sp.csc_matrix\n\n @NDArrayInterface.scalar_const\n def const_to_matrix(self, value, convert_scalars=False):\n \"\"\"Convert an arbitrary value into a matrix of type self.target_matrix.\n\n Args:\n value: The constant to be converted.\n convert_scalars: Should scalars be converted?\n\n Returns:\n A matrix of type self.target_matrix or a scalar.\n \"\"\"\n # Convert cvxopt sparse to coo matrix.\n if isinstance(value, list):\n return sp.csc_matrix(value, dtype=np.double).T\n if value.dtype in [np.double, np.complex128]: # np.complex alias removed in newer NumPy\n dtype = value.dtype\n else:\n # Cast bool, int, etc to double\n dtype = np.double\n return sp.csc_matrix(value, dtype=dtype)\n\n def identity(self, size):\n \"\"\"Return an identity matrix.\n \"\"\"\n return sp.eye(size, size, format=\"csc\")\n\n def size(self, matrix):\n \"\"\"Return the dimensions of the matrix.\n \"\"\"\n return matrix.shape\n\n def scalar_value(self, matrix):\n \"\"\"Get the value of the passed matrix, interpreted as a scalar.\n \"\"\"\n return matrix[0, 0]\n\n def zeros(self, rows, cols):\n \"\"\"Return a matrix with all 0's.\n \"\"\"\n return sp.csc_matrix((rows, cols), dtype='float64')\n\n def reshape(self, matrix, size):\n \"\"\"Change the shape of the matrix.\n \"\"\"\n matrix = matrix.todense()\n matrix = super(SparseMatrixInterface, self).reshape(matrix, size)\n return self.const_to_matrix(matrix, convert_scalars=True)\n\n def block_add(self, matrix, block, vert_offset, horiz_offset, rows, cols,\n vert_step=1, horiz_step=1):\n \"\"\"Add the block to a slice of the matrix.\n\n Args:\n matrix: The matrix the block will be added to.\n block: The matrix/scalar to be added.\n vert_offset: The starting row for the matrix slice.\n horiz_offset: The starting column for the matrix slice.\n rows: The height of the block.\n cols: The width of the block.\n vert_step: The row step size for the matrix slice.\n horiz_step: The column step size for the matrix slice.\n \"\"\"\n block = self._format_block(matrix, block, rows, cols)\n slice_ = [slice(vert_offset, rows+vert_offset, vert_step),\n slice(horiz_offset, horiz_offset+cols, horiz_step)]\n # Convert to lil before changing sparsity structure.\n matrix[slice_[0], slice_[1]] = matrix[slice_[0], slice_[1]] + block\n" ]
[ [ "scipy.sparse.csc_matrix", "scipy.sparse.eye" ] ]
ElfoLiNk/CCrush-Bot
[ "4647ee2a8ac27fc68a36549f3097a35322288921" ]
[ "main.py" ]
[ "from PIL import Image\nfrom PIL import ImageGrab\nimport numpy as np\nfrom sklearn_decoder import ImgRecognizer\nimport win32api, win32con\nimport time\nimport debug_utils as dbg\nimport simple_solver\nimport cProfile\nimport pstats\n\n# excelent hardcoded values :)\n#board_box = (102, 90, 389, 650)\n#board_size_x = 4\nboard_box = (102, 90, 745, 667)\nboard_size_x = 9\nboard_size_y = 9\n\nimg_size = (board_box[2] - board_box[0], board_box[3] - board_box[1])\ncell_size = (int(img_size[0] / board_size_x), int(img_size[1] / board_size_y))\n\ngame_board = np.zeros((board_size_y, board_size_x), dtype=np.int32)\nrecognizer = ImgRecognizer()\n\n'''\n candy values:\n- 0 blue\n- 1 green\n- 2 orange\n- 3 purple\n- 4 red\n- 5 yellow\n- 6 chocolate'''\n\nmatch_list = [(0, 1, 13, 19), (2, 3, 14, 20), (4, 5, 15, 21), (6, 7, 18, 22), (8, 9, 16, 23), (10, 11, 17, 24)]\n\nspecial_candies = [1, 3, 5, 7, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\nsimple_candies = [0, 2, 4, 6, 8, 10]\nstriped_candies_h = [1, 3, 5, 7, 9, 11]\nstriped_candies_v = range(13, 19)\n\nstriped_candies = striped_candies_h[:]\nstriped_candies.extend(striped_candies_v)\n\nwrapped_candies = range(19, 25)\nchocolate = [12]\n\nboard_dict = {0: 'blue ', 1: 's_h_blue ', 2: 'green ', 3: 's_h_green ', 4: 'orange ',\n 5: 's_h_orange ',\n 6: 'purple ', 7: 's_h_purple ', 8: 'red ', 9: 's_h_red ', 10: 'yellow ',\n 11: 's_h_yellow ',\n 12: 'chocolate', 13: 's_v_blue ', 14: 's_v_green ', 15: 's_v_orange ', 16: 's_v_red ',\n 17: 's_v_yellow ', 18: 's_v_purple ', 19: 'blue_wrapped', 20: 'green_wrapped', 21: 'orange_wrapped',\n 22: 'purple_wrapped', 23: 'red_wrapped', 24: 'yellow_wrapped', -1: 'empty '}\n\n\n# 3 candies explode for 60 points\n# 4 candies exploder for 120 create striped candy - striped candy explodes the whole vertical line\n# 5 in a line create a chocolate sprinkle. 
swipe it with a candy and it explodes candies of that color from the board\n\n\n# windows coords\ndef win32_click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)\n\n\ndef get_desktop_coords(cell):\n x = int(board_box[0] + cell[1] * cell_size[0] + cell_size[0] / 2)\n y = int(board_box[1] + cell[0] * cell_size[1] + cell_size[1] / 2)\n return x, y\n\n\ndef do_move(move):\n start = move[0]\n end = move[1]\n\n start_w = get_desktop_coords(start)\n end_w = get_desktop_coords(end)\n\n win32api.SetCursorPos(start_w)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, start_w[0], start_w[1], 0, 0)\n time.sleep(0.3)\n win32api.SetCursorPos(end_w)\n time.sleep(0.3)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, end_w[0], end_w[1], 0, 0)\n\n win32api.SetCursorPos((1100, 1100))\n\n\ndef grab_board():\n global game_board\n img = ImageGrab.grab()\n\n # img = Image.open('board.bmp')\n img = img.crop(board_box)\n # img.save('board.bmp')\n for y in range(0, board_size_y):\n for x in range(0, board_size_x):\n cell_box = (x * cell_size[0], y * cell_size[1], (x + 1) * cell_size[0], (y + 1) * cell_size[1])\n cell = img.crop(cell_box)\n cell.save('Cells/{}_{}.bmp'.format(y, x))\n game_board[y][x] = recognizer.predict(cell)\n\n dbg.print_board(game_board)\n return img\n\n\nref_img = None\n\n\ndef board_is_moving():\n global ref_img\n img = ImageGrab.grab()\n img = img.crop(board_box)\n img = img.resize((int(img.size[0] / 4), int(img.size[1] / 4)), Image.NEAREST)\n\n has_movement = True\n if ref_img:\n has_movement = compare_images(img, ref_img, threshold=100) > 100\n\n ref_img = img\n return has_movement\n\n\ndef are_pixels_equal(p1, p2, threshold):\n diff = 0\n for i in range(3):\n diff += abs(p1[i] - p2[i])\n return diff < threshold\n\n\ndef compare_images(current, reference, threshold):\n current_data = np.array(current.getdata())\n ref_data = np.array(reference.getdata())\n\n diff_pixels = 0\n total_size = current.size[0] * current.size[1]\n for i in range(0, total_size - 3, 3):\n if i == 22881:\n break\n if not are_pixels_equal(current_data[i], ref_data[i], threshold):\n diff_pixels += 1\n\n print(diff_pixels)\n return diff_pixels\n\n\nbackground_img = Image.open('background.bmp')\nbackground_img = background_img.resize((int(background_img.size[0] / 4), int(background_img.size[1] / 4)),\n Image.NEAREST)\n\n\ndef main():\n recognizer.train()\n solver = simple_solver.SimpleSolver()\n img_end_game = Image.open('end_screen.bmp')\n img_end_game = img_end_game.resize((int(img_end_game.size[0] / 4), int(img_end_game.size[1] / 4)), Image.NEAREST)\n total_moves = 0\n while True:\n if not board_is_moving():\n board_img = grab_board()\n board_img = board_img.resize((int(board_img.size[0] / 4), int(board_img.size[1] / 4)), Image.NEAREST)\n if compare_images(board_img, img_end_game, 10) < 3000:\n break\n score, move = solver.solve_board(game_board)\n print('\\nBest move found. Score = {0}, Move = {1}'.format(score, move))\n do_move(move)\n total_moves += 1\n time.sleep(0.4)\n print('Total moves done: ' + str(total_moves))\n\n\nif __name__ == '__main__':\n main()\n # cProfile.run('main()', filename='stats.txt')\n # stats = pstats.Stats('stats.txt').sort_stats('cumulative')\n # stats.print_stats()\n\n # recognizer.train()\n" ]
[ [ "numpy.zeros" ] ]
remaindere/p4-ocr-hansarang
[ "9ae7e64e10bd15f373f8540729588361f0a692f1" ]
[ "demo/dataset.py" ]
[ "import csv\nimport os\nimport random\nimport torch\nfrom PIL import Image, ImageOps\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport cv2\nimport numpy as np\n\nSTART = \"<SOS>\"\nEND = \"<EOS>\"\nPAD = \"<PAD>\"\nSPECIAL_TOKENS = [START, END, PAD]\n\n\n\n# Rather ignorant way to encode the truth, but at least it works.\ndef encode_truth(truth, token_to_id):\n\n truth_tokens = truth.split()\n for token in truth_tokens:\n if token not in token_to_id:\n raise Exception(\"Truth contains unknown token\")\n truth_tokens = [token_to_id[x] for x in truth_tokens]\n if '' in truth_tokens: truth_tokens.remove('')\n return truth_tokens\n\n\ndef load_vocab(tokens_paths):\n tokens = []\n tokens.extend(SPECIAL_TOKENS)\n for tokens_file in tokens_paths:\n with open(tokens_file, \"r\") as fd:\n reader = fd.read()\n for token in reader.split(\"\\n\"):\n if token not in tokens:\n tokens.append(token)\n token_to_id = {tok: i for i, tok in enumerate(tokens)}\n id_to_token = {i: tok for i, tok in enumerate(tokens)}\n return token_to_id, id_to_token\n\n\ndef split_gt(groundtruth, proportion=1.0, test_percent=None):\n root = os.path.join(os.path.dirname(groundtruth), \"images\")\n with open(groundtruth, \"r\") as fd:\n data=[]\n for line in fd:\n data.append(line.strip().split(\"\\t\"))\n random.shuffle(data)\n dataset_len = round(len(data) * proportion)\n data = data[:dataset_len]\n data = [[os.path.join(root, x[0]), x[1]] for x in data]\n \n if test_percent:\n test_len = round(len(data) * test_percent)\n return data[test_len:], data[:test_len]\n else:\n return data\n\n\ndef collate_batch(data):\n max_len = max([len(d[\"truth\"][\"encoded\"]) for d in data])\n # Padding with -1, will later be replaced with the PAD token\n padded_encoded = [\n d[\"truth\"][\"encoded\"] + (max_len - len(d[\"truth\"][\"encoded\"])) * [-1]\n for d in data\n ]\n return {\n \"path\": [d[\"path\"] for d in data],\n \"image\": torch.stack([d[\"image\"] for d in data], dim=0),\n \"truth\": {\n \"text\": [d[\"truth\"][\"text\"] for d in data],\n \"encoded\": torch.tensor(padded_encoded)\n },\n }\n\ndef collate_eval_batch(data):\n max_len = max([len(d[\"truth\"][\"encoded\"]) for d in data])\n # Padding with -1, will later be replaced with the PAD token\n padded_encoded = [\n d[\"truth\"][\"encoded\"] + (max_len - len(d[\"truth\"][\"encoded\"])) * [-1]\n for d in data\n ]\n return {\n \"path\": [d[\"path\"] for d in data],\n \"file_path\":[d[\"file_path\"] for d in data],\n \"image\": torch.stack([d[\"image\"] for d in data], dim=0),\n \"truth\": {\n \"text\": [d[\"truth\"][\"text\"] for d in data],\n \"encoded\": torch.tensor(padded_encoded)\n },\n }\n\nclass LoadDataset(Dataset):\n \"\"\"Load Dataset\"\"\"\n\n def __init__(\n self,\n groundtruth,\n tokens_file,\n crop=False,\n transform=None,\n rgb=3,\n ):\n \"\"\"\n Args:\n groundtruth (string): Path to ground truth TXT/TSV file\n tokens_file (string): Path to tokens TXT file\n ext (string): Extension of the input files\n crop (bool, optional): Crop images to their bounding boxes [Default: False]\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n super(LoadDataset, self).__init__()\n self.crop = crop\n self.transform = transform\n self.rgb = rgb\n self.token_to_id, self.id_to_token = load_vocab(tokens_file)\n self.data = [\n {\n \"path\": p,\n \"truth\": {\n \"text\": truth,\n \"encoded\": [\n self.token_to_id[START],\n *encode_truth(truth, self.token_to_id),\n self.token_to_id[END],\n ],\n },\n }\n for 
p, truth in groundtruth\n ]\n\n def rotate_img(self, img):\n '''\n input : PIL image\n output : rotated image\n '''\n img = np.array(img)\n prob = np.random.rand(1)\n if prob < 0.5:\n img_rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n else:\n img_rotated = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)\n return Image.fromarray(img_rotated)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n item = self.data[i]\n image = Image.open(item[\"path\"])\n if self.rgb == 3:\n image = image.convert(\"RGB\")\n elif self.rgb == 1:\n image = image.convert(\"L\")\n else:\n raise NotImplementedError\n\n if image.size[0] / image.size[1] <= 0.6:\n image = self.rotate_img(image)\n\n if self.crop:\n # Image needs to be inverted because the bounding box cuts off black pixels,\n # not white ones.\n bounding_box = ImageOps.invert(image).getbbox()\n image = image.crop(bounding_box)\n\n if self.transform:\n image = self.transform(image)\n\n return {\"path\": item[\"path\"], \"truth\": item[\"truth\"], \"image\": image}\n\nclass LoadEvalDataset(Dataset):\n \"\"\"Load Dataset\"\"\"\n\n def __init__(\n self,\n groundtruth,\n token_to_id,\n id_to_token,\n crop=False,\n transform=None,\n rgb=3,\n ):\n \"\"\"\n Args:\n groundtruth (string): Path to ground truth TXT/TSV file\n tokens_file (string): Path to tokens TXT file\n ext (string): Extension of the input files\n crop (bool, optional): Crop images to their bounding boxes [Default: False]\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n super(LoadEvalDataset, self).__init__()\n self.crop = crop\n self.rgb = rgb\n self.token_to_id = token_to_id\n self.id_to_token = id_to_token\n self.transform = transform\n self.data = [\n {\n \"path\": p,\n \"file_path\":p1,\n \"truth\": {\n \"text\": truth,\n \"encoded\": [\n self.token_to_id[START],\n *encode_truth(truth, self.token_to_id),\n self.token_to_id[END],\n ],\n },\n }\n for p, p1,truth in groundtruth\n ]\n\n def __len__(self):\n return len(self.data)\n\n def rotate_img(self, img):\n '''\n input : PIL image\n output : rotated image\n '''\n img = np.array(img)\n prob = np.random.rand(1)\n if prob < 0.5:\n img_rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n else:\n img_rotated = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)\n return Image.fromarray(img_rotated)\n\n def __getitem__(self, i):\n item = self.data[i]\n image = Image.open(item[\"path\"])\n if self.rgb == 3:\n image = image.convert(\"RGB\")\n elif self.rgb == 1:\n image = image.convert(\"L\")\n else:\n raise NotImplementedError\n if image.size[0] / image.size[1] <= 0.6:\n image = self.rotate_img(image)\n if self.crop:\n # Image needs to be inverted because the bounding box cuts off black pixels,\n # not white ones.\n bounding_box = ImageOps.invert(image).getbbox()\n image = image.crop(bounding_box)\n\n if self.transform:\n image = self.transform(image)\n\n return {\"path\": item[\"path\"], \"file_path\":item[\"file_path\"],\"truth\": item[\"truth\"], \"image\": image}\n\ndef dataset_loader(options, transformed):\n # Read data\n train_data, valid_data = [], [] \n if options.data.random_split:\n for i, path in enumerate(options.data.train):\n prop = 1.0\n if len(options.data.dataset_proportions) > i:\n prop = options.data.dataset_proportions[i]\n train, valid = split_gt(path, prop, options.data.test_proportions)\n train_data += train\n valid_data += valid\n else:\n for i, path in enumerate(options.data.train):\n prop = 1.0\n if len(options.data.dataset_proportions) > i:\n prop = 
options.data.dataset_proportions[i]\n train_data += split_gt(path, prop)\n for i, path in enumerate(options.data.test):\n valid = split_gt(path)\n valid_data += valid\n\n # Load data\n train_dataset = LoadDataset(\n train_data, options.data.token_paths, crop=options.data.crop, transform=transformed, rgb=options.data.rgb\n )\n train_data_loader = DataLoader(\n train_dataset,\n batch_size=options.batch_size,\n shuffle=True,\n num_workers=options.num_workers,\n collate_fn=collate_batch,\n )\n\n valid_dataset = LoadDataset(\n valid_data, options.data.token_paths, crop=options.data.crop, transform=transformed, rgb=options.data.rgb\n )\n valid_data_loader = DataLoader(\n valid_dataset,\n batch_size=options.batch_size,\n shuffle=False,\n num_workers=options.num_workers,\n collate_fn=collate_batch,\n )\n return train_data_loader, valid_data_loader, train_dataset, valid_dataset\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.tensor", "numpy.random.rand", "numpy.array" ] ]
dweiss044/multiclass_tissue_segmentation
[ "6d71cf12adae96bbce87d14e8423acb5866c2766" ]
[ "unet3d/augment.py" ]
[ "import numpy as np\nimport nibabel as nib\nfrom nilearn.image import new_img_like, resample_to_img\nimport random\nimport itertools\n\nfrom scipy.ndimage import affine_transform\nfrom math import pi\nfrom transforms3d import affines, euler\n\ndef scale_image(image, scale_factor):\n scale_factor = np.asarray(scale_factor)\n new_affine = np.copy(image.affine)\n new_affine[:3, :3] = image.affine[:3, :3] * scale_factor\n new_affine[:, 3][:3] = image.affine[:, 3][:3] + (image.shape * np.diag(image.affine)[:3] * (1 - scale_factor)) / 2\n return new_img_like(image, data=image.get_data(), affine=new_affine)\n\n\ndef flip_image(image, axis):\n try:\n new_data = np.copy(image.get_data())\n for axis_index in axis:\n new_data = np.flip(new_data, axis=axis_index)\n except TypeError:\n new_data = np.flip(image.get_data(), axis=axis)\n return new_img_like(image, data=new_data)\n\n\ndef random_flip_dimensions(n_dimensions):\n axis = list()\n for dim in range(n_dimensions):\n if random_boolean():\n axis.append(dim)\n return axis\n\n\ndef random_scale_factor(n_dim=3, mean=1, std=0.25):\n return np.random.normal(mean, std, n_dim)\n\n\ndef random_boolean():\n return np.random.choice([True, False])\n\n\ndef distort_image(image, flip_axis=None, scale_factor=None):\n if flip_axis:\n image = flip_image(image, flip_axis)\n if scale_factor is not None:\n image = scale_image(image, scale_factor)\n return image\n\ndef img_warp(img, vec, theta=15, sign=1, offset=0, scale=1.1, shear=0.1, interpolation=1):\n \"\"\"Training data augmentation with random affine transformation\"\"\"\n if len(img.shape) < 4:\n img = img[np.newaxis]\n # Rotation\n vec /= np.sqrt(np.sum(vec ** 2))\n R = euler.axangle2mat(vec, theta)\n \n # Scale/zoom\n Z = np.ones(3) * scale ** sign\n \n # Translation\n c_in = np.array(img.shape[1:]) // 2\n T = - (c_in).dot((R * Z).T) + c_in + offset\n \n # Shear\n S = shear\n \n # Compose affine\n mat = affines.compose(T, R, Z, S)\n \n # Apply warp\n img_warped = affine_transform(img, mat, order=interpolation) # Trilinear\n \n return np.squeeze(img_warped)\n\ndef augment_data(data, truth, affine, scale_deviation=None, flip=True, warp_params=None):\n n_dim = len(truth.shape)\n if scale_deviation:\n scale_factor = random_scale_factor(n_dim, std=scale_deviation)\n else:\n scale_factor = None\n if flip:\n flip_axis = random_flip_dimensions(n_dim)\n else:\n flip_axis = None\n data_list = list()\n\n if warp_params is not None and np.random.random() < 0.75:\n vec = np.random.normal(0, 1, 3)\n theta = np.random.uniform(- warp_params['theta_max'], warp_params['theta_max'], 1) * pi / 180\n sign = -1 if np.random.random() < 0.5 else 1\n offset = np.random.uniform(- warp_params['offset_max'], warp_params['offset_max'], 3)\n scale = np.random.uniform(1, warp_params['scale_max'], 1)\n shear = np.random.uniform(- warp_params['shear_max'], warp_params['shear_max'], 3)\n \n\n for data_index in range(data.shape[0]):\n image = get_image(data[data_index], affine)\n warped = get_image(img_warp(data[data_index], vec, theta, sign, offset, scale, shear, interpolation=1), affine)\n data_list.append(resample_to_img(warped,\n image, interpolation=\"continuous\").get_data())\n data = np.asarray(data_list)\n truth_image = get_image(truth, affine)\n warped = get_image(img_warp(truth, vec, theta, sign, offset, scale, shear, interpolation=0), affine)\n truth_data = resample_to_img(warped,\n truth_image, interpolation=\"nearest\").get_data()\n else:\n for data_index in range(data.shape[0]):\n image = get_image(data[data_index], affine)\n 
data_list.append(resample_to_img(distort_image(image,flip_axis=flip_axis,scale_factor=scale_factor), \n image, interpolation=\"continuous\").get_data())\n data = np.asarray(data_list)\n truth_image = get_image(truth, affine)\n truth_data = resample_to_img(distort_image(truth_image,flip_axis=flip_axis,scale_factor=scale_factor),\n truth_image, interpolation=\"nearest\").get_data()\n return data, truth_data\n\n\ndef get_image(data, affine, nib_class=nib.Nifti1Image):\n return nib_class(dataobj=data, affine=affine)\n\n\ndef generate_permutation_keys():\n \"\"\"\n This function returns a set of \"keys\" that represent the 48 unique rotations &\n reflections of a 3D matrix.\n\n Each item of the set is a tuple:\n ((rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)\n\n As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is\n rotated 90 degrees around the z-axis, then reversed on the y-axis, and then\n transposed.\n\n 48 unique rotations & reflections:\n https://en.wikipedia.org/wiki/Octahedral_symmetry#The_isometries_of_the_cube\n \"\"\"\n return set(itertools.product(\n itertools.combinations_with_replacement(range(2), 2), range(2), range(2), range(2), range(2)))\n\n\ndef random_permutation_key():\n \"\"\"\n Generates and randomly selects a permutation key. See the documentation for the\n \"generate_permutation_keys\" function.\n \"\"\"\n return random.choice(list(generate_permutation_keys()))\n\n\ndef permute_data(data, key):\n \"\"\"\n Permutes the given data according to the specification of the given key. Input data\n must be of shape (n_modalities, x, y, z).\n\n Input key is a tuple: (rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)\n\n As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is\n rotated 90 degrees around the z-axis, then reversed on the y-axis, and then\n transposed.\n \"\"\"\n data = np.copy(data)\n (rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key\n\n if rotate_y != 0:\n data = np.rot90(data, rotate_y, axes=(1, 3))\n if rotate_z != 0:\n data = np.rot90(data, rotate_z, axes=(2, 3))\n if flip_x:\n data = data[:, ::-1]\n if flip_y:\n data = data[:, :, ::-1]\n if flip_z:\n data = data[:, :, :, ::-1]\n if transpose:\n for i in range(data.shape[0]):\n data[i] = data[i].T\n return data\n\n\ndef random_permutation_x_y(x_data, y_data):\n \"\"\"\n Performs random permutation on the data.\n :param x_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).\n :param y_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).\n :return: the permuted data\n \"\"\"\n key = random_permutation_key()\n return permute_data(x_data, key), permute_data(y_data, key)\n\n\ndef reverse_permute_data(data, key):\n key = reverse_permutation_key(key)\n data = np.copy(data)\n (rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key\n\n if transpose:\n for i in range(data.shape[0]):\n data[i] = data[i].T\n if flip_z:\n data = data[:, :, :, ::-1]\n if flip_y:\n data = data[:, :, ::-1]\n if flip_x:\n data = data[:, ::-1]\n if rotate_z != 0:\n data = np.rot90(data, rotate_z, axes=(2, 3))\n if rotate_y != 0:\n data = np.rot90(data, rotate_y, axes=(1, 3))\n return data\n\n\ndef reverse_permutation_key(key):\n rotation = tuple([-rotate for rotate in key[0]])\n return rotation, key[1], key[2], key[3], key[4]\n" ]
[ [ "numpy.random.uniform", "numpy.sum", "numpy.ones", "numpy.squeeze", "numpy.diag", "scipy.ndimage.affine_transform", "numpy.asarray", "numpy.copy", "numpy.random.choice", "numpy.random.random", "numpy.rot90", "numpy.flip", "numpy.random.normal", "numpy.array" ] ]
eherr/mg_server
[ "e18537f7d170d53a1960fe9ed79ea88f4049e53f" ]
[ "mg_server/simple_navigation_agent.py" ]
[ "import numpy as np\nfrom vis_utils.scene.components import ComponentBase\n\n\nclass SimpleNavigationAgent(ComponentBase):\n def __init__(self, scene_object):\n ComponentBase.__init__(self, scene_object)\n self.controller = scene_object._components[\"morphablegraph_state_machine\"]\n self.walk_targets = []\n self.tolerance = 20\n\n def get_actions(self):\n return self.controller.get_actions()\n\n def get_keyframe_labels(self, action_name):\n return self.controller.get_keyframe_labels(action_name)\n\n def perform_action(self, action_name, keyframe_label, position):\n self.controller.set_action_constraint(action_name, keyframe_label, position)\n self.controller.transition_to_action(action_name)\n\n def set_walk_target(self, target):\n self.walk_targets.append(target)\n self.update(0)\n if self.controller.node_type == \"idle\":\n self.controller.transition_to_next_state_controlled()\n\n def remove_walk_target(self):\n if len(self.walk_targets) > 1:\n self.walk_targets = self.walk_targets[1:]\n else:\n self.walk_targets = []\n self.controller.target_projection_len = 0\n\n def get_current_walk_target(self):\n if len(self.walk_targets) > 0:\n return self.walk_targets[0]\n else:\n return None\n\n def update(self, dt):\n target = self.get_current_walk_target()\n if target is None:\n return\n controller_pos = np.array(self.controller.getPosition())\n controller_pos[1] = 0\n target_pos = target.getPosition()\n target_pos[1] = 0\n target_dir = target_pos - controller_pos\n distance = np.linalg.norm(target_dir)\n target_dir = target_dir / distance\n self.controller.direction_vector = target_dir\n if distance > self.tolerance:\n self.controller.target_projection_len = min(self.controller.max_step_length, distance)\n else:\n self.remove_walk_target()\n\n\n\n\n" ]
[ [ "numpy.linalg.norm" ] ]
shgoren/viewmaker
[ "d9a7d4b05ac5126fe348c8c5217877ebcff7e2d7" ]
[ "scripts/run_audio.py" ]
[ "import os\nimport wandb\nfrom copy import deepcopy\nfrom src.systems import audio_systems\nfrom src.utils.utils import load_json\nfrom src.utils.setup import process_config\nimport random, torch, numpy\n\nimport pytorch_lightning as pl\n\nSYSTEM = {\n 'PretrainExpertInstDiscSystem': audio_systems.PretrainExpertInstDiscSystem,\n 'PretrainExpertSimCLRSystem': audio_systems.PretrainExpertSimCLRSystem,\n 'PretrainViewMakerSimCLRSystem': audio_systems.PretrainViewMakerSimCLRSystem,\n 'PretrainViewMakerInstDiscSystem': audio_systems.PretrainViewMakerInstDiscSystem,\n # -- \n 'TransferExpertAudioMNISTSystem': audio_systems.TransferExpertAudioMNISTSystem,\n 'TransferExpertGoogleSpeechCommandsSystem': audio_systems.TransferExpertGoogleSpeechCommandsSystem,\n 'TransferExpertFluentSpeechCommandsSystem': audio_systems.TransferExpertFluentSpeechCommandsSystem,\n 'TransferExpertLibriSpeechSystem': audio_systems.TransferExpertLibriSpeechSystem,\n 'TransferExpertVoxCeleb1System': audio_systems.TransferExpertVoxCeleb1System,\n 'TransferViewMakerAudioMNISTSystem': audio_systems.TransferViewMakerAudioMNISTSystem,\n 'TransferViewMakerGoogleSpeechCommandsSystem': audio_systems.TransferViewMakerGoogleSpeechCommandsSystem,\n 'TransferViewMakerFluentSpeechCommandsSystem': audio_systems.TransferViewMakerFluentSpeechCommandsSystem,\n 'TransferViewMakerLibriSpeechSystem': audio_systems.TransferViewMakerLibriSpeechSystem,\n 'TransferViewMakerVoxCeleb1System': audio_systems.TransferViewMakerVoxCeleb1System,\n}\n\n\ndef run(args, gpu_device=None):\n '''Run the Lightning system. \n\n Args:\n args\n args.config_path: str, filepath to the config file\n gpu_device: str or None, specifies GPU device as follows:\n None: CPU (specified as null in config)\n 'cpu': CPU\n '-1': All available GPUs\n '0': GPU 0\n '4': GPU 4\n '0,3' GPUs 1 and 3\n See: https://pytorch-lightning.readthedocs.io/en/latest/multi_gpu.html\n '''\n if gpu_device == 'cpu' or not gpu_device:\n gpu_device = None\n\n if args.caller_intent is not None:\n # for harpervalley, we need to choose between different transfer tasks\n config = process_config(args.config, exp_name_suffix=args.caller_intent)\n config.data_params.caller_intent = args.caller_intent\n else:\n config = process_config(args.config)\n\n # Only override if specified.\n if gpu_device: config.gpu_device = gpu_device\n seed_everything(config.seed)\n SystemClass = SYSTEM[config.system]\n system = SystemClass(config)\n\n if config.optim_params.scheduler:\n lr_callback = globals()[config.optim_params.scheduler](\n initial_lr=config.optim_params.learning_rate,\n max_epochs=config.num_epochs,\n schedule=(\n int(0.6*config.num_epochs),\n int(0.8*config.num_epochs),\n ),\n )\n callbacks = [lr_callback]\n else:\n callbacks = None\n\n # TODO: adjust period for saving checkpoints.\n ckpt_callback = pl.callbacks.ModelCheckpoint(\n os.path.join(config.exp_dir, 'checkpoints'),\n save_top_k=-1,\n period=1,\n )\n wandb.init(project='audio', entity='viewmaker', name=config.exp_name, config=config, sync_tensorboard=True)\n trainer = pl.Trainer(\n default_root_dir=config.exp_dir,\n gpus=gpu_device,\n # 'ddp' is usually faster, but we use 'dp' so the negative samples \n # for the whole batch are used for the SimCLR loss\n distributed_backend=config.distributed_backend or 'dp',\n max_epochs=config.num_epochs,\n min_epochs=config.num_epochs,\n checkpoint_callback=ckpt_callback,\n resume_from_checkpoint=args.ckpt or config.continue_from_checkpoint,\n profiler=args.profiler,\n precision=config.optim_params.precision 
or 32,\n callbacks=callbacks,\n val_check_interval=config.val_check_interval or 1.0,\n limit_val_batches=config.limit_val_batches or 1.0,\n num_sanity_val_steps=-1,\n )\n trainer.fit(system)\n\n\ndef seed_everything(seed):\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n numpy.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=str, default='path to config file')\n parser.add_argument('--caller-intent', type=str, default=None)\n parser.add_argument('--gpu-device', type=str, default=None)\n parser.add_argument('--profiler', action='store_true')\n parser.add_argument('--ckpt', type=str, default=None)\n args = parser.parse_args()\n\n # Ensure it's a string, even if from an older config\n gpu_device = str(args.gpu_device) if args.gpu_device else None\n run(args, gpu_device=gpu_device)\n\n" ]
[ [ "torch.manual_seed", "torch.cuda.manual_seed_all", "numpy.random.seed" ] ]
Shaswat2001/Drowsiness-Detector
[ "dd09a9a9806b1113975e227c148edcf050a1670b" ]
[ "model.py" ]
[ "import h5py\r\nimport numpy as np\r\nfrom keras import layers\r\nfrom keras.layers import Input, Add, Dense, Activation, Flatten, Conv2D,MaxPooling2D, Dropout\r\nfrom keras.models import Model, load_model\r\nfrom keras.preprocessing import image\r\nfrom sklearn.metrics import confusion_matrix as cf\r\n\r\ndef load_dataset():\r\n \"\"\"\r\n Reads the h5py file containing the dataset and returns the training and test set\r\n \"\"\"\r\n hf=h5py.File(\"image_final.h5\",'r')\r\n\r\n X_train_orig=np.array(hf.get(\"X_train_orig\"))\r\n X_test_orig=np.array(hf.get(\"X_test_orig\"))\r\n Y_train_orig=np.array(hf.get(\"Y_train_orig\"))\r\n Y_test_orig=np.array(hf.get(\"Y_test_orig\"))\r\n # Reshape the dataset \r\n Y_train_orig=Y_train_orig.reshape(Y_train_orig.shape[0],1)\r\n Y_test_orig=Y_test_orig.reshape(Y_test_orig.shape[0],1)\r\n\r\n hf.close()\r\n\r\n return X_train_orig,X_test_orig,Y_train_orig,Y_test_orig\r\n\r\ndef model_nn(input_shape=(64,64,1)):\r\n \r\n # Input Layer\r\n\tX_input=Input(input_shape)\r\n\t# 32 Filter convolution layer each of size 3x3\r\n\tX=Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(64,64,1))(X_input)\r\n\t# Max Pooling layer\r\n\tX=MaxPooling2D(pool_size=(1,1))(X)\r\n\t# 32 Filter convolution layer each of size 3x3\r\n\tX=Conv2D(32,(3,3),activation='relu')(X)\r\n\t# Max Pooling layer\r\n\tX=MaxPooling2D(pool_size=(1,1))(X)\r\n\t#64 Filter convolution layer each of size 3x3\r\n\tX=Conv2D(64, (3, 3), activation='relu')(X)\r\n\t# Max Pooling layer\r\n\tX=MaxPooling2D(pool_size=(1,1))(X)\r\n\t# Dropout layer for convergence i.e randomly turn neurons on and off\r\n\tX=Dropout(0.25)(X)\r\n\t#flatten since too many dimensions, we only want a classification output\r\n\tX=Flatten()(X)\r\n\t#fully connected to get all relevant data\r\n\tX=Dense(128, activation='relu')(X)\r\n\t#Dropout layer for Convergence\r\n\tX=Dropout(0.5)(X)\r\n\t#output a sigmoid to squash the matrix into output probabilities\r\n\tX=Dense(1, activation='sigmoid')(X)\r\n\tmodel = Model(inputs = X_input, outputs = X,name=\"CNN\")\r\n \r\n\treturn model\r\n\r\nmodel =model_nn(input_shape = (64, 64, 1))\r\n\r\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n# Calling load_dataset to store the dataset\r\nX_train_orig, X_test_orig, Y_train, Y_test = load_dataset()\r\n# Normalize image vectors\r\nX_train = X_train_orig/255.\r\nX_test = X_test_orig/255.\r\n\r\nmodel.fit(X_train, Y_train, epochs = 15, batch_size = 32)\r\n\r\ny_pred=model.predict(X_test)\r\n# The output of the model is array of real numbers therefore values greater than 0.5 will be evaluated as 1 otherwise 0\r\ny_pred=(y_pred>0.5)\r\n# Confusion Matrix\r\ncf(Y_test,y_pred)\r\n# Save the model for further use\r\nmodel.save('models/CNN_Model.h5', overwrite=True)\r\n\r\n\r\n" ]
[ [ "sklearn.metrics.confusion_matrix" ] ]
mononitogoswami/Snuba
[ "5aa63e6f00de0f761155df7c4f9c99e36181b394" ]
[ "program_synthesis/heuristic_generator.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import f1_score\n\nfrom program_synthesis.synthesizer import Synthesizer\nfrom program_synthesis.verifier import Verifier\n\nclass HeuristicGenerator(object):\n \"\"\"\n A class to go through the synthesizer-verifier loop\n \"\"\"\n\n def __init__(self, train_primitive_matrix, val_primitive_matrix, \n val_ground, train_ground=None, b=0.5):\n \"\"\" \n Initialize HeuristicGenerator object\n\n b: class prior of most likely class (TODO: use somewhere)\n beta: threshold to decide whether to abstain or label for heuristics\n gamma: threshold to decide whether to call a point vague or not\n \"\"\"\n\n self.train_primitive_matrix = train_primitive_matrix\n self.val_primitive_matrix = val_primitive_matrix\n self.val_ground = val_ground\n self.train_ground = train_ground\n self.b = b\n\n self.vf = None\n self.syn = None\n self.hf = []\n self.feat_combos = []\n\n def apply_heuristics(self, heuristics, primitive_matrix, feat_combos, beta_opt):\n \"\"\" \n Apply given heuristics to given feature matrix X and abstain by beta\n\n heuristics: list of pre-trained logistic regression models\n feat_combos: primitive indices to apply heuristics to\n beta: best beta value for associated heuristics\n \"\"\"\n\n def marginals_to_labels(hf,X,beta):\n marginals = hf.predict_proba(X)[:,1]\n labels_cutoff = np.zeros(np.shape(marginals))\n labels_cutoff[marginals <= (self.b-beta)] = -1.\n labels_cutoff[marginals >= (self.b+beta)] = 1.\n return labels_cutoff\n\n L = np.zeros((np.shape(primitive_matrix)[0],len(heuristics)))\n for i,hf in enumerate(heuristics):\n L[:,i] = marginals_to_labels(hf,primitive_matrix[:,feat_combos[i]],beta_opt[i])\n return L\n\n def prune_heuristics(self,heuristics,feat_combos,keep=1):\n \"\"\" \n Selects the best heuristic based on Jaccard Distance and Reliability Metric\n\n keep: number of heuristics to keep from all generated heuristics\n \"\"\"\n\n def calculate_jaccard_distance(num_labeled_total, num_labeled_L):\n scores = np.zeros(np.shape(num_labeled_L)[1])\n for i in range(np.shape(num_labeled_L)[1]):\n scores[i] = np.sum(np.minimum(num_labeled_L[:,i],num_labeled_total))/np.sum(np.maximum(num_labeled_L[:,i],num_labeled_total))\n return 1-scores\n \n L_val = np.array([])\n L_train = np.array([])\n beta_opt = np.array([])\n max_cardinality = len(heuristics)\n for i in range(max_cardinality):\n #Note that the LFs are being applied to the entire val set though they were developed on a subset...\n beta_opt_temp = self.syn.find_optimal_beta(heuristics[i], self.val_primitive_matrix, feat_combos[i], self.val_ground)\n L_temp_val = self.apply_heuristics(heuristics[i], self.val_primitive_matrix, feat_combos[i], beta_opt_temp) \n L_temp_train = self.apply_heuristics(heuristics[i], self.train_primitive_matrix, feat_combos[i], beta_opt_temp) \n \n beta_opt = np.append(beta_opt, beta_opt_temp)\n if i == 0:\n L_val = np.append(L_val, L_temp_val) #converts to 1D array automatically\n L_val = np.reshape(L_val,np.shape(L_temp_val))\n L_train = np.append(L_train, L_temp_train) #converts to 1D array automatically\n L_train = np.reshape(L_train,np.shape(L_temp_train))\n else:\n L_val = np.concatenate((L_val, L_temp_val), axis=1)\n L_train = np.concatenate((L_train, L_temp_train), axis=1)\n \n #Use F1 trade-off for reliability\n acc_cov_scores = [f1_score(self.val_ground, L_val[:,i], average='micro') for i in range(np.shape(L_val)[1])] \n acc_cov_scores = np.nan_to_num(acc_cov_scores)\n \n if self.vf != None:\n #Calculate Jaccard score for 
diversity\n train_num_labeled = np.sum(np.abs(self.vf.L_train.T), axis=0) \n jaccard_scores = calculate_jaccard_distance(train_num_labeled,np.abs(L_train))\n else:\n jaccard_scores = np.ones(np.shape(acc_cov_scores))\n\n #Weighting the two scores to find best heuristic\n combined_scores = 0.5*acc_cov_scores + 0.5*jaccard_scores\n sort_idx = np.argsort(combined_scores)[::-1][0:keep]\n return sort_idx\n \n\n def run_synthesizer(self, max_cardinality=1, idx=None, keep=1, model='lr'):\n \"\"\" \n Generates Synthesizer object and saves all generated heuristics\n\n max_cardinality: max number of features candidate programs take as input\n idx: indices of validation set to fit programs over\n keep: number of heuristics to pass to verifier\n model: train logistic regression ('lr') or decision tree ('dt')\n \"\"\"\n if idx == None:\n primitive_matrix = self.val_primitive_matrix\n ground = self.val_ground\n else:\n primitive_matrix = self.val_primitive_matrix[idx,:]\n ground = self.val_ground[idx]\n\n\n #Generate all possible heuristics\n self.syn = Synthesizer(primitive_matrix, ground, b=self.b)\n\n #Un-flatten indices\n def index(a, inp):\n i = 0\n remainder = 0\n while inp >= 0:\n remainder = inp\n inp -= len(a[i])\n i+=1\n try:\n return a[i-1][remainder] #TODO: CHECK THIS REMAINDER THING WTF IS HAPPENING\n except:\n import pdb; pdb.set_trace()\n\n #Select keep best heuristics from generated heuristics\n hf, feat_combos = self.syn.generate_heuristics(model, max_cardinality)\n sort_idx = self.prune_heuristics(hf,feat_combos, keep)\n for i in sort_idx:\n self.hf.append(index(hf,i)) \n self.feat_combos.append(index(feat_combos,i))\n\n #create appended L matrices for validation and train set\n beta_opt = self.syn.find_optimal_beta(self.hf, self.val_primitive_matrix, self.feat_combos, self.val_ground)\n self.L_val = self.apply_heuristics(self.hf, self.val_primitive_matrix, self.feat_combos, beta_opt) \n self.L_train = self.apply_heuristics(self.hf, self.train_primitive_matrix, self.feat_combos, beta_opt) \n \n def run_verifier(self):\n \"\"\" \n Generates Verifier object and saves marginals\n \"\"\"\n ###THIS IS WHERE THE SNORKEL FLAG IS SET!!!!\n self.vf = Verifier(self.L_train, self.L_val, self.val_ground, has_snorkel=False)\n self.vf.train_gen_model()\n self.vf.assign_marginals()\n\n def gamma_optimizer(self,marginals):\n \"\"\" \n Returns the best gamma parameter for abstain threshold given marginals\n\n marginals: confidences for data from a single heuristic\n \"\"\"\n m = len(self.hf)\n gamma = 0.5-(1/(m**(3/2.))) \n return gamma\n\n def find_feedback(self):\n \"\"\" \n Finds vague points according to gamma parameter\n\n self.gamma: confidence past 0.5 that relates to a vague or incorrect point\n \"\"\"\n #TODO: flag for re-classifying incorrect points\n #incorrect_idx = self.vf.find_incorrect_points(b=self.b)\n\n gamma_opt = self.gamma_optimizer(self.vf.val_marginals)\n #gamma_opt = self.gamma\n vague_idx = self.vf.find_vague_points(b=self.b, gamma=gamma_opt)\n incorrect_idx = vague_idx\n self.feedback_idx = list(set(list(np.concatenate((vague_idx,incorrect_idx))))) \n\n\n def evaluate(self):\n \"\"\" \n Calculate the accuracy and coverage for train and validation sets\n \"\"\"\n self.val_marginals = self.vf.val_marginals\n self.train_marginals = self.vf.train_marginals\n\n def calculate_accuracy(marginals, b, ground):\n total = np.shape(np.where(marginals != 0.5))[1]\n labels = np.sign(2*(marginals - 0.5))\n return np.sum(labels == ground)/float(total)\n \n def calculate_coverage(marginals, b, 
ground):\n total = np.shape(np.where(marginals != 0.5))[1]\n labels = np.sign(2*(marginals - 0.5))\n return total/float(len(labels))\n\n \n self.val_accuracy = calculate_accuracy(self.val_marginals, self.b, self.val_ground)\n self.train_accuracy = calculate_accuracy(self.train_marginals, self.b, self.train_ground)\n self.val_coverage = calculate_coverage(self.val_marginals, self.b, self.val_ground)\n self.train_coverage = calculate_coverage(self.train_marginals, self.b, self.train_ground)\n return self.val_accuracy, self.train_accuracy, self.val_coverage, self.train_coverage \n\n def heuristic_stats(self):\n '''For each heuristic, we want the following:\n - idx of the features it relies on\n - if dt, then the thresholds?\n ''' \n\n\n def calculate_accuracy(marginals, b, ground):\n total = np.shape(np.where(marginals != 0.5))[1]\n labels = np.sign(2*(marginals - 0.5))\n return np.sum(labels == ground)/float(total)\n \n def calculate_coverage(marginals, b, ground):\n total = np.shape(np.where(marginals != 0))[1]\n labels = marginals\n return total/float(len(labels))\n\n stats_table = np.zeros((len(self.hf),6))\n for i in range(len(self.hf)):\n stats_table[i,0] = int(self.feat_combos[i][0])\n try:\n stats_table[i,1] = int(self.feat_combos[i][1])\n except:\n stats_table[i,1] = -1.\n stats_table[i,2] = calculate_accuracy(self.L_val[:,i], self.b, self.val_ground)\n stats_table[i,3] = calculate_accuracy(self.L_train[:,i], self.b, self.train_ground)\n stats_table[i,4] = calculate_coverage(self.L_val[:,i], self.b, self.val_ground)\n stats_table[i,5] = calculate_coverage(self.L_train[:,i], self.b, self.train_ground)\n \n #Make table\n column_headers = ['Feat 1', 'Feat 2', 'Val Acc', 'Train Acc', 'Val Cov', 'Train Cov']\n pandas_stats_table = pd.DataFrame(stats_table, columns=column_headers)\n return pandas_stats_table\n\n\n \n\n\n" ]
[ [ "numpy.sum", "numpy.sign", "numpy.append", "numpy.maximum", "pandas.DataFrame", "numpy.abs", "numpy.argsort", "sklearn.metrics.f1_score", "numpy.where", "numpy.shape", "numpy.array", "numpy.concatenate", "numpy.nan_to_num", "numpy.minimum" ] ]
onlytailei/carla_cil_pytorch
[ "25f5fddd58e74b81e2c53d2b86e0de40e75fd880" ]
[ "carla_loader.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n'''\nAuthor:Tai Lei\nDate:Thu Nov 22 12:09:27 2018\nInfo:\n'''\n\nimport glob\n\nimport numpy as np\nimport h5py\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\nfrom imgaug import augmenters as iaa\nfrom helper import RandomTransWrapper\n\n\nclass CarlaH5Data():\n def __init__(self,\n train_folder,\n eval_folder,\n batch_size=4, num_workers=4, distributed=False):\n\n self.loaders = {\n \"train\": torch.utils.data.DataLoader(\n CarlaH5Dataset(\n data_dir=train_folder,\n train_eval_flag=\"train\"),\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=True,\n shuffle=True\n ),\n \"eval\": torch.utils.data.DataLoader(\n CarlaH5Dataset(\n data_dir=eval_folder,\n train_eval_flag=\"eval\"),\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=True,\n shuffle=False\n )}\n\n\nclass CarlaH5Dataset(Dataset):\n def __init__(self, data_dir,\n train_eval_flag=\"train\", sequence_len=200):\n self.data_dir = data_dir\n self.data_list = glob.glob(data_dir+'*.h5')\n self.data_list.sort()\n self.sequnece_len = sequence_len\n self.train_eval_flag = train_eval_flag\n\n self.build_transform()\n\n def build_transform(self):\n if self.train_eval_flag == \"train\":\n self.transform = transforms.Compose([\n transforms.RandomOrder([\n RandomTransWrapper(\n seq=iaa.GaussianBlur(\n (0, 1.5)),\n p=0.09),\n RandomTransWrapper(\n seq=iaa.AdditiveGaussianNoise(\n loc=0,\n scale=(0.0, 0.05),\n per_channel=0.5),\n p=0.09),\n RandomTransWrapper(\n seq=iaa.Dropout(\n (0.0, 0.10),\n per_channel=0.5),\n p=0.3),\n RandomTransWrapper(\n seq=iaa.CoarseDropout(\n (0.0, 0.10),\n size_percent=(0.08, 0.2),\n per_channel=0.5),\n p=0.3),\n RandomTransWrapper(\n seq=iaa.Add(\n (-20, 20),\n per_channel=0.5),\n p=0.3),\n RandomTransWrapper(\n seq=iaa.Multiply(\n (0.9, 1.1),\n per_channel=0.2),\n p=0.4),\n RandomTransWrapper(\n seq=iaa.ContrastNormalization(\n (0.8, 1.2),\n per_channel=0.5),\n p=0.09),\n ]),\n transforms.ToTensor()])\n else:\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n\n def __len__(self):\n return self.sequnece_len * len(self.data_list)\n\n def __getitem__(self, idx):\n data_idx = idx // self.sequnece_len\n file_idx = idx % self.sequnece_len\n file_name = self.data_list[data_idx]\n\n with h5py.File(file_name, 'r') as h5_file:\n img = np.array(h5_file['rgb'])[file_idx]\n img = self.transform(img)\n target = np.array(h5_file['targets'])[file_idx]\n target = target.astype(np.float32)\n # 2 Follow lane, 3 Left, 4 Right, 5 Straight\n # -> 0 Follow lane, 1 Left, 2 Right, 3 Straight\n command = int(target[24])-2\n # Steer, Gas, Brake (0,1, focus on steer loss)\n target_vec = np.zeros((4, 3), dtype=np.float32)\n target_vec[command, :] = target[:3]\n # in km/h, <90\n speed = np.array([target[10]/90, ]).astype(np.float32)\n mask_vec = np.zeros((4, 3), dtype=np.float32)\n mask_vec[command, :] = 1\n\n # TODO\n # add preprocess\n return img, speed, target_vec.reshape(-1), \\\n mask_vec.reshape(-1),\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
visr/RRMPG
[ "255224f00d2bdde53666f6bf3f3bf8566de15bb5" ]
[ "rrmpg/models/gr4j_model.py" ]
[ "# -*- coding: utf-8 -*-\n# This file is part of RRMPG.\n#\n# RRMPG is free software with the aim to provide a playground for experiments\n# with hydrological rainfall-runoff-models while achieving competitive\n# performance results.\n#\n# You should have received a copy of the MIT License along with RRMPG. If not,\n# see <https://opensource.org/licenses/MIT>\n\nimport numpy as np\n\nfrom numba import njit\n\n@njit\ndef run_gr4j(prec, etp, s_init, r_init, params):\n \"\"\"Implementation of the GR4J model.\n \n This function should be called via the .simulate() function of the GR4J\n class and not directly. It is kept in a separate file for less confusion\n if anyone wants to inspect the actual model routine.\n \n The naming of the variables is kept as in the original publication [1].\n \n Args:\n prec: Numpy [t] array, which contains the precipitation input.\n etp: Numpty [t] array, which contains the evapotranspiration input.\n s_init: Scalar for the initial state of the s-storage.\n r_init: Scalar for the initial state of the r-storage.\n params: Numpy array of custom dtype, which contains the model parameter.\n \n Returns:\n qsim: Numpy [t] array with the simulated streamflow.\n s_store: Numpy [t] array with the state of the s-storage of each\n timestep.\n r_store: Numpy [t] array with the state of the r-storage of each\n timestep.\n \n [1] Perrin, Charles, Claude Michel, and Vazken Andréassian. \"Improvement \n of a parsimonious model for streamflow simulation.\" Journal of hydrology \n 279.1 (2003): 275-289.\n \n \"\"\"\n # Number of simulation timesteps\n num_timesteps = len(prec)\n \n # Unpack the model parameters\n x1 = params['x1']\n x2 = params['x2']\n x3 = params['x3']\n x4 = params['x4']\n \n # initialize empty arrays for discharge and all storages\n s_store = np.zeros(num_timesteps+1, np.float64)\n r_store = np.zeros(num_timesteps+1, np.float64)\n qsim = np.zeros(num_timesteps+1, np.float64)\n \n # for clean array indexing, add 0 element at the 0th index of prec and \n # etp so we start simulating at the index 1\n prec = np.concatenate((np.zeros(1), prec))\n etp = np.concatenate((np.zeros(1), etp))\n \n # set initial values\n s_store[0] = s_init * x1\n r_store[0] = r_init * x3\n \n # calculate number of unit hydrograph ordinates\n num_uh1 = int(np.ceil(x4))\n num_uh2 = int(np.ceil(2*x4 + 1))\n \n # calculate the ordinates of both unit-hydrographs (eq. 16 & 17)\n uh1_ordinates = np.zeros(num_uh1, np.float64)\n uh2_ordinates = np.zeros(num_uh2, np.float64)\n \n for j in range(1, num_uh1 + 1):\n uh1_ordinates[j - 1] = _s_curve1(j, x4) - _s_curve1(j - 1, x4)\n \n for j in range(1, num_uh2 + 1):\n uh2_ordinates[j - 1] = _s_curve2(j, x4) - _s_curve2(j - 1, x4)\n \n # arrys to store the rain distributed through the unit hydrographs\n uh1 = np.zeros(num_uh1, np.float64)\n uh2 = np.zeros(num_uh2, np.float64)\n \n # Start the model simulation loop\n for t in range(1, num_timesteps+1):\n \n # Calculate netto precipitation and evaporation\n if prec[t] >= etp[t]:\n p_n = prec[t] - etp[t]\n pe_n = 0\n \n # calculate fraction of netto precipitation that fills\n # production store (eq. 3)\n p_s = ((x1 * (1 - (s_store[t-1] / x1)**2) * np.tanh(p_n/x1)) /\n (1 + s_store[t-1] / x1 * np.tanh(p_n / x1)))\n \n # no evaporation from production store\n e_s = 0 \n \n else:\n p_n = 0\n pe_n = etp[t] - prec[t]\n \n # calculate the fraction of the evaporation that will evaporate \n # from the production store (eq. 
4)\n e_s = ((s_store[t-1] * (2 - s_store[t-1]/x1) * np.tanh(pe_n/x1)) \n / (1 + (1 - s_store[t-1] / x1) * np.tanh(pe_n / x1)))\n \n # no rain that is allocation to the production store\n p_s = 0\n \n # Calculate the new storage content\n s_store[t] = s_store[t-1] - e_s + p_s\n \n # calculate percolation from actual storage level\n perc = s_store[t] * (1 - (1 + (4/9 * s_store[t] / x1)**4)**(-0.25))\n \n # final update of the production store for this timestep\n s_store[t] = s_store[t] - perc\n \n # total quantity of water that reaches the routing\n p_r = perc + (p_n - p_s)\n \n # split this water quantity by .9/.1 for diff. routing (UH1 & UH2)\n p_r_uh1 = 0.9 * p_r \n p_r_uh2 = 0.1 * p_r\n \n # update state of rain, distributed through the unit hydrographs\n for j in range(0, num_uh1 - 1):\n uh1[j] = uh1[j + 1] + uh1_ordinates[j] * p_r_uh1\n uh1[-1] = uh1_ordinates[-1] * p_r_uh1\n \n for j in range(0, num_uh2 - 1):\n uh2[j] = uh2[j + 1] + uh2_ordinates[j] * p_r_uh2\n uh2[-1] = uh2_ordinates[-1] * p_r_uh2\n \n # calculate the groundwater exchange F (eq. 18)\n gw_exchange = x2 * (r_store[t - 1] / x3) ** 3.5\n \n # update routing store\n r_store[t] = max(0, r_store[t - 1] + uh1[0] + gw_exchange)\n \n # outflow of routing store\n q_r = r_store[t] * (1 - (1 + (r_store[t] / x3)**4)**(-0.25))\n \n # subtract outflow from routing store level\n r_store[t] = r_store[t] - q_r\n \n # calculate flow component of unit hydrograph 2\n q_d = max(0, uh2[0] + gw_exchange)\n \n # total discharge of this timestep\n qsim[t] = q_r + q_d\n \n # return all but the artificial 0's step\n return qsim[1:], s_store[1:], r_store[1:] \n\n@njit\ndef _s_curve1(t, x4):\n \"\"\"Calculate the s-curve of the unit-hydrograph 1.\n \n Args:\n t: timestep\n x4: model parameter x4 of the gr4j model.\n \n \"\"\"\n if t <= 0:\n return 0.\n elif t < x4:\n return (t / x4)**2.5\n else:\n return 1.\n\n\n@njit\ndef _s_curve2(t, x4): \n \"\"\"Calculate the s-curve of the unit-hydrograph 2.\n \n Args:\n t: timestep\n x4: model parameter x4 of the gr4j model.\n \n \"\"\"\n if t <= 0:\n return 0.\n elif t <= x4:\n return 0.5 * ((t / x4) ** 2.5)\n elif t < 2*x4:\n return 1 - 0.5 * ((2 - t / x4) ** 2.5)\n else:\n return 1." ]
[ [ "numpy.ceil", "numpy.zeros", "numpy.tanh" ] ]
SpikeKing/GazeEstimation
[ "2f44db8869a69bbefcd39a98a75703a31733bd5a" ]
[ "src/datasources/frames.py" ]
[ "\"\"\"Data source of stream of frames.\"\"\"\nimport bz2\nimport dlib\nimport queue\nimport shutil\nimport threading\nimport time\nfrom typing import Tuple\nimport os\nfrom urllib.request import urlopen\n\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\n\nfrom core import BaseDataSource\n\n\nclass FramesSource(BaseDataSource):\n \"\"\"Preprocessing of stream of frames.\"\"\"\n\n def __init__(self,\n tensorflow_session: tf.Session,\n batch_size: int,\n eye_image_shape: Tuple[int, int],\n staging: bool = False,\n **kwargs):\n \"\"\"Create queues and threads to read and preprocess data.\"\"\"\n self._eye_image_shape = eye_image_shape\n self._proc_mutex = threading.Lock()\n self._read_mutex = threading.Lock()\n\n self._frame_read_queue = queue.Queue(maxsize=1)\n self._frame_read_thread = threading.Thread(target=self.frame_read_job, name='frame_read')\n self._frame_read_thread.daemon = True\n self._frame_read_thread.start()\n\n self._current_index = 0\n self._last_frame_index = 0\n self._indices = []\n self._frames = {}\n self._open = True\n\n # Call parent class constructor\n super().__init__(tensorflow_session, batch_size=batch_size, num_threads=1,\n fread_queue_capacity=batch_size, preprocess_queue_capacity=batch_size,\n shuffle=False, staging=staging, **kwargs)\n\n _short_name = 'Frames'\n\n @property\n def short_name(self):\n \"\"\"Short name specifying source.\"\"\"\n return self._short_name\n\n def frame_read_job(self):\n \"\"\"Read frame from webcam.\"\"\"\n generate_frame = self.frame_generator()\n while True:\n before_frame_read = time.time()\n bgr = next(generate_frame)\n if bgr is not None:\n after_frame_read = time.time()\n with self._read_mutex:\n self._frame_read_queue.queue.clear()\n self._frame_read_queue.put_nowait((before_frame_read, bgr, after_frame_read))\n self._open = False\n\n def frame_generator(self):\n \"\"\"Read frame from webcam.\"\"\"\n raise NotImplementedError('Frames::frame_generator not implemented.')\n\n def entry_generator(self, yield_just_one=False):\n \"\"\"Generate eye image entries by detecting faces and facial landmarks.\"\"\"\n try:\n while range(1) if yield_just_one else True:\n # Grab frame\n with self._proc_mutex:\n before_frame_read, bgr, after_frame_read = self._frame_read_queue.get()\n bgr = cv.flip(bgr, flipCode=1) # Mirror\n current_index = self._last_frame_index + 1\n self._last_frame_index = current_index\n\n grey = cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)\n frame = {\n 'frame_index': current_index,\n 'time': {\n 'before_frame_read': before_frame_read,\n 'after_frame_read': after_frame_read,\n },\n 'bgr': bgr,\n 'grey': grey,\n }\n self._frames[current_index] = frame\n self._indices.append(current_index)\n\n # Keep just a few frames around\n frames_to_keep = 120\n if len(self._indices) > frames_to_keep:\n for index in self._indices[:-frames_to_keep]:\n del self._frames[index]\n self._indices = self._indices[-frames_to_keep:]\n\n # Eye image segmentation pipeline\n self.detect_faces(frame)\n self.detect_landmarks(frame)\n self.calculate_smoothed_landmarks(frame)\n self.segment_eyes(frame)\n self.update_face_boxes(frame)\n frame['time']['after_preprocessing'] = time.time()\n\n for i, eye_dict in enumerate(frame['eyes']):\n yield {\n 'frame_index': np.int64(current_index),\n 'eye': eye_dict['image'],\n 'eye_index': np.uint8(i),\n }\n\n finally:\n # Execute any cleanup operations as necessary\n pass\n\n def preprocess_entry(self, entry):\n \"\"\"Preprocess segmented eye images for use as neural network input.\"\"\"\n eye = 
entry['eye']\n eye = cv.equalizeHist(eye)\n eye = eye.astype(np.float32)\n eye *= 2.0 / 255.0\n eye -= 1.0\n eye = np.expand_dims(eye, -1 if self.data_format == 'NHWC' else 0)\n entry['eye'] = eye\n return entry\n\n def detect_faces(self, frame):\n \"\"\"Detect all faces in a frame.\"\"\"\n frame_index = frame['frame_index']\n previous_index = self._indices[self._indices.index(frame_index) - 1]\n previous_frame = self._frames[previous_index]\n if ('last_face_detect_index' not in previous_frame or\n frame['frame_index'] - previous_frame['last_face_detect_index'] > 59):\n detector = get_face_detector()\n if detector.__class__.__name__ == 'CascadeClassifier':\n detections = detector.detectMultiScale(frame['grey'])\n else:\n detections = detector(cv.resize(frame['grey'], (0, 0), fx=0.5, fy=0.5), 0)\n faces = []\n for d in detections:\n try:\n l, t, r, b = d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()\n l *= 2\n t *= 2\n r *= 2\n b *= 2\n w, h = r - l, b - t\n except AttributeError: # Using OpenCV LBP detector on CPU\n l, t, w, h = d\n faces.append((l, t, w, h))\n faces.sort(key=lambda bbox: bbox[0])\n frame['faces'] = faces\n frame['last_face_detect_index'] = frame['frame_index']\n\n # Clear previous known landmarks. This is to disable smoothing when new face detect\n # occurs. This allows for recovery of drifted detections.\n previous_frame['landmarks'] = []\n else:\n frame['faces'] = previous_frame['faces']\n frame['last_face_detect_index'] = previous_frame['last_face_detect_index']\n\n def detect_landmarks(self, frame):\n \"\"\"Detect 5-point facial landmarks for faces in frame.\"\"\"\n predictor = get_landmarks_predictor()\n landmarks = []\n for face in frame['faces']:\n l, t, w, h = face\n rectangle = dlib.rectangle(left=int(l), top=int(t), right=int(l + w), bottom=int(t + h))\n landmarks_dlib = predictor(frame['grey'], rectangle)\n\n def tuple_from_dlib_shape(index):\n p = landmarks_dlib.part(index)\n return (p.x, p.y)\n\n num_landmarks = landmarks_dlib.num_parts\n landmarks.append(np.array([tuple_from_dlib_shape(i) for i in range(num_landmarks)]))\n frame['landmarks'] = landmarks\n\n _smoothing_window_size = 10\n _smoothing_coefficient_decay = 0.5\n _smoothing_coefficients = None\n\n def calculate_smoothed_landmarks(self, frame):\n \"\"\"If there are previous landmark detections, try to smooth current prediction.\"\"\"\n # Cache coefficients based on defined sliding window size\n if self._smoothing_coefficients is None:\n coefficients = np.power(self._smoothing_coefficient_decay,\n list(reversed(list(range(self._smoothing_window_size)))))\n coefficients /= np.sum(coefficients)\n self._smoothing_coefficients = coefficients.reshape(-1, 1)\n\n # Get a window of frames\n current_index = self._indices.index(frame['frame_index'])\n a = current_index - self._smoothing_window_size + 1\n if a < 0:\n \"\"\"If slice extends before last known frame.\"\"\"\n return\n window_indices = self._indices[a:current_index + 1]\n window_frames = [self._frames[idx] for idx in window_indices]\n window_num_landmark_entries = np.array([len(f['landmarks']) for f in window_frames])\n if np.any(window_num_landmark_entries == 0):\n \"\"\"Any frame has zero faces detected.\"\"\"\n return\n if not np.all(window_num_landmark_entries == window_num_landmark_entries[0]):\n \"\"\"Not the same number of faces detected in entire window.\"\"\"\n return\n\n # Apply coefficients to landmarks in window\n window_landmarks = np.asarray([f['landmarks'] for f in window_frames])\n frame['smoothed_landmarks'] = np.sum(\n 
np.multiply(window_landmarks.reshape(self._smoothing_window_size, -1),\n self._smoothing_coefficients),\n axis=0,\n ).reshape(window_num_landmark_entries[-1], -1, 2)\n\n def segment_eyes(self, frame):\n \"\"\"From found landmarks in previous steps, segment eye image.\"\"\"\n eyes = []\n\n # Final output dimensions\n oh, ow = self._eye_image_shape\n\n # Select which landmarks (raw/smoothed) to use\n frame_landmarks = (frame['smoothed_landmarks'] if 'smoothed_landmarks' in frame\n else frame['landmarks'])\n\n for face, landmarks in zip(frame['faces'], frame_landmarks):\n # Segment eyes\n # for corner1, corner2, is_left in [(36, 39, True), (42, 45, False)]:\n for corner1, corner2, is_left in [(2, 3, True), (0, 1, False)]:\n x1, y1 = landmarks[corner1, :]\n x2, y2 = landmarks[corner2, :]\n eye_width = 1.5 * np.linalg.norm(landmarks[corner1, :] - landmarks[corner2, :])\n if eye_width == 0.0:\n continue\n cx, cy = 0.5 * (x1 + x2), 0.5 * (y1 + y2)\n\n # Centre image on middle of eye\n translate_mat = np.asmatrix(np.eye(3))\n translate_mat[:2, 2] = [[-cx], [-cy]]\n inv_translate_mat = np.asmatrix(np.eye(3))\n inv_translate_mat[:2, 2] = -translate_mat[:2, 2]\n\n # Rotate to be upright\n roll = 0.0 if x1 == x2 else np.arctan((y2 - y1) / (x2 - x1))\n rotate_mat = np.asmatrix(np.eye(3))\n cos = np.cos(-roll)\n sin = np.sin(-roll)\n rotate_mat[0, 0] = cos\n rotate_mat[0, 1] = -sin\n rotate_mat[1, 0] = sin\n rotate_mat[1, 1] = cos\n inv_rotate_mat = rotate_mat.T\n\n # Scale\n scale = ow / eye_width\n scale_mat = np.asmatrix(np.eye(3))\n scale_mat[0, 0] = scale_mat[1, 1] = scale\n inv_scale = 1.0 / scale\n inv_scale_mat = np.asmatrix(np.eye(3))\n inv_scale_mat[0, 0] = inv_scale_mat[1, 1] = inv_scale\n\n # Centre image\n centre_mat = np.asmatrix(np.eye(3))\n centre_mat[:2, 2] = [[0.5 * ow], [0.5 * oh]]\n inv_centre_mat = np.asmatrix(np.eye(3))\n inv_centre_mat[:2, 2] = -centre_mat[:2, 2]\n\n # Get rotated and scaled, and segmented image\n transform_mat = centre_mat * scale_mat * rotate_mat * translate_mat\n inv_transform_mat = (inv_translate_mat * inv_rotate_mat * inv_scale_mat *\n inv_centre_mat)\n eye_image = cv.warpAffine(frame['grey'], transform_mat[:2, :], (ow, oh))\n if is_left:\n eye_image = np.fliplr(eye_image)\n eyes.append({\n 'image': eye_image,\n 'inv_landmarks_transform_mat': inv_transform_mat,\n 'side': 'left' if is_left else 'right',\n })\n frame['eyes'] = eyes\n\n def update_face_boxes(self, frame):\n \"\"\"Update face bounding box based on detected landmarks.\"\"\"\n frame_landmarks = (frame['smoothed_landmarks'] if 'smoothed_landmarks' in frame\n else frame['landmarks'])\n for i, (face, landmarks) in enumerate(zip(frame['faces'], frame_landmarks)):\n x_min, y_min = np.amin(landmarks, axis=0)\n x_max, y_max = np.amax(landmarks, axis=0)\n x_mid, y_mid = 0.5 * (x_max + x_min), 0.5 * (y_max + y_min)\n w, h = x_max - x_min, y_max - y_min\n new_w = 2.2 * max(h, w)\n half_w = 0.5 * new_w\n frame['faces'][i] = (int(x_mid - half_w), int(y_mid - half_w), int(new_w), int(new_w))\n\n # x1, y1 = landmarks[0, :]\n # x2, y2 = landmarks[3, :]\n # face_width = 2.5 * np.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n # if face_width == 0.0:\n # continue\n #\n # cx, cy = landmarks[4, :]\n # roll = 0.0 if x1 == x2 else np.arctan((y2 - y1) / (x2 - x1))\n #\n # hdx = 0.5 * face_width * (2. - np.abs(np.cos(roll)))\n # hdy = 0.5 * face_width * (1. 
+ np.abs(np.sin(roll)))\n # print(np.degrees(roll), face_width, hdx, hdy)\n # frame['faces'][i] = (int(cx - hdx), int(cy - hdy), int(2*hdx), int(2*hdy))\n\n\n_face_detector = None\n_landmarks_predictor = None\n\n\ndef _get_dlib_data_file(dat_name):\n dat_dir = os.path.relpath('%s/../3rdparty' % os.path.basename(__file__))\n dat_path = '%s/%s' % (dat_dir, dat_name)\n if not os.path.isdir(dat_dir):\n os.mkdir(dat_dir)\n\n # Download trained shape detector\n if not os.path.isfile(dat_path):\n url = 'http://dlib.net/files/%s.bz2' % dat_name\n print('[Info] Download URL: {}'.format(url))\n with urlopen(url) as response:\n with bz2.BZ2File(response) as bzf, open(dat_path, 'wb') as f:\n shutil.copyfileobj(bzf, f)\n\n print('[Info] 下载完成: {}'.format(dat_path))\n return dat_path\n\n\ndef _get_opencv_xml(xml_name):\n xml_dir = os.path.relpath('%s/../3rdparty' % os.path.basename(__file__))\n xml_path = '%s/%s' % (xml_dir, xml_name)\n if not os.path.isdir(xml_dir):\n os.mkdir(xml_dir)\n\n # Download trained shape detector\n if not os.path.isfile(xml_path):\n url_stem = 'https://raw.githubusercontent.com/opencv/opencv/master/data/lbpcascades'\n url = '%s/%s' % (url_stem, xml_name)\n print('[Info] Download URL: {}'.format(url))\n with urlopen(url) as response:\n with open(xml_path, 'wb') as f:\n shutil.copyfileobj(response, f)\n\n print('[Info] 下载完成: {}'.format(xml_path))\n return xml_path\n\n\ndef get_face_detector():\n \"\"\"Get a singleton dlib face detector.\"\"\"\n global _face_detector\n if not _face_detector:\n try:\n dat_path = _get_dlib_data_file('mmod_human_face_detector.dat')\n _face_detector = dlib.cnn_face_detection_model_v1(dat_path)\n except:\n xml_path = _get_opencv_xml('lbpcascade_frontalface_improved.xml')\n _face_detector = cv.CascadeClassifier(xml_path)\n return _face_detector\n\n\ndef get_landmarks_predictor():\n \"\"\"Get a singleton dlib face landmark predictor.\"\"\"\n global _landmarks_predictor\n if not _landmarks_predictor:\n dat_path = _get_dlib_data_file('shape_predictor_5_face_landmarks.dat')\n # dat_path = _get_dlib_data_file('shape_predictor_68_face_landmarks.dat')\n _landmarks_predictor = dlib.shape_predictor(dat_path)\n return _landmarks_predictor\n" ]
[ [ "numpy.sum", "numpy.eye", "numpy.arctan", "numpy.any", "numpy.fliplr", "numpy.asarray", "numpy.cos", "numpy.int64", "numpy.amin", "numpy.expand_dims", "numpy.all", "numpy.amax", "numpy.sin", "numpy.linalg.norm", "numpy.uint8" ] ]
isabella232/uis-rnn
[ "91764ceaad832be651f3d64a809a183e133154d6" ]
[ "tests/utils_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for utils.py.\"\"\"\nimport unittest\n\nimport numpy as np\n\nfrom uisrnn import utils\n\n\nclass TestEnforceClusterIdUniqueness(unittest.TestCase):\n \"\"\"Tests for utils.enforce_cluster_id_uniqueness()\"\"\"\n\n def test_list_of_list(self):\n \"\"\"Test when cluster_ids is a list of list.\"\"\"\n cluster_ids = [['a', 'b', 'c'], ['b', 'c', 'd', 'e']]\n new_cluster_ids = utils.enforce_cluster_id_uniqueness(cluster_ids)\n self.assertEqual(2, len(new_cluster_ids))\n self.assertEqual(3, len(new_cluster_ids[0]))\n self.assertEqual(4, len(new_cluster_ids[1]))\n merged = [x for new_cluster_id in new_cluster_ids for x in new_cluster_id]\n self.assertEqual(7, len(merged))\n self.assertEqual(7, len(set(merged)))\n\n\nclass TestConcatenateTrainingData(unittest.TestCase):\n \"\"\"Tests for utils.concatenate_training_data()\"\"\"\n\n def setUp(self):\n \"\"\"Set up input.\"\"\"\n self.train_sequences = [\n np.zeros((3, 2)),\n np.ones((4, 2))]\n self.train_cluster_ids = [\n ['a', 'b', 'a'],\n np.array(['a', 'b', 'c', 'b'])]\n\n def test_noenforce_noshuffle(self):\n \"\"\"Test when we do not enforce uniqueness, and do not shuffle.\"\"\"\n (concatenated_train_sequence,\n concatenated_train_cluster_id) = utils.concatenate_training_data(\n self.train_sequences, self.train_cluster_ids, False, False)\n self.assertListEqual(\n [0.0] * 6 + [1.0] * 8,\n concatenated_train_sequence.flatten().tolist())\n self.assertListEqual(\n ['a', 'b', 'a', 'a', 'b', 'c', 'b'],\n concatenated_train_cluster_id)\n\n def test_enforce_noshuffle(self):\n \"\"\"Test when we enforce uniqueness, but do not shuffle.\"\"\"\n (concatenated_train_sequence,\n concatenated_train_cluster_id) = utils.concatenate_training_data(\n self.train_sequences, self.train_cluster_ids, True, False)\n self.assertListEqual(\n [0.0] * 6 + [1.0] * 8,\n concatenated_train_sequence.flatten().tolist())\n self.assertEqual(\n 7,\n len(concatenated_train_cluster_id))\n self.assertEqual(\n 5,\n len(set(concatenated_train_cluster_id)))\n\n def test_noenforce_shuffle(self):\n \"\"\"Test when we do not enforce uniqueness, but do shuffle.\"\"\"\n (concatenated_train_sequence,\n concatenated_train_cluster_id) = utils.concatenate_training_data(\n self.train_sequences, self.train_cluster_ids, False, True)\n try:\n self.assertListEqual(\n [0.0] * 6 + [1.0] * 8,\n concatenated_train_sequence.flatten().tolist())\n self.assertListEqual(\n ['a', 'b', 'a', 'a', 'b', 'c', 'b'],\n concatenated_train_cluster_id)\n except AssertionError:\n self.assertListEqual(\n [1.0] * 8 + [0.0] * 6,\n concatenated_train_sequence.flatten().tolist())\n self.assertListEqual(\n ['a', 'b', 'c', 'b', 'a', 'b', 'a'],\n concatenated_train_cluster_id)\n\n\nclass TestSamplePermutedSegments(unittest.TestCase):\n \"\"\"Tests for utils.sample_permuted_segments()\"\"\"\n\n def test_short_sequence(self):\n \"\"\"Test for a short sequence.\"\"\"\n index_sequence = [5, 2, 3, 2, 1]\n number_samples = 10\n 
sampled_index_sequences = utils.sample_permuted_segments(index_sequence,\n number_samples)\n self.assertEqual(10, len(sampled_index_sequences))\n for output_sequence in sampled_index_sequences:\n self.assertEqual((5,), output_sequence.shape)\n self.assertEqual(4, len(set(output_sequence.tolist())))\n\n\nclass TestResizeSequence(unittest.TestCase):\n \"\"\"Tests for utils.resize_sequence()\"\"\"\n\n def test_resize_sequence_without_permutation1(self):\n \"\"\"Test when we do not permute, output is deterministic.\"\"\"\n sub_sequence, seq_lengths = utils.resize_sequence(\n sequence=np.array([[1, 1], [2, 2], [1, 1]]),\n cluster_id=np.array([1, 2, 1]),\n num_permutations=None)\n self.assertEqual(len(sub_sequence), 2)\n self.assertTrue((sub_sequence[0] == [[1, 1], [1, 1]]).all())\n self.assertTrue((sub_sequence[1] == [[2, 2]]).all())\n self.assertListEqual(seq_lengths, [3, 2])\n\n def test_resize_sequence_without_permutation2(self):\n \"\"\"Test when we do not permute, output is deterministic.\"\"\"\n sub_sequence, seq_lengths = utils.resize_sequence(\n sequence=np.array([[1, 1], [2, 2], [3, 3]]),\n cluster_id=np.array([1, 2, 1]),\n num_permutations=None)\n self.assertEqual(len(sub_sequence), 2)\n self.assertTrue((sub_sequence[0] == [[1, 1], [3, 3]]).all())\n self.assertTrue((sub_sequence[1] == [[2, 2]]).all())\n self.assertListEqual(seq_lengths, [3, 2])\n\n def test_resize_sequence_with_permutation(self):\n \"\"\"Test when we permute, each output can be one of the permutations.\"\"\"\n sub_sequence, seq_lengths = utils.resize_sequence(\n sequence=np.array([[1, 1], [2, 2], [3, 3]]),\n cluster_id=np.array([1, 2, 1]),\n num_permutations=2)\n self.assertEqual(len(sub_sequence), 2 * 2)\n self.assertTrue((sub_sequence[0] == [[1, 1], [3, 3]]).all() or\n (sub_sequence[0] == [[3, 3], [1, 1]]).all())\n self.assertTrue((sub_sequence[1] == [[1, 1], [3, 3]]).all() or\n (sub_sequence[1] == [[3, 3], [1, 1]]).all())\n self.assertTrue((sub_sequence[2] == [[2, 2]]).all())\n self.assertTrue((sub_sequence[3] == [[2, 2]]).all())\n self.assertListEqual(seq_lengths, [3, 3, 2, 2])\n\n\nclass TestEstimateTransitionBias(unittest.TestCase):\n \"\"\"Tests for utils.estimate_transition_bias()\"\"\"\n\n def test_transition_bias_empty_sequences(self):\n \"\"\"Test when the input cluster_id sequences are empty\"\"\"\n (transition_bias,\n denominator) = utils.estimate_transition_bias(cluster_ids=[[], [], []])\n self.assertTrue(np.log(transition_bias) != -np.inf)\n self.assertTrue(np.log(1 - transition_bias) != -np.inf)\n self.assertTrue(denominator != 0)\n\n def test_transition_bias_unique_speaker(self):\n \"\"\"Test when the input cluster_id sequences contain a unique speaker\n and therefore no speaker changes\"\"\"\n transition_bias, _ = utils.estimate_transition_bias(cluster_ids=[[1] * 100])\n self.assertTrue(np.log(transition_bias) != -np.inf)\n self.assertTrue(np.log(1 - transition_bias) != -np.inf)\n\n def test_transition_bias_always_changing_speaker(self):\n \"\"\"Test when in the input cluster_id sequences the speaker always\n changes\"\"\"\n transition_bias, _ = utils.estimate_transition_bias(\n cluster_ids=[[1, 2, 1], [2, 1, 2]])\n self.assertTrue(np.log(transition_bias) != -np.inf)\n self.assertTrue(np.log(1 - transition_bias) != -np.inf)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.log", "numpy.zeros" ] ]
satheeshxolo/tensorflow
[ "93082af9e866067d5383ec36c8d840b21d91a9f8" ]
[ "tensorflow/python/distribute/parameter_server_strategy.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes implementing a multi-worker ps DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\n\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n_LOCAL_CPU = \"/device:CPU:0\"\n_LOCAL_GPU_0 = \"/device:GPU:0\"\n\n\n# TODO(yuefengz): maybe cache variables on local CPU.\n@tf_export(\"distribute.experimental.ParameterServerStrategy\")\nclass ParameterServerStrategy(distribute_lib.DistributionStrategy):\n \"\"\"A parameter server DistributionStrategy.\n\n This strategy class works for both local training and between-graph replicated\n training for multiple workers. It uses `TFConfigClusterResolver` to detect\n configurations for multi-worker training. In multi-worker training mode, i.e.\n `TFConfigClusterResolver` has detected 'TF_CONFIG' environment variable and\n 'TF_CONFIG' has a cluster spec, variables and updates to those variables are\n assigned to parameter servers and other operations are assigned to workers.\n In local training mode, variables are assigned to local CPU or the only GPU.\n When each worker has more than one GPU, operations will be replicated on these\n GPUs. In both cases, operations are replicated but variables are not and these\n workers share a common view for which paramater server a variable is assigned\n to.\n\n This class assumes between-graph replication will be used and works on a graph\n for a particular worker. 
Note that each graph and worker is independent.\n This means that while each worker will synchronously compute a single gradient\n update across all GPUs, updates between workers proceed asynchronously.\n Operations that occur only on the first replica (such as incrementing the\n global step), will occur on the first replica *of every worker*.\n\n It is expected to call `call_for_each_replica(fn, ...)` for any\n operations which potentially can be replicated across replicas (i.e. multiple\n GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra\n caution needs to be taken:\n\n 1) It is generally not recommended to open a device scope under the strategy's\n scope. A device scope (i.e. calling `tf.device`) will be merged with or\n override the device for operations but will not change the device for\n variables.\n\n 2) It is also not recommended to open a colocation scope (i.e. calling\n `tf.colocate_with`) under the strategy's scope. For colocating variables, use\n `strategy.extended.colocate_vars_with` instead. Colocation of ops will\n possibly create conflicts of device assignment.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes this strategy with default TFConfigClusterResolver.\"\"\"\n super(ParameterServerStrategy, self).__init__(\n ParameterServerStrategyExtended(self))\n\n\nclass ParameterServerStrategyExtended(\n distribute_lib.DistributionStrategyExtended):\n \"\"\"Implementation of ParameterServerStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n cluster_resolver=TFConfigClusterResolver()):\n super(ParameterServerStrategyExtended, self).__init__(container_strategy)\n self._initialize_strategy(cluster_resolver)\n\n # We typically don't need to do all-reduce in this strategy.\n self._cross_device_ops = (\n cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initialize devices for multiple workers.\n\n It creates variable devices and compute devices. Variables and operations\n will be assigned to them respectively. We have one compute device per\n replica. The variable device is a device function or device string. 
The\n default variable device assigns variables to parameter servers in a\n round-robin fashion.\n\n Args:\n cluster_resolver: a descendant of `ClusterResolver` object.\n\n Raises:\n ValueError: if the cluster doesn't have ps jobs.\n \"\"\"\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n # Save the num_gpus_per_worker for configure method.\n self._num_gpus_per_worker = num_gpus\n\n cluster_spec = cluster_resolver.cluster_spec()\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if not task_type or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`\")\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n assert cluster_spec.as_dict()\n\n worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._input_host_device = numpy_dataset.SingleDevice(worker_device)\n\n # Define compute devices which is a list of device strings and one for each\n # replica. When there are GPUs, replicate operations on these GPUs.\n # Otherwise, place operations on CPU.\n if num_gpus > 0:\n compute_devices = tuple(\n \"%s/device:GPU:%d\" % (worker_device, i) for i in range(num_gpus))\n else:\n compute_devices = (worker_device,)\n\n self._device_map = values.ReplicaDeviceMap(compute_devices)\n self._input_workers = input_lib.InputWorkers(\n self._device_map, [(worker_device, compute_devices)])\n\n # In distributed mode, place variables on ps jobs in a round-robin fashion.\n # Note that devices returned from `replica_device_setter` are not\n # canonical and therefore we don't canonicalize all variable devices to\n # make them consistent.\n # TODO(yuefengz): support passing a strategy object to control variable\n # assignment.\n # TODO(yuefengz): merge the logic of replica_device_setter into this\n # class.\n num_ps_replicas = len(cluster_spec.as_dict().get(\"ps\", []))\n if num_ps_replicas == 0:\n raise ValueError(\"The cluster spec needs to have `ps` jobs.\")\n self._variable_device = device_setter.replica_device_setter(\n ps_tasks=num_ps_replicas,\n worker_device=worker_device,\n merge_devices=True,\n cluster=cluster_spec)\n\n # The `_parameter_devices` is needed for the `parameter_devices` property\n # and is a list of all variable devices. 
Here parameter devices are all\n # tasks of the \"ps\" job.\n self._parameter_devices = tuple(map(\"/job:ps/task:{}\".format,\n range(num_ps_replicas)))\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = worker_device\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n\n logging.info(\n \"Multi-worker ParameterServerStrategy with \"\n \"cluster_spec = %r, task_type = %r, task_id = %r, \"\n \"num_ps_replicas = %r, is_chief = %r, device_map = %r, \"\n \"variable_device = %r\", cluster_spec.as_dict(), task_type, task_id,\n num_ps_replicas, self._is_chief, self._device_map,\n self._variable_device)\n\n def _initialize_local(self, cluster_resolver):\n \"\"\"Initialize internal devices for local training.\"\"\"\n worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._input_host_device = numpy_dataset.SingleDevice(worker_device)\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n # Save the num_gpus_per_worker for configure method.\n self._num_gpus_per_worker = num_gpus\n\n # Define compute devices which is a list of device strings and one for each\n # replica. When there are GPUs, replicate operations on these GPUs.\n # Otherwise, place operations on CPU.\n if num_gpus > 0:\n compute_devices = tuple(map(\"/device:GPU:{}\".format, range(num_gpus)))\n else:\n compute_devices = (_LOCAL_CPU,)\n\n self._device_map = values.ReplicaDeviceMap(compute_devices)\n self._input_workers = input_lib.InputWorkers(\n self._device_map, [(worker_device, compute_devices)])\n\n # If there is only one GPU, put everything on that GPU. 
Otherwise, place\n # variables on CPU.\n if num_gpus == 1:\n assert len(compute_devices) == 1\n self._variable_device = _LOCAL_GPU_0\n self._parameter_devices = (_LOCAL_GPU_0,)\n else:\n self._variable_device = _LOCAL_CPU\n self._parameter_devices = (_LOCAL_CPU,)\n\n self._is_chief = True\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n\n logging.info(\n \"ParameterServerStrategy with compute_devices = %r, \"\n \"variable_device = %r\", compute_devices, self._variable_device)\n\n def _validate_colocate_with_variable(self, colocate_with_variable):\n values.validate_colocate(colocate_with_variable, self)\n\n def _make_dataset_iterator(self, dataset):\n return input_lib.DatasetIterator(dataset, self._input_workers,\n self._num_replicas_in_sync)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n if self._cluster_spec:\n input_pipeline_id = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n num_input_pipelines = multi_worker_util.worker_count(\n self._cluster_spec, self._task_type)\n else:\n input_pipeline_id = 0\n num_input_pipelines = 1\n input_context = distribute_lib.InputContext(\n num_input_pipelines=num_input_pipelines,\n input_pipeline_id=input_pipeline_id,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context])\n\n def _experimental_make_numpy_dataset(self, numpy_input, session):\n return numpy_dataset.one_host_numpy_dataset(\n numpy_input, self._input_host_device, session)\n\n def _broadcast_to(self, tensor, destinations):\n # This is both a fast path for Python constants, and a way to delay\n # converting Python values to a tensor until we know what type it\n # should be converted to. 
Otherwise we have trouble with:\n # global_step.assign_add(1)\n # since the `1` gets broadcast as an int32 but global_step is int64.\n if isinstance(tensor, (float, int)):\n return tensor\n if not cross_device_ops_lib.check_destinations(destinations):\n # TODO(josh11b): Use current logical device instead of 0 here.\n destinations = values.LogicalDeviceSpec(\n device_map=self._device_map, logical_device=0)\n return self._cross_device_ops.broadcast(tensor, destinations)\n\n def _allow_variable_partition(self):\n return not context.executing_eagerly()\n\n # TODO(yuefengz): not all ops in device_setter.STANDARD_PS_OPS will go through\n # this creator, such as \"MutableHashTable\".\n def _create_variable(self, next_creator, *args, **kwargs):\n if self._num_replicas_in_sync > 1:\n aggregation = kwargs.pop(\"aggregation\", vs.VariableAggregation.NONE)\n if aggregation not in (\n vs.VariableAggregation.NONE,\n vs.VariableAggregation.SUM,\n vs.VariableAggregation.MEAN,\n vs.VariableAggregation.ONLY_FIRST_REPLICA\n ):\n raise ValueError(\"Invalid variable aggregation mode: \" + aggregation +\n \" for variable: \" + kwargs[\"name\"])\n\n def var_creator(*args, **kwargs):\n \"\"\"Create an AggregatingVariable and fix up collections.\"\"\"\n # Record what collections this variable should be added to.\n collections = kwargs.pop(\"collections\", None)\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n kwargs[\"collections\"] = []\n\n # Create and wrap the variable.\n v = next_creator(*args, **kwargs)\n wrapped = values.AggregatingVariable(\n self._container_strategy(), v, aggregation)\n\n # Add the wrapped variable to the requested collections.\n # The handling of eager mode and the global step matches\n # ResourceVariable._init_from_args().\n if not context.executing_eagerly():\n g = ops.get_default_graph()\n # If \"trainable\" is True, next_creator() will add the contained\n # variable to the TRAINABLE_VARIABLES collection, so we manually\n # remove it and replace with the wrapper. 
We can't set \"trainable\"\n # to False for next_creator() since that causes functions like\n # implicit_gradients to skip those variables.\n if kwargs.get(\"trainable\", True):\n collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)\n l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)\n if v in l:\n l.remove(v)\n g.add_to_collections(collections, wrapped)\n elif ops.GraphKeys.GLOBAL_STEP in collections:\n ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)\n\n return wrapped\n else:\n var_creator = next_creator\n\n if \"colocate_with\" in kwargs:\n colocate_with = kwargs[\"colocate_with\"]\n if isinstance(colocate_with, numpy_dataset.SingleDevice):\n with ops.device(colocate_with.device):\n return var_creator(*args, **kwargs)\n with ops.device(None):\n with ops.colocate_with(colocate_with):\n return var_creator(*args, **kwargs)\n\n with ops.colocate_with(None, ignore_existing=True):\n with ops.device(self._variable_device):\n return var_creator(*args, **kwargs)\n\n def _call_for_each_replica(self, fn, args, kwargs):\n # pylint: disable=protected-access\n return mirrored_strategy._call_for_each_replica(\n self._container_strategy(), self._device_map, fn, args, kwargs)\n\n def _verify_destinations_not_different_worker(self, destinations):\n if not self._cluster_spec:\n return\n if destinations is None:\n return\n for d in cross_device_ops_lib.get_devices_from(destinations):\n d_spec = tf_device.DeviceSpec.from_string(d)\n if d_spec.job == self._task_type and d_spec.task != self._task_id:\n raise ValueError(\n \"Cannot reduce to another worker: %r, current worker is %r\" %\n (d, self._input_workers.worker_devices[0]))\n\n def _reduce_to(self, reduce_op, value, destinations):\n self._verify_destinations_not_different_worker(destinations)\n if not isinstance(value, values.DistributedValues):\n # pylint: disable=protected-access\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, self._device_map, value, destinations)\n return self._cross_device_ops.reduce(\n reduce_op, value, destinations=destinations)\n\n def _batch_reduce_to(self, reduce_op, value_destination_pairs):\n for _, destinations in value_destination_pairs:\n self._verify_destinations_not_different_worker(destinations)\n return self._cross_device_ops.batch_reduce(reduce_op,\n value_destination_pairs)\n\n def _select_single_value(self, structured):\n \"\"\"Select any single values in `structured`.\"\"\"\n\n def _select_fn(x): # pylint: disable=g-missing-docstring\n if isinstance(x, values.Mirrored):\n if len(x.devices) == 1:\n return x.primary\n else:\n raise ValueError(\n \"You cannot update variable with a Mirrored object with multiple \"\n \"components %r when using ParameterServerStrategy. You must \"\n \"specify a single value or a Mirrored with a single value.\" % x)\n elif isinstance(x, values.PerReplica):\n raise ValueError(\n \"You cannot update variable with a PerReplica object %r when using \"\n \"ParameterServerStrategy. You must specify a single value or a \"\n \"Mirrored with a single value\" % x)\n else:\n return x\n\n return nest.map_structure(_select_fn, structured)\n\n def _update(self, var, fn, args, kwargs, group):\n if isinstance(var, values.AggregatingVariable):\n var = var.get()\n if not isinstance(var, resource_variable_ops.ResourceVariable):\n raise ValueError(\n \"You can not update `var` %r. 
It must be a Variable.\" % var)\n with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):\n result = fn(var, *self._select_single_value(args),\n **self._select_single_value(kwargs))\n if group:\n return result\n else:\n return nest.map_structure(self._local_results, result)\n\n # TODO(yuefengz): does it need to call _select_single_value?\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n with ops.device(\n colocate_with.device), distribute_lib.UpdateContext(colocate_with):\n result = fn(*args, **kwargs)\n if group:\n return result\n else:\n return nest.map_structure(self._local_results, result)\n\n def _local_results(self, val):\n if isinstance(val, values.DistributedValues):\n return val.values\n return (val,)\n\n def value_container(self, val):\n if (hasattr(val, \"_aggregating_container\") and\n not isinstance(val, values.AggregatingVariable)):\n wrapper = val._aggregating_container() # pylint: disable=protected-access\n if wrapper is not None:\n return wrapper\n return val\n\n def read_var(self, var):\n # No need to distinguish between normal variables and replica-local\n # variables.\n return array_ops.identity(var)\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the strategy class.\n\n The strategy object will be re-initialized if `cluster_spec` is given but\n was not passed in the constructor.\n\n Args:\n session_config: not used currently.\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n task_type: the current task type.\n task_id: the current task id.\n\n Raises:\n ValueError: if `cluster_spec` is given but `task_type` or `task_id` is\n not.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker})\n self._initialize_multi_worker(cluster_resolver)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n if not self._cluster_spec:\n updated_config.isolate_session_state = True\n return updated_config\n\n updated_config.isolate_session_state = False\n\n assert self._task_type\n assert self._task_id is not None\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n if self._task_type in [\"chief\", \"worker\"]:\n updated_config.device_filters.extend(\n [\"/job:%s/task:%d\" % (self._task_type, self._task_id), \"/job:ps\"])\n elif self._task_type == \"evaluator\":\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n return updated_config\n\n @property\n def _num_replicas_in_sync(self):\n return self._device_map.num_replicas_in_graph\n\n @property\n def worker_devices(self):\n return self._device_map.all_devices\n\n @property\n def worker_devices_by_replica(self):\n return self._device_map.devices_by_replica\n\n @property\n def parameter_devices(self):\n return self._parameter_devices\n\n def non_slot_devices(self, var_list):\n return min(var_list, key=lambda x: x.name)\n\n @property\n def experimental_between_graph(self):\n # TODO(yuefengz): Should this return False in the local case?\n return True\n\n @property\n def 
experimental_should_init(self):\n return self._is_chief\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n" ]
[ [ "tensorflow.python.distribute.values.ReplicaDeviceMap", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.distribute.multi_worker_util.worker_count", "tensorflow.python.distribute.multi_worker_util.is_chief", "tensorflow.python.distribute.input_lib.InputFunctionIterator", "tensorflow.python.distribute.multi_worker_util.id_in_cluster", "tensorflow.python.distribute.cross_device_ops.check_destinations", "tensorflow.python.distribute.input_lib.InputWorkers", "tensorflow.python.framework.ops.add_to_collections", "tensorflow.python.distribute.distribute_lib.InputContext", "tensorflow.python.eager.context.num_gpus", "tensorflow.python.distribute.values.LogicalDeviceSpec", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.distribute.cross_device_ops.reduce_non_distributed_value", "tensorflow.python.framework.device.DeviceSpec.from_string", "tensorflow.python.distribute.device_util.canonicalize", "tensorflow.python.distribute.numpy_dataset.one_host_numpy_dataset", "tensorflow.python.distribute.cross_device_ops.ReductionToOneDevice", "tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.distribute.distribute_lib.UpdateContext", "tensorflow.python.distribute.numpy_dataset.SingleDevice", "tensorflow.python.distribute.input_lib.DatasetIterator", "tensorflow.python.distribute.values.validate_colocate", "tensorflow.python.training.device_setter.replica_device_setter", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.util.nest.map_structure", "tensorflow.python.distribute.cross_device_ops.get_devices_from", "tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver" ] ]
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
[ "a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f" ]
[ "semi_final/noahsark/merge.py" ]
[ "# coding: utf-8\nimport pandas as pd\nimport numpy as np\n\n\ndef main():\n nezha_result = pd.read_csv('./result_nezha.csv', header=None)\n nezha_result.columns = ['report_ID', 'label']\n\n dl_result = pd.read_csv('./result_dl.csv', header=None)\n dl_result.columns = ['report_ID', 'label']\n\n new_label_nezha = [i.strip('|').strip() for i in nezha_result['label'].values]\n nezha_result['label'] = new_label_nezha\n\n new_label_dl = [i.strip('|').strip() for i in dl_result['label'].values]\n dl_result['label'] = new_label_dl\n\n final_result = ['0' for _ in range(len(nezha_result))]\n prob = np.zeros(29)\n\n for i in range(0, len(new_label_nezha)):\n str2list_nezha = new_label_nezha[i].split()\n str2list_dl = new_label_dl[i].split()\n\n copy_nezha = str2list_nezha\n copy_dl = str2list_dl\n\n for j in range(0, len(str2list_nezha)):\n prob[j] = 0.8 * float(copy_nezha[j]) + \\\n 0.2 * float(copy_dl[j])\n\n final_result[i] = ' '.join(str(i) for i in prob)\n\n sub_id = nezha_result['report_ID'].values\n str_w = ''\n\n with open('./result.csv', 'w') as f:\n for i in range(0, len(nezha_result)):\n str_w += sub_id[i] + ',' + '|' + final_result[i] + '\\n'\n\n str_w = str_w.strip('\\n')\n\n f.write(str_w)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "numpy.zeros" ] ]
vishalraj-95/ktrain
[ "26de7fb42f9e576e768a22f9e5984d7d134b6bf0" ]
[ "ktrain/core.py" ]
[ "from .imports import *\n\nfrom .lroptimize.sgdr import *\nfrom .lroptimize.triangular import *\nfrom .lroptimize.lrfinder import *\nfrom .lroptimize.optimization import AdamWeightDecay\nfrom . import utils as U\n\nfrom .vision.preprocessor import ImagePreprocessor\nfrom .vision.predictor import ImagePredictor\nfrom .text.preprocessor import TextPreprocessor, BERTPreprocessor, TransformersPreprocessor\nfrom .text.predictor import TextPredictor\nfrom .text.ner.predictor import NERPredictor\nfrom .text.ner.preprocessor import NERPreprocessor\nfrom .graph.predictor import NodePredictor, LinkPredictor\nfrom .graph.preprocessor import NodePreprocessor, LinkPreprocessor\nfrom .tabular.predictor import TabularPredictor\nfrom .tabular.preprocessor import TabularPreprocessor\n\n\nclass Learner(ABC):\n \"\"\"\n ```\n Abstract class used to tune and train Keras models. The fit method is\n an abstract method and must be implemented by subclasses.\n ```\n\n \"\"\"\n def __init__(self, model, workers=1, use_multiprocessing=False):\n if not isinstance(model, Model):\n raise ValueError('model must be of instance Model')\n self.model = model\n self.lr_finder = LRFinder(self.model)\n self.workers = workers\n self.use_multiprocessing = use_multiprocessing\n self.history = None\n\n # save original weights of model\n try:\n new_file, weightfile = tempfile.mkstemp()\n self.model.save_weights(weightfile)\n self._original_weights = weightfile\n except Exception as e:\n warnings.warn('Could not save original model weights: %s' % (e))\n self._original_weights = None\n\n @property\n def _monitor_metrics(self):\n \"\"\"\n ```\n monitor metrics\n ```\n \"\"\"\n metrics = ['loss']\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list): metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append('val_%s' % (m))\n return metrics\n\n\n def get_weight_decay(self):\n \"\"\"\n ```\n Get current weight decay rate\n ```\n \"\"\"\n if type(self.model.optimizer).__name__ == 'AdamWeightDecay':\n return self.model.optimizer.weight_decay_rate\n else:\n return None\n\n\n def set_weight_decay(self, wd=U.DEFAULT_WD):\n \"\"\"\n ```\n Sets global weight decay via AdamWeightDecay optimizer\n Args:\n wd(float): weight decay\n Returns:\n None\n ```\n \"\"\"\n self._recompile(wd=wd)\n return\n \n\n\n def evaluate(self, test_data=None, print_report=True, save_path='ktrain_classification_report.csv', class_names=[]):\n \"\"\"\n ```\n alias for self.validate().\n Returns confusion matrix and optionally prints\n a classification report.\n This is currently only supported for binary and multiclass\n classification, not multilabel classification.\n\n By default, this uses val_data, as supplied to ktrain.get_learner().\n Other validation or test data can be optionally be supplied as argument via <test_data> argument.\n Supply class_names to include labels instead of intenger class integer values in classification report.\n Args:\n test_data(Dataset|np.ndarray): test or validation data. If None, self.val_data is used.\n print_report(bool): If True, classification report will be printed. If False, report will be saved to CSV \n at save_path. 
Not applicable to regression models.\n Not applicable to regression models.\n save_path(str): Classification report will be saved to this file path/name if print_report=False\n Not applicable to regression models.\n class_names(list): list of class names to be used in classification report instead of \n class integer IDs.\n ```\n \"\"\"\n return self.validate(val_data=test_data, print_report=print_report, save_path=save_path, class_names=class_names)\n\n\n\n def validate(self, val_data=None, \n print_report=True,\n save_path='ktrain_classification_report.csv', \n class_names=[]):\n \"\"\"\n ```\n Returns confusion matrix and optionally prints\n a classification report.\n This is currently only supported for binary and multiclass\n classification, not multilabel classification.\n\n By default, this uses val_data, as supplied to ktrain.get_learner().\n Other validation or test data can be optionally be supplied as argument.\n Supply class_names to include labels instead of intenger class integer values in classification report.\n Args:\n val_data(Dataset|np.ndarray): validation data. If None, self.val_data is used.\n print_report(bool): If True, classification report will be printed. If False, report will be saved to CSV \n at save path. Not applicable to regression models.\n save_path(str): Classification report will be saved to this file path/name if print_report=False\n class_names(list): list of class names to be used in classification report instead of \n class integer IDs.\n ```\n \"\"\"\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n\n classification, multilabel = U.is_classifier(self.model)\n if not classification:\n #warnings.warn('learner.validate is only for classification problems. ' \n #'For regression, etc., use learner.predict and learner.ground_truth '\n #'to manually validate.')\n #return\n pass\n \n if U.is_multilabel(val) or multilabel:\n warnings.warn('multilabel confusion matrices not yet supported')\n return\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n\n\n # regression evaluation\n if not classification:\n from sklearn.metrics import mean_absolute_error, mean_squared_error\n regout = []\n metrics = U.metrics_from_model(self.model)\n for m in metrics:\n if m in ['mae', 'mean_absolute_error']:\n regout.append( (m, mean_absolute_error(y_true, y_pred)) )\n elif m in ['mse', 'mean_squared_error']:\n regout.append( (m, mean_squared_error(y_true, y_pred)) )\n if not regout:\n warnings.warn('%s is not supported by validate/evaluate - falling back to MAE')\n regout.append( ('mae', mean_absolute_error(y_true, y_pred)) )\n return regout\n\n\n if len(y_pred.shape) == 1:\n y_pred = np.where(y_pred > 0.5, 1, 0)\n y_true = np.where(y_true > 0.5, 1, 0)\n else:\n y_pred = np.argmax(y_pred, axis=1)\n y_true = np.argmax(y_true, axis=1)\n if print_report or save_path is not None:\n if class_names:\n try:\n class_names = [str(s) for s in class_names]\n except:\n pass\n report = classification_report(y_true, y_pred, target_names=class_names, output_dict=not print_report)\n else:\n report = classification_report(y_true, y_pred, output_dict=not print_report)\n if print_report: \n print(report)\n else:\n df = pd.DataFrame(report).transpose()\n df.to_csv(save_path)\n print('classification report saved to: %s' % (save_path))\n cm_func = confusion_matrix\n cm = confusion_matrix(y_true, y_pred)\n return cm\n\n\n def _check_val(self, val_data):\n if val_data is not None:\n 
val = val_data\n else:\n val = self.val_data\n if val is None: raise Exception('val_data must be supplied to get_learner or view_top_losses')\n return val\n\n\n def top_losses(self, n=4, val_data=None, preproc=None):\n \"\"\"\n ```\n Computes losses on validation set sorted by examples with top losses\n Args:\n n(int or tuple): a range to select in form of int or tuple\n e.g., n=8 is treated as n=(0,8)\n val_data: optional val_data to use instead of self.val_data\n preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.\n For some data like text data, a preprocessor\n is required to undo the pre-processing\n to correctly view raw data.\n Returns:\n list of n tuples where first element is either \n filepath or id of validation example and second element\n is loss.\n ```\n \"\"\"\n\n\n # check validation data and arguments\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n if val is None: raise Exception('val_data must be supplied to get_learner or top_losses')\n if type(n) == type(42):\n n = (0, n)\n\n\n #multilabel = True if U.is_multilabel(val) else False\n classification, multilabel = U.is_classifier(self.model)\n\n\n # get predicictions and ground truth\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_true = y_true.astype('float32')\n\n # adjust y_true for regression problems\n if not classification and len(y_true.shape) == 1 and\\\n (len(y_pred.shape) == 2 and y_pred.shape[1] == 1):\n y_true = np.expand_dims(y_true, -1)\n\n\n # compute loss\n # this doesn't work in tf.keras 1.14\n #losses = self.model.loss_functions[0](tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n #if U.is_tf_keras():\n #L = self.model.loss_functions[0].fn\n #else:\n #L = self.model.loss_functions[0]\n L = U.loss_fn_from_model(self.model)\n losses = L(tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n if DISABLE_V2_BEHAVIOR:\n losses = tf.Session().run(losses)\n else:\n losses = losses.numpy()\n\n\n class_names = [] if preproc is None else preproc.get_classes()\n if preproc is None: \n class_fcn = lambda x:\"%s\" % (x)\n else:\n class_fcn = lambda x:class_names[x]\n\n # regression output modifications\n if not classification:\n if len(y_pred.shape) == 2 and y_pred.shape[1] == 1:\n y_pred = np.squeeze(y_pred)\n y_pred = np.around(y_pred, 2)\n if len(y_true.shape) == 2 and y_true.shape[1] == 1:\n y_true = np.squeeze(y_true)\n y_true = np.around(y_true, 2)\n\n # sort by loss and prune correct classifications, if necessary\n if classification and not multilabel:\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n if len(y_pred.shape) == 1:\n y_p = np.where(y_pred > 0.5, 1, 0)\n y_t = np.where(y_true>0.5, 1, 0)\n else:\n y_p = np.argmax(y_pred, axis=1)\n y_t = np.argmax(y_true, axis=1)\n tups = [(i,x, class_fcn(y_t[i]), class_fcn(y_p[i])) for i, x in enumerate(losses) \n if y_p[i] != y_t[i]]\n else:\n tups = [(i,x, y_true[i], np.around(y_pred[i],2)) for i, x in enumerate(losses)]\n tups.sort(key=operator.itemgetter(1), reverse=True)\n\n # prune by given range\n tups = tups[n[0]:n[1]] if n is not None else tups\n return tups\n\n\n def view_top_losses(self, n=4, preproc=None, val_data=None):\n \"\"\"\n ```\n View observations with top losses in validation set.\n Musta be overridden by Learner subclasses.\n ```\n \"\"\"\n raise NotImplementedError('view_top_losses must be overriden by Learner subclass')\n\n\n def _make_model_folder(self, fpath):\n if os.path.isfile(fpath):\n raise ValueError(f'There is an existing file 
named {fpath}. ' +\\\n 'Please use dfferent value for fpath.')\n elif os.path.exists(fpath):\n #warnings.warn('model is being saved to folder that already exists: %s' % (fpath))\n pass\n elif not os.path.exists(fpath):\n os.makedirs(fpath)\n\n\n def save_model(self, fpath):\n \"\"\"\n ```\n a wrapper to model.save\n Args:\n fpath(str): path to folder in which to save model\n Returns:\n None\n ```\n \"\"\"\n self._make_model_folder(fpath)\n self.model.save(os.path.join(fpath, U.MODEL_NAME), save_format='h5')\n return\n\n\n def load_model(self, fpath, custom_objects=None, **kwargs):\n \"\"\"\n ```\n loads model from folder.\n Note: **kwargs included for backwards compatibility only, as TransformerTextClassLearner.load_model was removed in v0.18.0.\n Args:\n fpath(str): path to folder containing model\n custom_objects(dict): custom objects required to load model.\n For models included with ktrain, this is populated automatically\n and can be disregarded.\n \n ```\n \"\"\"\n self.model = _load_model(fpath, train_data=self.train_data, custom_objects=custom_objects)\n return\n\n def _is_adamlike(self):\n \"\"\"\n ```\n checks whether optimizer attached to model is an \n \"Adam-like\" optimizer with beta_1 parameter.\n ```\n \"\"\"\n return self.model is not None and hasattr(self.model.optimizer, 'beta_1')\n\n\n def _recompile(self, wd=None):\n metrics = U.metrics_from_model(self.model)\n if wd is not None and wd > 0 and type(self.model.optimizer).__name__ != 'AdamWeightDecay':\n warnings.warn('recompiling model to use AdamWeightDecay as opimizer with weight decay of %s' % (wd) )\n optimizer = U.get_default_optimizer(wd=wd)\n elif wd is not None and wd > 0:\n optimizer = U.get_default_optimizer(wd=wd)\n elif wd is not None and wd == 0:\n optimizer = U.DEFAULT_OPT\n else: # wd is None -> don't modify optimizer\n optimizer = self.model.optimizer\n self.model.compile(optimizer=optimizer,\n loss=self.model.loss,\n metrics=metrics)\n\n return\n\n\n def set_model(self, model):\n \"\"\"\n ```\n replace model in this Learner instance\n ```\n \"\"\"\n if not isinstance(model, Model):\n raise ValueError('model must be of instance Model')\n self.model = model\n self.history = None\n return\n\n\n def freeze(self, freeze_range=None):\n \"\"\"\n ```\n If freeze_range is None, makes all layers trainable=False except last Dense layer.\n If freeze_range is given, freezes the first <freeze_range> layers and\n unfrezes all remaining layers.\n NOTE: Freeze method does not currently work with \n multi-GPU models. 
If you are using the load_imagemodel method,\n please use the freeze_layers argument of load_imagemodel\n to freeze layers.\n Args:\n freeze_range(int): number of layers to freeze\n Returns:\n None\n ```\n \"\"\"\n\n if freeze_range is None:\n # freeze everything except last Dense layer\n # first find last dense layer\n dense_id = None\n for i, layer in reversed(list(enumerate(self.model.layers))):\n if isinstance(layer, Dense):\n dense_id = i\n break\n if dense_id is None: raise Exception('cannot find Dense layer in this model')\n for i, layer in enumerate(self.model.layers):\n if i < dense_id: \n layer.trainable=False\n else:\n layer.trainable=True\n else:\n # freeze all layers up to and including layer_id\n if type(freeze_range) != type(1) or freeze_range <1: \n raise ValueError('freeze_range must be integer > 0')\n for i, layer in enumerate(self.model.layers):\n if i < freeze_range: \n layer.trainable=False\n else:\n layer.trainable=True\n self._recompile()\n return\n\n\n def unfreeze(self, exclude_range=None):\n \"\"\"\n ```\n Make every layer trainable except those in exclude_range.\n unfreeze is simply a proxy method to freeze.\n NOTE: Unfreeze method does not currently work with \n multi-GPU models. If you are using the load_imagemodel method,\n please use the freeze_layers argument of load_imagemodel\n to freeze layers.\n ```\n \"\"\"\n # make all layers trainable\n for i, layer in enumerate(self.model.layers):\n layer.trainable = True\n if exclude_range:\n for i, layer in enumerate(self.model.layers[:exclude_range]):\n layer.trainable = False\n self._recompile()\n return\n\n\n def reset_weights(self, verbose=1):\n \"\"\"\n ```\n Re-initializes network with original weights\n ```\n \"\"\"\n\n if os.path.isfile(self._original_weights):\n self.model.load_weights(self._original_weights)\n self.history = None\n U.vprint('Model weights have been reset.', verbose=verbose)\n else:\n warnings.warn('Weights have not been reset because the original weights file '+\\\n '(%s) no longer exists.' % (self._original_weights))\n return\n\n\n\n def lr_find(self, start_lr=1e-7, lr_mult=1.01, max_epochs=None, class_weight=None,\n stop_factor=4, show_plot=False, suggest=False, restore_weights_only=False, verbose=1):\n \"\"\"\n ```\n Plots loss as learning rate is increased. Highest learning rate \n corresponding to a still falling loss should be chosen.\n\n If you find the LR finder is running for more epochs than you'd prefer,\n you can set max_epochs (e.g., max_epochs=5) to estimate LR with a \n smaller sample size.\n\n If lr_mult is supplied and max_epochs is None, LR will increase until loss diverges.\n Reasonable values of lr_mult are between 1.01 and 1.05.\n\n If max_epochs is supplied, lr_mult argument is ignored and computed automatically.\n\n Reference: https://arxiv.org/abs/1506.01186\n\n Args:\n start_lr (float): smallest lr to start simulation\n lr_mult (float): multiplication factor to increase LR.\n Ignored if max_epochs is supplied.\n max_epochs (int): maximum number of epochs to simulate.\n lr_mult is ignored if max_epoch is supplied.\n Default is None. 
Set max_epochs to an integer\n (e.g., 5) if lr_find is taking too long\n and running for more epochs than desired.\n class_weight(dict): class_weight parameter passed to model.fit\n for imbalanced datasets.\n stop_factor(int): factor used to determine threhsold that loss \n must exceed to stop training simulation.\n Increase this if loss is erratic and lr_find\n exits too early.\n show_plot (bool): If True, automatically invoke lr_plot\n restore_weights_only(bool): If True, when training simulation is complete,\n the model weights only are restored, but not\n the original optimizer weights. \n In at least a few cases, this seems to improve performance\n when actual training begins. Further investigation is needed,\n so it is False by default.\n verbose (bool): specifies how much output to print\n Returns:\n None\n ```\n \"\"\"\n # dep_fix: bug in TF 2.2 and 2.3\n if version.parse(tf.__version__) > version.parse('2.1') and version.parse(tf.__version__) < version.parse('2.4'):\n if max_epochs is None:\n raise ValueError('Due to a bug in TensorFlow 2.2 and 2.3, the max_epochs argument is temporarily required. ' +\\\n 'Please re-run with max_epochs (e.g., max_epochs=5). \\n' +\\\n 'More info: https://github.com/tensorflow/tensorflow/issues/41174#issuecomment-656330268')\n\n\n U.vprint('simulating training for different learning rates... this may take a few moments...',\n verbose=verbose)\n # save current weights and temporarily restore original weights\n # dep_fix: temporarily use save_model instead of save_weights as default due to https://github.com/tensorflow/tensorflow/issues/41116\n _weights_only=True\n if restore_weights_only:\n new_file, weightfile = tempfile.mkstemp()\n self.model.save_weights(weightfile)\n else:\n temp_folder = tempfile.mkdtemp()\n self.save_model(temp_folder)\n\n\n # compute steps_per_epoch\n num_samples = U.nsamples_from_data(self.train_data)\n bs = self.train_data.batch_size if hasattr(self.train_data, 'batch_size') else self.batch_size\n if U.is_iter(self.train_data):\n use_gen = True\n steps_per_epoch = num_samples // bs\n else:\n use_gen = False\n steps_per_epoch = np.ceil(num_samples/bs)\n\n # check steps_per_epoch\n if steps_per_epoch <=64 and max_epochs is None:\n warnings.warn('max_epochs is being set to 5 since steps per epoch is small. 
' +\\\n 'If you wish to estimate LR using more epochs, set max_epochs manually.')\n max_epochs = 5\n\n\n try:\n # track and plot learning rates\n self.lr_finder = LRFinder(self.model, stop_factor=stop_factor)\n self.lr_finder.find(self._prepare(self.train_data), \n steps_per_epoch,\n use_gen=use_gen,\n start_lr=start_lr, lr_mult=lr_mult, \n max_epochs=max_epochs,\n class_weight=class_weight,\n workers=self.workers, \n use_multiprocessing=self.use_multiprocessing, \n batch_size=self.batch_size,\n verbose=verbose)\n except KeyboardInterrupt:\n # re-load current weights\n #self.model.load_weights(weightfile)\n self.load_model(temp_folder)\n return\n\n # re-load current weights\n # dep_fix: temporarily use load_model instead of load_weights as default due to https://github.com/tensorflow/tensorflow/issues/41116\n if restore_weights_only:\n self.model.load_weights(weightfile)\n else:\n self.load_model(temp_folder)\n\n # instructions to invoker\n U.vprint('\\n', verbose=verbose)\n U.vprint('done.', verbose=verbose)\n if show_plot:\n U.vprint('Visually inspect loss plot and select learning rate associated with falling loss', verbose=verbose)\n self.lr_plot(suggest=suggest)\n else:\n U.vprint('Please invoke the Learner.lr_plot() method to visually inspect '\n 'the loss plot to help identify the maximal learning rate '\n 'associated with falling loss.', verbose=verbose)\n return\n\n\n def lr_estimate(self):\n \"\"\"\n ```\n Return numerical estimates of lr using two different methods:\n 1. learning rate associated with minimum numerical gradient\n 2. learning rate associated with minimum loss divided by 10\n Since neither of these methods are fool-proof and can \n potentially return bad estimates, it is recommended that you \n examine the plot generated by lr_plot to estimate the learning rate.\n Returns:\n tuple: tuple of the form (float, float), where \n First element is lr associated with minimum numerical gradient (None if gradient computation fails).\n Second element is lr associated with minimum loss divided by 10.\n ```\n \"\"\"\n if self.lr_finder is None or not self.lr_finder.find_called(): raise ValueError('Please call lr_find first.')\n return self.lr_finder.estimate_lr()\n \n\n\n def lr_plot(self, n_skip_beginning=10, n_skip_end=5, suggest=False, return_fig=False):\n \"\"\"\n ```\n Plots the loss vs. learning rate to help identify\n The maximal learning rate associated with a falling loss.\n The nskip_beginning and n_skip_end arguments can be used\n to \"zoom in\" on the plot.\n Args:\n n_skip_beginning(int): number of batches to skip on the left.\n n_skip_end(int): number of batches to skip on the right.\n suggest(bool): will highlight numerical estimate\n of best lr if True - methods adapted from fastai\n return_fig(bool): If True, return matplotlib.figure.Figure\n Returns:\n matplotlib.figure.Figure if return_fig else None\n ```\n \"\"\"\n # dep_fix: bug in TF 2.2 and 2.3\n if version.parse(tf.__version__) > version.parse('2.1') and version.parse(tf.__version__) < version.parse('2.4'):\n if n_skip_end == 5: n_skip_end=10\n\n if self.lr_finder is None or not self.lr_finder.find_called(): raise ValueError('Please call lr_find first.')\n return self.lr_finder.plot_loss(n_skip_beginning=n_skip_beginning,\n n_skip_end=n_skip_end, suggest=suggest, return_fig=return_fig)\n\n\n def plot(self, plot_type='loss', return_fig=False):\n \"\"\"\n ```\n plots training history\n Args:\n plot_type (str): A valid value in tf.keras History. 
Either a built-in value {'loss', 'lr', 'momentum'} or\n other values previously specified by user. For instance, if 'mae' and/or 'mse' is previously specified as metrics\n when creating model, then these values can also be specified.\n return_fig(bool): If True, return matplotlib.figure.Figure\n Return:\n matplotlib.figure.Figure if return_fig else None\n ```\n \"\"\"\n if self.history is None:\n raise Exception('No training history - did you train the model yet?')\n if not isinstance(plot_type, str):\n raise ValueError('plot_type must be str/string')\n\n fig = None\n if plot_type == 'loss':\n plt.plot(self.history.history['loss'])\n if 'val_loss' in self.history.history:\n plt.plot(self.history.history['val_loss'])\n legend_items = ['train', 'validation']\n else:\n legend_items = ['train']\n plt.title('Model Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(legend_items, loc='upper left')\n elif plot_type == 'lr':\n if 'lr' not in self.history.history:\n raise ValueError('no lr in history: are you sure you used autofit or fit_onecycle to train?')\n plt.plot(self.history.history['lr'])\n plt.title('LR Schedule')\n plt.ylabel('lr')\n plt.xlabel('iterations')\n elif plot_type == 'momentum':\n if 'momentum' not in self.history.history:\n raise ValueError('no momentum history: are you sure you used autofit or fit_onecycle to train?')\n plt.plot(self.history.history['momentum'])\n plt.title('Momentum Schedule')\n plt.ylabel('momentum')\n plt.xlabel('iterations')\n else:\n if plot_type not in self.history.history:\n raise ValueError(f'no {plot_type} in history: are you sure {plot_type} exists in history?')\n plt.plot(self.history.history[plot_type])\n \n val_key = f'val_{plot_type}'\n if val_key in self.history.history:\n plt.plot(self.history.history[val_key])\n legend_items = ['train', 'validation']\n else:\n warnings.warn(f'Validation value for {plot_type} wasn\\'t found in history')\n legend_items = ['train']\n \n plt.title(f'History of {plot_type}')\n plt.ylabel(plot_type)\n plt.xlabel('epoch')\n plt.legend(legend_items, loc='upper left')\n fig = plt.gcf()\n plt.show()\n if return_fig: return fig\n return\n\n\n def print_layers(self, show_wd=False):\n \"\"\"\n ```\n prints the layers of the model along with indices\n ```\n \"\"\"\n if show_wd: warnings.warn('set_weight_decay now uses AdamWeightDecay instead of kernel_regularizers.')\n for i, layer in enumerate(self.model.layers):\n if show_wd and hasattr(layer, 'kernel_regularizer'):\n reg = layer.kernel_regularizer\n if hasattr(reg, 'l2'):\n wd = reg.l2\n elif hasattr(reg, 'l1'):\n wd = reg.l1\n else:\n wd = None\n print(\"%s (trainable=%s, wd=%s) : %s\" % (i, layer.trainable, wd, layer))\n else:\n print(\"%s (trainable=%s) : %s\" % (i, layer.trainable, layer))\n return\n\n\n def layer_output(self, layer_id, example_id=0, use_val=False):\n # should implemented in subclass\n raise NotImplementedError\n\n\n def set_lr(self, lr):\n K.set_value(self.model.optimizer.lr, lr)\n return\n\n\n def _check_cycles(self, n_cycles, cycle_len, cycle_mult):\n if type(n_cycles) != type(1) or n_cycles <1:\n raise ValueError('n_cycles must be >= 1')\n if type(cycle_mult) != type(1) or cycle_mult < 1:\n raise ValueError('cycle_mult must by >= 1')\n if cycle_len is not None:\n if type(cycle_len) != type(1) or cycle_len < 1:\n raise ValueError('cycle_len must either be None or >= 1')\n\n # calculate number of epochs\n if cycle_len is None:\n epochs = n_cycles\n else:\n epochs = 0\n tmp_cycle_len = cycle_len\n for i in range(n_cycles):\n epochs += 
tmp_cycle_len\n tmp_cycle_len *= cycle_mult\n return epochs\n\n\n def _cb_sgdr(self, max_lr, steps_per_epoch, cycle_len, cycle_mult, lr_decay=1.0, callbacks=[]):\n if callbacks and 'SGDRScheduler' in [type(cb).__name__ for cb in callbacks]: return callbacks\n # configuration\n min_lr = 1e-9\n if max_lr <= min_lr: min_lr = max_lr/10\n\n # use learning_rate schedule\n if cycle_len is not None:\n if not isinstance(callbacks, list): callbacks = []\n schedule = SGDRScheduler(min_lr=min_lr,\n max_lr=max_lr,\n steps_per_epoch=steps_per_epoch,\n lr_decay=lr_decay,\n cycle_length=cycle_len,\n mult_factor=cycle_mult)\n callbacks.append(schedule)\n if not callbacks: callbacks=None\n return callbacks\n\n\n def _cb_checkpoint(self, folder, callbacks=[]):\n if callbacks and 'ModelCheckpoint' in [type(cb).__name__ for cb in callbacks]: return callbacks\n if folder is not None:\n os.makedirs(folder, exist_ok=True)\n if not isinstance(callbacks, list): callbacks = []\n #filepath=os.path.join(folder, \"weights-{epoch:02d}-{val_loss:.2f}.hdf5\")\n filepath=os.path.join(folder, \"weights-{epoch:02d}.hdf5\")\n callbacks.append(ModelCheckpoint(filepath, save_best_only=False, save_weights_only=True))\n if not callbacks: callbacks=None\n return callbacks\n\n\n def _cb_earlystopping(self, early_stopping, callbacks=[]):\n if callbacks and 'EarlyStopping' in [type(cb).__name__ for cb in callbacks]: return callbacks\n if early_stopping:\n if not isinstance(callbacks, list): callbacks = []\n #if StrictVersion(keras.__version__) >= StrictVersion('2.2.3'):\n try:\n callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0, patience=early_stopping, \n restore_best_weights=True, verbose=0, mode='auto'))\n except TypeError:\n warnings.warn(\"\"\"\n The early_stopping=True argument relies on EarlyStopping.restore_best_weights,\n which is only supported on Keras 2.2.3 or greater. \n For now, we are falling back to EarlyStopping.restore_best_weights=False.\n Please use checkpoint_folder option in fit() to restore best weights.\"\"\")\n callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0, patience=early_stopping, \n verbose=0, mode='auto'))\n\n if not callbacks: callbacks=None\n return callbacks\n\n\n def _prepare(self, data, train=True):\n \"\"\"\n ```\n Subclasses can override this method if data\n needs to be specially-prepared prior to invoking fit methods\n Args:\n data: dataset\n train(bool): If True, prepare for training. Otherwise, prepare for evaluation.\n ```\n \"\"\"\n if data is None: return None\n\n if hasattr(data, 'to_tfdataset'):\n return data.to_tfdataset(train=train)\n else:\n return data\n\n\n @abstractmethod\n def fit(self, lr, n_cycles, cycle_len=None, cycle_mult=1, batch_size=U.DEFAULT_BS):\n pass\n\n\n def fit_onecycle(self, lr, epochs, checkpoint_folder=None, \n cycle_momentum=True, max_momentum=0.95, min_momentum=0.85,\n class_weight=None, callbacks=[], steps_per_epoch=None, verbose=1):\n \"\"\"\n ```\n Train model using a version of Leslie Smith's 1cycle policy.\n This method can be used with any optimizer; cyclical momentum is\n applied only when an \"Adam-like\" optimizer with a beta_1 parameter is used.\n\n Args:\n lr (float): (maximum) learning rate. \n It is recommended that you estimate lr yourself by \n running lr_finder (and lr_plot) and visually inspect plot\n for dramatic loss drop.\n epochs (int): Number of epochs.\n 
checkpoint_folder (string): Folder path in which to save the model weights \n for each epoch.\n File name will be of the form: \n weights-{epoch:02d}-{val_loss:.2f}.hdf5\n cycle_momentum (bool): If True and optimizer is Adam, Nadam, or Adamax, momentum of \n optimizer will be cycled between 0.95 and 0.85 as described in \n https://arxiv.org/abs/1803.09820.\n Only takes effect if Adam, Nadam, or Adamax optimizer is used.\n max_momentum(float): Maximum momentum to use if cycle_momentum=True\n min_momentum(float): minimum momentum to use if cycle_momentum=True\n class_weight (dict): Optional dictionary mapping class indices (integers) to a weight (float) \n callbacks (list): list of Callback instances to employ during training\n steps_per_epoch(int): Steps per epoch. If None, then, math.ceil(num_samples/batch_size) is used.\n Ignored unless training dataset is generator.\n verbose (bool): verbose mode\n ```\n \"\"\"\n if not self._is_adamlike() and cycle_momentum:\n warnings.warn('cyclical momentum has been disabled because '+\\\n 'optimizer is not \"Adam-like\" with beta_1 param')\n cycle_momentum=False\n\n\n num_samples = U.nsamples_from_data(self.train_data)\n if steps_per_epoch is None:\n steps_per_epoch = math.ceil(num_samples/self.batch_size)\n\n # setup callbacks for learning rates and early stopping\n if not callbacks: kcallbacks = []\n else:\n kcallbacks = callbacks[:] \n if cycle_momentum:\n max_momentum = max_momentum\n min_momentum = min_momentum\n else:\n max_momentum = None\n min_momentum = None\n clr = CyclicLR(base_lr=lr/10, max_lr=lr,\n step_size=math.ceil((steps_per_epoch*epochs)/2), \n reduce_on_plateau=0,\n max_momentum=max_momentum,\n min_momentum=min_momentum,\n verbose=verbose)\n kcallbacks.append(clr)\n\n # start training\n policy='onecycle'\n U.vprint('\\n', verbose=verbose)\n U.vprint('begin training using %s policy with max lr of %s...' % (policy, lr), \n verbose=verbose)\n hist = self.fit(lr, epochs, early_stopping=None,\n checkpoint_folder=checkpoint_folder,\n verbose=verbose, class_weight=class_weight, callbacks=kcallbacks, \n steps_per_epoch=steps_per_epoch)\n hist.history['lr'] = clr.history['lr']\n hist.history['iterations'] = clr.history['iterations']\n if cycle_momentum:\n hist.history['momentum'] = clr.history['momentum']\n self.history = hist\n return hist\n\n\n\n def autofit(self, lr, epochs=None, \n early_stopping=None, reduce_on_plateau=None, reduce_factor=2, \n cycle_momentum=True, max_momentum=0.95, min_momentum=0.85,\n monitor='val_loss', checkpoint_folder=None,\n class_weight=None, callbacks=[], steps_per_epoch=None, verbose=1):\n \"\"\"\n ```\n Automatically train model using a default learning rate schedule shown to work well\n in practice. By default, this method currently employs a triangular learning \n rate policy (https://arxiv.org/abs/1506.01186). \n During each epoch, this learning rate policy varies the learning rate from lr/10 to lr\n and then back to a low learning rate that is near-zero. \n If epochs is None, then early_stopping and reduce_on_plateau are automatically\n set to 6 and 3, respectively.\n\n Args:\n lr (float): optional initial learning rate. If missing,\n lr will be estimated automatically.\n It is recommended that you estimate lr yourself by \n running lr_finder (and lr_plot) and visually inspect plot\n for dramatic loss drop.\n epochs (int): Number of epochs. 
If None, training will continue until\n validation loss no longer improves after 5 epochs.\n early_stopping (int): If not None, training will automatically stop after this many \n epochs of no improvement in validation loss.\n Upon completion, model will be loaded with weights from epoch\n with lowest validation loss.\n NOTE: If reduce_on_plateau is also enabled, then\n early_stopping must be greater than reduce_on_plateau.\n Example: early_stopping=6, reduce_on_plateau=3.\n reduce_on_plateau (int): If not None, will lower learning rate\n when validation loss fails to improve after\n the specified number of epochs.\n NOTE: If early_stopping is enabled, then\n reduce_on_plateau must be less than early_stopping.\n Example: early_stopping=6, reduce_on_plateau=3.\n reduce_factor (int): Learning rate is reduced by this factor on plateau.\n Only takes effect if reduce_on_plateau > 0.\n cycle_momentum (bool): If True and optimizer is Adam, Nadam, or Adamax, momentum of \n optimizer will be cycled between 0.95 and 0.85 as described in \n https://arxiv.org/abs/1803.09820.\n Only takes effect if Adam, Nadam, or Adamax optimizer is used.\n max_momentum(float): maximum momentum to use when cycle_momentum=True\n min_momentum(float): minimum momentum to use when cycle_momentum=True\n checkpoint_folder (string): Folder path in which to save the model weights \n for each epoch.\n File name will be of the form: \n weights-{epoch:02d}-{val_loss:.2f}.hdf5\n monitor (str): what metric to monitor for early_stopping\n and reduce_on_plateau. Defaults to 'val_loss'.\n Only used if early_stopping or reduce_on_plateau\n is enabled.\n class_weight (dict): Optional dictionary mapping class indices (integers) to a weight (float) \n callbacks (list): list of Callback instances to employ during training\n steps_per_epoch(int): Steps per epoch. If None, then, math.ceil(num_samples/batch_size) is used.\n Ignored unless training dataset is generator.\n verbose (bool): verbose mode\n ```\n \"\"\"\n # check optimizer\n if not self._is_adamlike() and cycle_momentum:\n warnings.warn('cyclical momentum has been disabled because '+\\\n 'optimizer is not \"Adam-like\" with beta_1 param')\n cycle_momentum=False\n\n\n # setup learning rate policy \n num_samples = U.nsamples_from_data(self.train_data)\n if steps_per_epoch is None:\n steps_per_epoch = math.ceil(num_samples/self.batch_size)\n step_size = math.ceil(steps_per_epoch/2)\n\n # handle missing epochs\n if epochs is None:\n epochs = 1024\n if not early_stopping:\n early_stopping = U.DEFAULT_ES\n U.vprint('early_stopping automatically enabled at patience=%s' % (U.DEFAULT_ES),\n verbose=verbose)\n if not reduce_on_plateau:\n reduce_on_plateau = U.DEFAULT_ROP\n U.vprint('reduce_on_plateau automatically enabled at patience=%s' % (U.DEFAULT_ROP),\n verbose=verbose)\n if reduce_on_plateau and early_stopping and (reduce_on_plateau > early_stopping):\n warnings.warn('reduce_on_plateau=%s is greater than ' % (reduce_on_plateau) +\\\n 'early_stopping=%s. ' % (early_stopping) +\\\n 'Either reduce reduce_on_plateau or set early_stopping ' +\\\n 'to be higher.')\n\n # check monitor\n if reduce_on_plateau is not None or early_stopping is not None:\n if monitor.startswith('val_') and self.val_data is None:\n raise ValueError('monitor is %s but no val_data was supplied.\\nChange monitor or supply val_data to get_learner function.' 
% monitor)\n if monitor != 'val_loss' and monitor not in self._monitor_metrics:\n raise ValueError(\"monitor must be one of {%s}\" % (self._monitor_metrics))\n\n\n # setup callbacks for learning rates and early stopping\n if not callbacks: kcallbacks = []\n else:\n kcallbacks = callbacks[:] \n if cycle_momentum:\n max_momentum = max_momentum\n min_momentum = min_momentum\n else:\n max_momentum = None\n min_momentum = None\n\n clr = CyclicLR(base_lr=lr/10, max_lr=lr,\n step_size=step_size, verbose=verbose,\n monitor=monitor,\n reduce_on_plateau=reduce_on_plateau,\n reduce_factor=reduce_factor,\n max_momentum=max_momentum,\n min_momentum=min_momentum)\n kcallbacks.append(clr)\n if early_stopping:\n kcallbacks.append(EarlyStopping(monitor=monitor, min_delta=0, \n patience=early_stopping,\n restore_best_weights=True, \n verbose=1, mode='auto'))\n\n # start training\n U.vprint('\\n', verbose=verbose)\n policy = 'triangular learning rate'\n U.vprint('begin training using %s policy with max lr of %s...' % (policy, lr), \n verbose=verbose)\n hist = self.fit(lr, epochs, early_stopping=early_stopping,\n checkpoint_folder=checkpoint_folder,\n verbose=verbose, class_weight=class_weight, callbacks=kcallbacks, \n steps_per_epoch=steps_per_epoch)\n hist.history['lr'] = clr.history['lr']\n hist.history['iterations'] = clr.history['iterations']\n if cycle_momentum:\n hist.history['momentum'] = clr.history['momentum']\n self.history = hist\n return hist\n\n\n def ground_truth(self, val_data=None):\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n if not val: raise Exception('val_data must be supplied to get_learner or ground_truth')\n return U.y_from_data(val)\n\n\n def predict(self, val_data=None):\n \"\"\"\n ```\n Makes predictions on validation set\n ```\n \"\"\"\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n if val is None: raise Exception('val_data must be supplied to get_learner or predict')\n if U.is_iter(val):\n if hasattr(val, 'reset'): val.reset()\n steps = np.ceil(U.nsamples_from_data(val)/val.batch_size)\n # *_generator methods are deprecated from TF 2.1.0\n #result = self.model.predict_generator(self._prepare(val, train=False), \n #steps=steps)\n result = self.model.predict(self._prepare(val, train=False), steps=steps)\n return result\n else:\n return self.model.predict(val[0], batch_size=self.eval_batch_size)\n\n \n\nclass ArrayLearner(Learner):\n \"\"\"\n ```\n Main class used to tune and train Keras models\n using Array data. 
An objects of this class should be instantiated\n via the ktrain.get_learner method instead of directly.\n Main parameters are:\n\n \n model (Model): A compiled instance of keras.engine.training.Model\n train_data (ndarray): A tuple of (x_train, y_train), where x_train and \n y_train are numpy.ndarrays.\n val_data (ndarray): A tuple of (x_test, y_test), where x_test and \n y_test are numpy.ndarrays.\n ```\n \"\"\"\n\n\n def __init__(self, model, train_data=None, val_data=None, \n batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, \n workers=1, use_multiprocessing=False):\n super().__init__(model, workers=workers, use_multiprocessing=use_multiprocessing)\n self.train_data = train_data\n self.val_data = val_data\n self.batch_size = batch_size\n self.eval_batch_size = eval_batch_size\n return\n\n \n def fit(self, lr, n_cycles, cycle_len=None, cycle_mult=1, \n lr_decay=1, checkpoint_folder = None, early_stopping=None,\n verbose=1, class_weight=None, callbacks=[], steps_per_epoch=None):\n \"\"\"\n ```\n Trains the model. By default, fit is simply a wrapper for model.fit.\n When cycle_len parameter is supplied, an SGDR learning rate schedule is used.\n Trains the model.\n\n lr (float): learning rate \n n_cycles (int): n_cycles\n cycle_len (int): If not None, decay learning rate over <cycle_len>\n epochs until restarting/resetting learning rate to <lr>.\n If None, lr remains constant\n cycle_mult (int): Increase cycle_len by factor of cycle_mult.\n This will gradually elongate the cycle.\n Has no effect if cycle_len is None.\n lr_decay(float): rate of decay of learning rate each cycle\n checkpoint_folder (string): Folder path in which to save the model weights \n for each epoch.\n File name will be of the form: \n weights-{epoch:02d}-{val_loss:.2f}.hdf5\n early_stopping (int): If not None, training will automatically stop after this many \n epochs of no improvement in validation loss.\n Upon completion, model will be loaded with weights from epoch\n with lowest validation loss.\n callbacks (list): list of Callback instances to employ during training\n class_weight (dict): Optional dictionary mapping class indices (integers) to a weight (float) \n steps_per_epoch(int): Steps per epoch. 
If None, then, math.ceil(num_samples/batch_size) is used.\n Ignored unless training dataset is generator (and in ArrayLearner instances).\n verbose (bool): whether or not to show progress bar\n ```\n \"\"\"\n\n # check early_stopping\n if self.val_data is None and early_stopping is not None:\n raise ValueError('early_stopping monitors val_loss but validation data not set')\n\n\n # setup data\n x_train = self.train_data[0]\n y_train = self.train_data[1]\n validation = None\n if self.val_data:\n validation = (self.val_data[0], self.val_data[1])\n # setup learning rate schedule\n epochs = self._check_cycles(n_cycles, cycle_len, cycle_mult)\n self.set_lr(lr)\n\n # set call backs\n kcallbacks = callbacks if callbacks else None\n kcallbacks = self._cb_sgdr(lr, \n np.ceil(len(x_train)/self.batch_size),\n cycle_len, cycle_mult, lr_decay, callbacks=kcallbacks)\n kcallbacks = self._cb_checkpoint(checkpoint_folder, callbacks=kcallbacks)\n kcallbacks = self._cb_earlystopping(early_stopping, callbacks=kcallbacks)\n sgdr = [cb for cb in kcallbacks if type(cb).__name__ == 'SGDRScheduler'] if kcallbacks else None\n sgdr = sgdr[0] if sgdr else None\n\n\n # train model\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message='.*Check your callbacks.*')\n hist = self.model.fit(self._prepare(x_train), \n self._prepare(y_train, train=False),\n batch_size=self.batch_size,\n epochs=epochs,\n validation_data=validation, verbose=verbose, \n shuffle=True,\n class_weight=class_weight,\n callbacks=kcallbacks)\n\n if sgdr is not None: hist.history['lr'] = sgdr.history['lr']\n self.history = hist\n\n if early_stopping:\n U.vprint('Weights from best epoch have been loaded into model.', verbose=verbose)\n #loss, acc = self.model.evaluate(self.val_data[0], self.val_data[1])\n #U.vprint('\\n', verbose=verbose)\n #U.vprint('Early stopping due to no further improvement.', verbose=verbose)\n #U.vprint('final loss:%s, final score:%s' % (loss, acc), verbose=verbose)\n\n return hist\n\n\n def layer_output(self, layer_id, example_id=0, use_val=False):\n \"\"\"\n ```\n Prints output of layer with index <layer_id> to help debug models.\n Uses first example (example_id=0) from training set, by default.\n ```\n \"\"\"\n \n inp = self.model.layers[0].input\n outp = self.model.layers[layer_id].output\n f_out = K.function([inp], [outp])\n if not use_val:\n example = self.train_data[0][example_id]\n else:\n example = self.val_data[0][example_id]\n layer_out = f_out([np.array([example,])])[0]\n return layer_out\n\n\n def view_top_losses(self, n=4, preproc=None, val_data=None):\n \"\"\"\n ```\n Views observations with top losses in validation set.\n Typically over-ridden by Learner subclasses.\n Args:\n n(int or tuple): a range to select in form of int or tuple\n e.g., n=8 is treated as n=(0,8)\n preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.\n For some data like text data, a preprocessor\n is required to undo the pre-processing\n to correctly view raw data.\n val_data: optional val_data to use instead of self.val_data\n Returns:\n list of n tuples where first element is either \n filepath or id of validation example and second element\n is loss.\n ```\n \"\"\"\n val = self._check_val(val_data)\n\n\n # get top losses and associated data\n tups = self.top_losses(n=n, val_data=val, preproc=preproc)\n\n # get multilabel status and class names\n classes = preproc.get_classes() if preproc is not None else None\n # iterate through losses\n for tup in tups:\n\n # get data\n idx = tup[0]\n loss = tup[1]\n 
truth = tup[2]\n pred = tup[3]\n\n obs = val[0][idx]\n join_char = ' '\n if preproc is not None: obs = preproc.undo(obs)\n if preproc is not None and isinstance(preproc, TextPreprocessor):\n if preproc.is_nospace_lang(): join_char = ''\n if type(obs) == str:\n obs = join_char.join(obs.split()[:512])\n print('----------')\n print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth, pred))\n print(obs)\n return\n\n\n\nclass GenLearner(Learner):\n \"\"\"\n ```\n Main class used to tune and train Keras models\n using a Keras generator (e.g., DirectoryIterator).\n Objects of this class should be instantiated using the\n ktrain.get_learner function, rather than directly.\n\n Main parameters are:\n\n model (Model): A compiled instance of keras.engine.training.Model\n train_data (Iterator): a Iterator instance for training set\n val_data (Iterator): A Iterator instance for validation set\n ```\n \"\"\"\n\n\n def __init__(self, model, train_data=None, val_data=None, \n batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,\n workers=1, use_multiprocessing=False):\n super().__init__(model, workers=workers, use_multiprocessing=use_multiprocessing)\n self.train_data = train_data\n self.val_data = val_data\n self.batch_size = batch_size\n self.eval_batch_size = eval_batch_size\n if self.train_data:\n self.train_data.batch_size = batch_size\n if self.val_data:\n self.val_data.batch_size = eval_batch_size\n return\n\n \n def fit(self, lr, n_cycles, cycle_len=None, cycle_mult=1,\n lr_decay=1.0, checkpoint_folder=None, early_stopping=None, \n class_weight=None, callbacks=[], steps_per_epoch=None, verbose=1):\n \"\"\"\n ```\n Trains the model. By default, fit is simply a wrapper for model.fit (for generators/sequences).\n When cycle_len parameter is supplied, an SGDR learning rate schedule is used.\n\n lr (float): learning rate \n n_cycles (int): n_cycles\n cycle_len (int): If not None, decay learning rate over <cycle_len>\n epochs until restarting/resetting learning rate to <lr>.\n If None, lr remains constant\n cycle_mult (int): Increase cycle_len by factor of cycle_mult.\n This will gradually elongate the cycle.\n Has no effect if cycle_len is None.\n lr_decay (float): rate of decay of learning reach each cycle.\n Has no effect if cycle_len is None\n checkpoint_folder (string): Folder path in which to save the model weights \n for each epoch.\n File name will be of the form: \n weights-{epoch:02d}-{val_loss:.2f}.hdf5\n early_stopping (int): If not None, training will automatically stop after this many \n epochs of no improvement in validation loss.\n Upon completion, model will be loaded with weights from epoch\n with lowest validation loss.\n class_weight (dict): Optional dictionary mapping class indices (integers) to a weight (float) \n callbacks (list): list of Callback instances to employ during training\n steps_per_epoch(int): Steps per epoch. 
If None, then, math.ceil(num_samples/batch_size) is used.\n verbose (boolean): whether or not to print progress bar\n ```\n \"\"\"\n # check early_stopping\n if self.val_data is None and early_stopping is not None:\n raise ValueError('early_stopping monitors val_loss but validation data not set')\n\n \n # handle callbacks\n num_samples = U.nsamples_from_data(self.train_data)\n train_bs = self.train_data.batch_size if hasattr(self.train_data, 'batch_size') else self.batch_size\n if steps_per_epoch is None:\n steps_per_epoch = math.ceil(num_samples/train_bs)\n validation_steps = None\n if self.val_data is not None:\n val_bs = self.val_data.batch_size if hasattr(self.val_data, 'batch_size') else self.batch_size\n validation_steps = math.ceil(U.nsamples_from_data(self.val_data)/val_bs)\n\n epochs = self._check_cycles(n_cycles, cycle_len, cycle_mult)\n self.set_lr(lr)\n\n\n # set call backs\n kcallbacks = callbacks if callbacks else None\n kcallbacks = self._cb_sgdr(lr, \n steps_per_epoch,\n cycle_len, cycle_mult, lr_decay, callbacks=kcallbacks)\n kcallbacks = self._cb_checkpoint(checkpoint_folder, callbacks=kcallbacks)\n kcallbacks = self._cb_earlystopping(early_stopping, callbacks=kcallbacks)\n sgdr = [cb for cb in kcallbacks if type(cb).__name__ == 'SGDRScheduler'] if kcallbacks else None\n sgdr = sgdr[0] if sgdr else None\n #if kcallbacks: print([type(cb).__name__ for cb in kcallbacks])\n\n \n # MNIST times per epoch on Titan V\n # workers=4, usemp=True 9 sec.\n # workers=1, usemp=True 12 sec.\n # workers=1, usemp=False 16 sec.\n # workers=4, usemp=False 30+ sec.\n #print(self.workers)\n #print(self.use_multiprocessing)\n\n # train model\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message='.*Check your callbacks.*')\n fit_fn = self.model.fit\n hist = fit_fn(self._prepare(self.train_data),\n steps_per_epoch = steps_per_epoch,\n validation_steps = validation_steps,\n epochs=epochs,\n validation_data=self._prepare(self.val_data, train=False),\n workers=self.workers,\n use_multiprocessing=self.use_multiprocessing, \n verbose=verbose,\n shuffle=True,\n class_weight=class_weight,\n callbacks=kcallbacks)\n if sgdr is not None: hist.history['lr'] = sgdr.history['lr']\n self.history = hist\n\n if early_stopping:\n U.vprint('Weights from best epoch have been loaded into model.', verbose=verbose)\n #loss, acc = self.model.evaluate_generator(self.val_data)\n #U.vprint('\\n', verbose=verbose)\n #U.vprint('Early stopping due to no further improvement.', verbose=verbose)\n #U.vprint('final loss:%s, final score:%s' % (loss, acc), verbose=verbose)\n return hist\n\n\n def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):\n \"\"\"\n ```\n Prints output of layer with index <layer_id> to help debug models.\n Uses first example (example_id=0) from first batch from training set, by default.\n ```\n \"\"\"\n \n inp = self.model.layers[0].input\n outp = self.model.layers[layer_id].output\n f_out = K.function([inp], [outp])\n if not use_val:\n example = self.train_data[0][batch_id][example_id]\n else:\n example = self.val_data[0][batch_id][example_id]\n layer_out = f_out([np.array([example,])])[0]\n return layer_out\n\n\n #def view_top_losses(self, n=4, preproc=None, val_data=None):\n # \"\"\"\n # Views observations with top losses in validation set.\n # Musta be overridden by Learner subclasses.\n # \"\"\"\n # raise NotImplementedError('view_top_losses must be overriden by GenLearner subclass')\n def view_top_losses(self, n=4, preproc=None, val_data=None):\n \"\"\"\n 
```\n Views observations with top losses in validation set.\n Typically over-ridden by Learner subclasses.\n Args:\n n(int or tuple): a range to select in form of int or tuple\n e.g., n=8 is treated as n=(0,8)\n preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.\n For some data like text data, a preprocessor\n is required to undo the pre-processing\n to correctly view raw data.\n val_data: optional val_data to use instead of self.val_data\n Returns:\n list of n tuples where first element is either \n filepath or id of validation example and second element\n is loss.\n ```\n \"\"\"\n val = self._check_val(val_data)\n\n\n # get top losses and associated data\n tups = self.top_losses(n=n, val_data=val, preproc=preproc)\n\n # get multilabel status and class names\n classes = preproc.get_classes() if preproc is not None else None\n # iterate through losses\n for tup in tups:\n\n # get data\n idx = tup[0]\n loss = tup[1]\n truth = tup[2]\n pred = tup[3]\n\n print('----------')\n print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth, pred))\n return\n\n\n\n#------------------------------------------------------------------------------\n# Predictor functions\n#------------------------------------------------------------------------------\n\ndef get_predictor(model, preproc, batch_size=U.DEFAULT_BS):\n \"\"\"\n ```\n Returns a Predictor instance that can be used to make predictions on\n unlabeled examples. Can be saved to disk and reloaded as part of a \n larger application.\n\n Args\n model (Model): A compiled instance of keras.engine.training.Model\n preproc(Preprocessor): An instance of TextPreprocessor,ImagePreprocessor,\n or NERPreprocessor.\n These instances are returned from the data loading\n functions in the ktrain vision and text modules:\n\n ktrain.vision.images_from_folder\n ktrain.vision.images_from_csv\n ktrain.vision.images_from_array\n ktrain.text.texts_from_folder\n ktrain.text.texts_from_csv\n ktrain.text.ner.entities_from_csv\n batch_size(int): batch size to use. 
default:32\n ```\n \"\"\"\n\n # check arguments\n if not isinstance(model, Model):\n raise ValueError('model must be of instance Model')\n if not isinstance(preproc, (ImagePreprocessor,TextPreprocessor, NERPreprocessor, NodePreprocessor, LinkPreprocessor, TabularPreprocessor)):\n raise ValueError('preproc must be instance of ktrain.preprocessor.Preprocessor')\n if isinstance(preproc, ImagePreprocessor):\n return ImagePredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, TextPreprocessor):\n #elif type(preproc).__name__ == 'TextPreprocessor':\n return TextPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, NERPreprocessor):\n return NERPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, NodePreprocessor):\n return NodePredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, LinkPreprocessor):\n return LinkPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, TabularPreprocessor):\n return TabularPredictor(model, preproc, batch_size=batch_size)\n\n else:\n raise Exception('preproc of type %s not currently supported' % (type(preproc)))\n\n\ndef load_predictor(fpath, batch_size=U.DEFAULT_BS, custom_objects=None):\n \"\"\"\n ```\n Loads a previously saved Predictor instance\n Args\n fpath(str): predictor path name (value supplied to predictor.save)\n From v0.16.x, this is always the path to a folder.\n Pre-v0.16.x, this is the base name used to save model and .preproc instance.\n batch_size(int): batch size to use for predictions. default:32\n custom_objects(dict): custom objects required to load model.\n This is useful if you compiled the model with a custom loss function, for example.\n For models included with ktrain as is, this is populated automatically\n and can be disregarded. 
\n ```\n \"\"\"\n\n # load the preprocessor\n preproc = None\n try:\n preproc_name = os.path.join(fpath, U.PREPROC_NAME)\n with open(preproc_name, 'rb') as f: preproc = pickle.load(f)\n except:\n try:\n preproc_name = fpath +'.preproc'\n #warnings.warn('could not load .preproc file as %s - attempting to load as %s' % (os.path.join(fpath, U.PREPROC_NAME), preproc_name))\n with open(preproc_name, 'rb') as f: preproc = pickle.load(f)\n except:\n raise Exception('Failed to load .preproc file in either the post v0.16.x loction (%s) or pre v0.16.x location (%s)' % (os.path.join(fpath, U.PREPROC_NAME), fpath+'.preproc'))\n\n # load the model\n model = _load_model(fpath, preproc=preproc, custom_objects=custom_objects)\n\n\n # preprocessing functions in ImageDataGenerators are not pickable\n # so, we must reconstruct\n if hasattr(preproc, 'datagen') and hasattr(preproc.datagen, 'ktrain_preproc'):\n preproc_name = preproc.datagen.ktrain_preproc\n if preproc_name == 'resnet50':\n preproc.datagen.preprocessing_function = pre_resnet50\n elif preproc_name == 'mobilenet':\n preproc.datagen.preprocessing_function = pre_mobilenet\n elif preproc_name == 'mobilenetv3':\n preproc.datagen.preprocessing_function = pre_mobilenetv3small\n elif preproc_name == 'inception':\n preproc.datagen.preprocessing_function = pre_inception\n elif preproc_name == 'efficientnet':\n preproc.datagen.preprocessing_function = pre_efficientnet\n else:\n raise Exception('Uknown preprocessing_function name: %s' % (preproc_name))\n \n # return the appropriate predictor\n if not isinstance(model, Model):\n raise ValueError('model must be of instance Model')\n if not isinstance(preproc, (ImagePreprocessor, TextPreprocessor, NERPreprocessor, NodePreprocessor, LinkPreprocessor, TabularPreprocessor)):\n raise ValueError('preproc must be instance of ktrain.preprocessor.Preprocessor')\n if isinstance(preproc, ImagePreprocessor):\n return ImagePredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, TextPreprocessor):\n return TextPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, NERPreprocessor):\n return NERPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, NodePreprocessor):\n return NodePredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, LinkPreprocessor):\n return LinkPredictor(model, preproc, batch_size=batch_size)\n elif isinstance(preproc, TabularPreprocessor):\n return TabularPredictor(model, preproc, batch_size=batch_size)\n else:\n raise Exception('preprocessor not currently supported')\n\n\n\n\n\n#----------------------------------------\n# Utility Functions\n#----------------------------------------\n\n\n\n\ndef release_gpu_memory(device=0):\n \"\"\"\n ```\n Relase GPU memory allocated by Tensorflow\n Source: \n https://stackoverflow.com/questions/51005147/keras-release-memory-after-finish-training-process\n ```\n \"\"\"\n from numba import cuda\n K.clear_session()\n cuda.select_device(device)\n cuda.close()\n return\n\n\ndef _load_model(fpath, preproc=None, train_data=None, custom_objects=None):\n if not preproc and not train_data:\n raise ValueError('Either preproc or train_data is required.')\n if (preproc and isinstance(preproc, TransformersPreprocessor)) or \\\n (train_data and U.is_huggingface(data=train_data)):\n if preproc:\n model = preproc.get_model(fpath=fpath)\n # if model_name is local_path, update it to reflect current predictor folder\n # in case learner was trained with local path on different machine\n # TODO: support 
this for Windows paths\n if preproc.model_name.startswith(os.sep): preproc.model_name = fpath\n else:\n model = TransformersPreprocessor.load_model_and_configure_from_data(fpath, train_data)\n return model\n elif (preproc and (isinstance(preproc, BERTPreprocessor) or \\\n type(preproc).__name__ == 'BERTPreprocessor')) or\\\n train_data and U.bert_data_tuple(train_data):\n # custom BERT model\n from keras_bert import get_custom_objects\n if isinstance(custom_objects, dict):\n custom_objects.update(get_custom_objects())\n else:\n custom_objects = get_custom_objects()\n elif (preproc and (isinstance(preproc, NERPreprocessor) or \\\n type(preproc).__name__ == 'NERPreprocessor')) or \\\n train_data and U.is_ner(data=train_data):\n from .text.ner.anago.layers import CRF\n from .text.ner import crf_loss\n custom_objects={'CRF': CRF, 'crf_loss':crf_loss}\n # save old te_model as backup\n if preproc:\n old_te_model = preproc.p.te_model\n # load TransformerEmbedding model from fpath/hf folder\n # if model_name is local_path, update it to reflect current predictor folder, since\n # all model/tokenizer/config files should have been saved there by predictor.save\n \n preproc.p.te_model = os.path.join(fpath, 'hf') if preproc.p.te_model else preproc.p.te_model\n if preproc.p.te_model:\n # te_model should point fpath/hf folder\n try:\n preproc.p.activate_transformer(preproc.p.te_model, layers=preproc.p.te_layers)\n except:\n # fall back to old model id or location if error for backwards compatibility\n warnings.warn(f'could not load TransformerEmbedding model from {preproc.p.te_model} - trying {old_te_model}')\n preproc.p.te_model = old_te_model\n preproc.p.activate_transformer(preproc.p.te_model, layers=preproc.p.te_layers)\n\n elif (preproc and (isinstance(preproc, NodePreprocessor) or \\\n type(preproc).__name__ == 'NodePreprocessor')) or \\\n train_data and U.is_nodeclass(data=train_data):\n from stellargraph.layer import MeanAggregator\n custom_objects={'MeanAggregator': MeanAggregator}\n elif (preproc and (isinstance(preproc, LinkPreprocessor) or \\\n type(preproc).__name__ == 'LinkPreprocessor')) or \\\n train_data and U.is_linkpred(data=train_data):\n from stellargraph.layer import MeanAggregator\n custom_objects={'MeanAggregator': MeanAggregator}\n custom_objects = {} if custom_objects is None else custom_objects\n custom_objects['AdamWeightDecay'] = AdamWeightDecay\n try:\n try:\n model = load_model(os.path.join(fpath, U.MODEL_NAME), custom_objects=custom_objects)\n except:\n try:\n # pre-0.16: model fpath was file name of model not folder for non-Transformer models\n #warnings.warn('could not load model as %s - attempting to load model as %s' % (os.path.join(fpath, U.MODEL_NAME), fpath))\n model = load_model(fpath, custom_objects=custom_objects)\n except:\n # for bilstm models without CRF layer on TF2 where CRF is not supported \n model = load_model(fpath, custom_objects={'AdamWeightDecay':AdamWeightDecay})\n except Exception as e:\n print('Call to keras.models.load_model failed. Try manually invoking this function to investigate error and report issue if necessary.')\n raise Exception('Error detected: %s' % (e))\n\n # see issue https://github.com/amaiya/ktrain/issues/21\n if hasattr(model, '_make_predict_function'):\n model._make_predict_function()\n\n return model\n\n\n\n" ]
[ [ "sklearn.metrics.mean_absolute_error", "sklearn.metrics.mean_squared_error" ] ]
xzlmark/webspider
[ "133c620c65aa45abea1718b0dada09618c2115bf", "133c620c65aa45abea1718b0dada09618c2115bf" ]
[ "matplotlib_examples/examples_src/style_sheets/plot_fivethirtyeight.py", "matplotlib_examples/examples_src/mplot3d/tricontour3d_demo.py" ]
[ "\"\"\"\n===========================\nFiveThirtyEight style sheet\n===========================\n\nThis shows an example of the \"fivethirtyeight\" styling, which\ntries to replicate the styles from FiveThirtyEight.com.\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\nplt.style.use('fivethirtyeight')\n\nx = np.linspace(0, 10)\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\nfig, ax = plt.subplots()\n\nax.plot(x, np.sin(x) + x + np.random.randn(50))\nax.plot(x, np.sin(x) + 0.5 * x + np.random.randn(50))\nax.plot(x, np.sin(x) + 2 * x + np.random.randn(50))\nax.plot(x, np.sin(x) - 0.5 * x + np.random.randn(50))\nax.plot(x, np.sin(x) - 2 * x + np.random.randn(50))\nax.plot(x, np.sin(x) + np.random.randn(50))\nax.set_title(\"'fivethirtyeight' style sheet\")\n\nplt.show()\n", "\"\"\"\n==========================\nTriangular 3D contour plot\n==========================\n\nContour plots of unstructured triangular grids.\n\nThe data used is the same as in the second plot of trisurf3d_demo2.\ntricontourf3d_demo shows the filled version of this example.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.tri as tri\nimport numpy as np\n\nn_angles = 48\nn_radii = 8\nmin_radius = 0.25\n\n# Create the mesh in polar coordinates and compute x, y, z.\nradii = np.linspace(min_radius, 0.95, n_radii)\nangles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\nangles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\nangles[:, 1::2] += np.pi/n_angles\n\nx = (radii*np.cos(angles)).flatten()\ny = (radii*np.sin(angles)).flatten()\nz = (np.cos(radii)*np.cos(angles*3.0)).flatten()\n\n# Create a custom triangulation.\ntriang = tri.Triangulation(x, y)\n\n# Mask off unwanted triangles.\nxmid = x[triang.triangles].mean(axis=1)\nymid = y[triang.triangles].mean(axis=1)\nmask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)\ntriang.set_mask(mask)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.tricontour(triang, z, cmap=plt.cm.CMRmap)\n\n# Customize the view angle so it's easier to understand the plot.\nax.view_init(elev=45.)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.style.use", "numpy.random.seed", "numpy.random.randn", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.sin", "numpy.linspace" ], [ "matplotlib.pyplot.figure", "numpy.repeat", "numpy.cos", "matplotlib.tri.Triangulation", "matplotlib.pyplot.show", "numpy.sin", "numpy.where", "numpy.linspace" ] ]
uvipen/pytorch-ImageNet-CIFAR-COCO-VOC-training
[ "87549fd4bff3b17b1716a248a115112ee8d05e4d" ]
[ "public/detection/models/loss.py" ]
[ "import math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass RetinaLoss(nn.Module):\n def __init__(self,\n image_w,\n image_h,\n alpha=0.25,\n gamma=2,\n beta=1.0 / 9.0,\n epsilon=1e-4):\n super(RetinaLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.beta = beta\n self.epsilon = epsilon\n self.image_w = image_w\n self.image_h = image_h\n\n def forward(self, cls_heads, reg_heads, batch_anchors, annotations):\n \"\"\"\n compute cls loss and reg loss in one batch\n \"\"\"\n device = annotations.device\n cls_heads = torch.cat(cls_heads, axis=1)\n reg_heads = torch.cat(reg_heads, axis=1)\n batch_anchors = torch.cat(batch_anchors, axis=1)\n\n cls_heads, reg_heads, batch_anchors = self.drop_out_border_anchors_and_heads(\n cls_heads, reg_heads, batch_anchors, self.image_w, self.image_h)\n batch_anchors_annotations = self.get_batch_anchors_annotations(\n batch_anchors, annotations)\n\n cls_loss, reg_loss = [], []\n valid_image_num = 0\n for per_image_cls_heads, per_image_reg_heads, per_image_anchors_annotations in zip(\n cls_heads, reg_heads, batch_anchors_annotations):\n # valid anchors contain all positive anchors\n valid_anchors_num = (per_image_anchors_annotations[\n per_image_anchors_annotations[:, 4] > 0]).shape[0]\n\n if valid_anchors_num == 0:\n cls_loss.append(torch.tensor(0.).to(device))\n reg_loss.append(torch.tensor(0.).to(device))\n else:\n valid_image_num += 1\n one_image_cls_loss = self.compute_one_image_focal_loss(\n per_image_cls_heads, per_image_anchors_annotations)\n one_image_reg_loss = self.compute_one_image_smoothl1_loss(\n per_image_reg_heads, per_image_anchors_annotations)\n cls_loss.append(one_image_cls_loss)\n reg_loss.append(one_image_reg_loss)\n\n cls_loss = sum(cls_loss) / valid_image_num\n reg_loss = sum(reg_loss) / valid_image_num\n\n return cls_loss, reg_loss\n\n def compute_one_image_focal_loss(self, per_image_cls_heads,\n per_image_anchors_annotations):\n \"\"\"\n compute one image focal loss(cls loss)\n per_image_cls_heads:[anchor_num,num_classes]\n per_image_anchors_annotations:[anchor_num,5]\n \"\"\"\n # Filter anchors with gt class=-1, this part of anchor doesn't calculate focal loss\n per_image_cls_heads = per_image_cls_heads[\n per_image_anchors_annotations[:, 4] >= 0]\n per_image_anchors_annotations = per_image_anchors_annotations[\n per_image_anchors_annotations[:, 4] >= 0]\n\n per_image_cls_heads = torch.clamp(per_image_cls_heads,\n min=self.epsilon,\n max=1. - self.epsilon)\n num_classes = per_image_cls_heads.shape[1]\n\n # generate 80 binary ground truth classes for each anchor\n loss_ground_truth = F.one_hot(per_image_anchors_annotations[:,\n 4].long(),\n num_classes=num_classes + 1)\n loss_ground_truth = loss_ground_truth[:, 1:]\n loss_ground_truth = loss_ground_truth.float()\n\n alpha_factor = torch.ones_like(per_image_cls_heads) * self.alpha\n alpha_factor = torch.where(torch.eq(loss_ground_truth, 1.),\n alpha_factor, 1. - alpha_factor)\n pt = torch.where(torch.eq(loss_ground_truth, 1.), per_image_cls_heads,\n 1. - per_image_cls_heads)\n focal_weight = alpha_factor * torch.pow((1. - pt), self.gamma)\n\n bce_loss = -(\n loss_ground_truth * torch.log(per_image_cls_heads) +\n (1. - loss_ground_truth) * torch.log(1. 
- per_image_cls_heads))\n\n one_image_focal_loss = focal_weight * bce_loss\n\n one_image_focal_loss = one_image_focal_loss.sum()\n positive_anchors_num = per_image_anchors_annotations[\n per_image_anchors_annotations[:, 4] > 0].shape[0]\n # according to the original paper,We divide the focal loss by the number of positive sample anchors\n one_image_focal_loss = one_image_focal_loss / positive_anchors_num\n\n return one_image_focal_loss\n\n def compute_one_image_smoothl1_loss(self, per_image_reg_heads,\n per_image_anchors_annotations):\n \"\"\"\n compute one image smoothl1 loss(reg loss)\n per_image_reg_heads:[anchor_num,4]\n per_image_anchors_annotations:[anchor_num,5]\n \"\"\"\n # Filter anchors with gt class=-1, this part of anchor doesn't calculate smoothl1 loss\n device = per_image_reg_heads.device\n per_image_reg_heads = per_image_reg_heads[\n per_image_anchors_annotations[:, 4] > 0]\n per_image_anchors_annotations = per_image_anchors_annotations[\n per_image_anchors_annotations[:, 4] > 0]\n positive_anchor_num = per_image_anchors_annotations.shape[0]\n\n if positive_anchor_num == 0:\n return torch.tensor(0.).to(device)\n\n # compute smoothl1 loss\n loss_ground_truth = per_image_anchors_annotations[:, 0:4]\n x = torch.abs(per_image_reg_heads - loss_ground_truth)\n one_image_smoothl1_loss = torch.where(torch.ge(x, self.beta),\n x - 0.5 * self.beta,\n 0.5 * (x**2) / self.beta)\n one_image_smoothl1_loss = one_image_smoothl1_loss.mean(axis=1).sum()\n # according to the original paper,We divide the smoothl1 loss by the number of positive sample anchors\n one_image_smoothl1_loss = one_image_smoothl1_loss / positive_anchor_num\n\n return one_image_smoothl1_loss\n\n def drop_out_border_anchors_and_heads(self, cls_heads, reg_heads,\n batch_anchors, image_w, image_h):\n \"\"\"\n dropout out of border anchors,cls heads and reg heads\n \"\"\"\n final_cls_heads, final_reg_heads, final_batch_anchors = [], [], []\n for per_image_cls_head, per_image_reg_head, per_image_anchors in zip(\n cls_heads, reg_heads, batch_anchors):\n per_image_cls_head = per_image_cls_head[per_image_anchors[:,\n 0] > 0.0]\n per_image_reg_head = per_image_reg_head[per_image_anchors[:,\n 0] > 0.0]\n per_image_anchors = per_image_anchors[per_image_anchors[:,\n 0] > 0.0]\n\n per_image_cls_head = per_image_cls_head[per_image_anchors[:,\n 1] > 0.0]\n per_image_reg_head = per_image_reg_head[per_image_anchors[:,\n 1] > 0.0]\n per_image_anchors = per_image_anchors[per_image_anchors[:,\n 1] > 0.0]\n\n per_image_cls_head = per_image_cls_head[\n per_image_anchors[:, 2] < image_w]\n per_image_reg_head = per_image_reg_head[\n per_image_anchors[:, 2] < image_w]\n per_image_anchors = per_image_anchors[\n per_image_anchors[:, 2] < image_w]\n\n per_image_cls_head = per_image_cls_head[\n per_image_anchors[:, 3] < image_h]\n per_image_reg_head = per_image_reg_head[\n per_image_anchors[:, 3] < image_h]\n per_image_anchors = per_image_anchors[\n per_image_anchors[:, 3] < image_h]\n\n per_image_cls_head = per_image_cls_head.unsqueeze(0)\n per_image_reg_head = per_image_reg_head.unsqueeze(0)\n per_image_anchors = per_image_anchors.unsqueeze(0)\n\n final_cls_heads.append(per_image_cls_head)\n final_reg_heads.append(per_image_reg_head)\n final_batch_anchors.append(per_image_anchors)\n\n final_cls_heads = torch.cat(final_cls_heads, axis=0)\n final_reg_heads = torch.cat(final_reg_heads, axis=0)\n final_batch_anchors = torch.cat(final_batch_anchors, axis=0)\n\n # final cls heads shape:[batch_size, anchor_nums, class_num]\n # final reg heads 
shape:[batch_size, anchor_nums, 4]\n # final batch anchors shape:[batch_size, anchor_nums, 4]\n return final_cls_heads, final_reg_heads, final_batch_anchors\n\n def get_batch_anchors_annotations(self, batch_anchors, annotations):\n \"\"\"\n Assign a ground truth box target and a ground truth class target for each anchor\n if anchor gt_class index = -1,this anchor doesn't calculate cls loss and reg loss\n if anchor gt_class index = 0,this anchor is a background class anchor and used in calculate cls loss\n if anchor gt_class index > 0,this anchor is a object class anchor and used in\n calculate cls loss and reg loss\n \"\"\"\n device = annotations.device\n assert batch_anchors.shape[0] == annotations.shape[0]\n one_image_anchor_nums = batch_anchors.shape[1]\n\n batch_anchors_annotations = []\n for one_image_anchors, one_image_annotations in zip(\n batch_anchors, annotations):\n # drop all index=-1 class annotations\n one_image_annotations = one_image_annotations[\n one_image_annotations[:, 4] >= 0]\n\n if one_image_annotations.shape[0] == 0:\n one_image_anchor_annotations = torch.ones(\n [one_image_anchor_nums, 5], device=device) * (-1)\n else:\n one_image_gt_bboxes = one_image_annotations[:, 0:4]\n one_image_gt_class = one_image_annotations[:, 4]\n one_image_ious = self.compute_ious_for_one_image(\n one_image_anchors, one_image_gt_bboxes)\n\n # snap per gt bboxes to the best iou anchor\n overlap, indices = one_image_ious.max(axis=1)\n # assgin each anchor gt bboxes for max iou annotation\n per_image_anchors_gt_bboxes = one_image_gt_bboxes[indices]\n # transform gt bboxes to [tx,ty,tw,th] format for each anchor\n one_image_anchors_snaped_boxes = self.snap_annotations_as_tx_ty_tw_th(\n per_image_anchors_gt_bboxes, one_image_anchors)\n\n one_image_anchors_gt_class = (torch.ones_like(overlap) *\n -1).to(device)\n # if iou <0.4,assign anchors gt class as 0:background\n one_image_anchors_gt_class[overlap < 0.4] = 0\n # if iou >=0.5,assign anchors gt class as same as the max iou annotation class:80 classes index from 1 to 80\n one_image_anchors_gt_class[\n overlap >=\n 0.5] = one_image_gt_class[indices][overlap >= 0.5] + 1\n\n one_image_anchors_gt_class = one_image_anchors_gt_class.unsqueeze(\n -1)\n\n one_image_anchor_annotations = torch.cat([\n one_image_anchors_snaped_boxes, one_image_anchors_gt_class\n ],\n axis=1)\n one_image_anchor_annotations = one_image_anchor_annotations.unsqueeze(\n 0)\n batch_anchors_annotations.append(one_image_anchor_annotations)\n\n batch_anchors_annotations = torch.cat(batch_anchors_annotations,\n axis=0)\n\n # batch anchors annotations shape:[batch_size, anchor_nums, 5]\n return batch_anchors_annotations\n\n def snap_annotations_as_tx_ty_tw_th(self, anchors_gt_bboxes, anchors):\n \"\"\"\n snap each anchor ground truth bbox form format:[x_min,y_min,x_max,y_max] to format:[tx,ty,tw,th]\n \"\"\"\n anchors_w_h = anchors[:, 2:] - anchors[:, :2]\n anchors_ctr = anchors[:, :2] + 0.5 * anchors_w_h\n\n anchors_gt_bboxes_w_h = anchors_gt_bboxes[:,\n 2:] - anchors_gt_bboxes[:, :2]\n anchors_gt_bboxes_w_h = torch.clamp(anchors_gt_bboxes_w_h, min=1.0)\n anchors_gt_bboxes_ctr = anchors_gt_bboxes[:, :\n 2] + 0.5 * anchors_gt_bboxes_w_h\n\n snaped_annotations_for_anchors = torch.cat(\n [(anchors_gt_bboxes_ctr - anchors_ctr) / anchors_w_h,\n torch.log(anchors_gt_bboxes_w_h / anchors_w_h)],\n axis=1)\n device = snaped_annotations_for_anchors.device\n factor = torch.tensor([[0.1, 0.1, 0.2, 0.2]]).to(device)\n\n snaped_annotations_for_anchors = snaped_annotations_for_anchors / 
factor\n\n # snaped_annotations_for_anchors shape:[batch_size, anchor_nums, 4]\n return snaped_annotations_for_anchors\n\n def compute_ious_for_one_image(self, one_image_anchors,\n one_image_annotations):\n \"\"\"\n compute ious between one image anchors and one image annotations\n \"\"\"\n # make sure anchors format:[anchor_nums,4],4:[x_min,y_min,x_max,y_max]\n # make sure annotations format: [annotation_nums,4],4:[x_min,y_min,x_max,y_max]\n annotation_num = one_image_annotations.shape[0]\n\n one_image_ious = []\n for annotation_index in range(annotation_num):\n annotation = one_image_annotations[\n annotation_index:annotation_index + 1, :]\n overlap_area_top_left = torch.max(one_image_anchors[:, :2],\n annotation[:, :2])\n overlap_area_bot_right = torch.min(one_image_anchors[:, 2:],\n annotation[:, 2:])\n overlap_area_sizes = torch.clamp(overlap_area_bot_right -\n overlap_area_top_left,\n min=0)\n overlap_area = overlap_area_sizes[:, 0] * overlap_area_sizes[:, 1]\n # anchors and annotations convert format to [x1,y1,w,h]\n anchors_w_h = one_image_anchors[:,\n 2:] - one_image_anchors[:, :2] + 1\n annotations_w_h = annotation[:, 2:] - annotation[:, :2] + 1\n # compute anchors_area and annotations_area\n anchors_area = anchors_w_h[:, 0] * anchors_w_h[:, 1]\n annotations_area = annotations_w_h[:, 0] * annotations_w_h[:, 1]\n\n # compute union_area\n union_area = anchors_area + annotations_area - overlap_area\n union_area = torch.clamp(union_area, min=1e-4)\n # compute ious between one image anchors and one image annotations\n ious = (overlap_area / union_area).unsqueeze(-1)\n\n one_image_ious.append(ious)\n\n one_image_ious = torch.cat(one_image_ious, axis=1)\n\n # one image ious shape:[anchors_num,annotation_num]\n return one_image_ious\n\n\nINF = 100000000\n\n\nclass FCOSLoss(nn.Module):\n def __init__(self,\n strides=[8, 16, 32, 64, 128],\n mi=[[-1, 64], [64, 128], [128, 256], [256, 512], [512, INF]],\n alpha=0.25,\n gamma=2.,\n epsilon=1e-4):\n super(FCOSLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.strides = strides\n self.mi = mi\n\n def forward(self, cls_heads, reg_heads, center_heads, batch_positions,\n annotations):\n \"\"\"\n compute cls loss, reg loss and center-ness loss in one batch\n \"\"\"\n cls_preds, reg_preds, center_preds, batch_targets = self.get_batch_position_annotations(\n cls_heads, reg_heads, center_heads, batch_positions, annotations)\n\n cls_preds = torch.sigmoid(cls_preds)\n reg_preds = torch.exp(reg_preds)\n center_preds = torch.sigmoid(center_preds)\n batch_targets[:, :, 5:6] = torch.sigmoid(batch_targets[:, :, 5:6])\n\n device = annotations.device\n cls_loss, reg_loss, center_ness_loss = [], [], []\n valid_image_num = 0\n for per_image_cls_preds, per_image_reg_preds, per_image_center_preds, per_image_targets in zip(\n cls_preds, reg_preds, center_preds, batch_targets):\n positive_points_num = (\n per_image_targets[per_image_targets[:, 4] > 0]).shape[0]\n if positive_points_num == 0:\n cls_loss.append(torch.tensor(0.).to(device))\n reg_loss.append(torch.tensor(0.).to(device))\n center_ness_loss.append(torch.tensor(0.).to(device))\n else:\n valid_image_num += 1\n one_image_cls_loss = self.compute_one_image_focal_loss(\n per_image_cls_preds, per_image_targets)\n one_image_reg_loss = self.compute_one_image_giou_loss(\n per_image_reg_preds, per_image_targets)\n one_image_center_ness_loss = self.compute_one_image_center_ness_loss(\n per_image_center_preds, per_image_targets)\n\n cls_loss.append(one_image_cls_loss)\n 
reg_loss.append(one_image_reg_loss)\n center_ness_loss.append(one_image_center_ness_loss)\n\n cls_loss = sum(cls_loss) / valid_image_num\n reg_loss = sum(reg_loss) / valid_image_num\n center_ness_loss = sum(center_ness_loss) / valid_image_num\n\n return cls_loss, reg_loss, center_ness_loss\n\n def compute_one_image_focal_loss(self, per_image_cls_preds,\n per_image_targets):\n \"\"\"\n compute one image focal loss(cls loss)\n per_image_cls_preds:[points_num,num_classes]\n per_image_targets:[points_num,8]\n \"\"\"\n per_image_cls_preds = torch.clamp(per_image_cls_preds,\n min=self.epsilon,\n max=1. - self.epsilon)\n num_classes = per_image_cls_preds.shape[1]\n\n # generate 80 binary ground truth classes for each anchor\n loss_ground_truth = F.one_hot(per_image_targets[:, 4].long(),\n num_classes=num_classes + 1)\n loss_ground_truth = loss_ground_truth[:, 1:]\n loss_ground_truth = loss_ground_truth.float()\n\n alpha_factor = torch.ones_like(per_image_cls_preds) * self.alpha\n alpha_factor = torch.where(torch.eq(loss_ground_truth, 1.),\n alpha_factor, 1. - alpha_factor)\n pt = torch.where(torch.eq(loss_ground_truth, 1.), per_image_cls_preds,\n 1. - per_image_cls_preds)\n focal_weight = alpha_factor * torch.pow((1. - pt), self.gamma)\n\n bce_loss = -(\n loss_ground_truth * torch.log(per_image_cls_preds) +\n (1. - loss_ground_truth) * torch.log(1. - per_image_cls_preds))\n\n one_image_focal_loss = focal_weight * bce_loss\n\n one_image_focal_loss = one_image_focal_loss.sum()\n positive_points_num = per_image_targets[\n per_image_targets[:, 4] > 0].shape[0]\n # according to the original paper,We divide the focal loss by the number of positive sample anchors\n one_image_focal_loss = one_image_focal_loss / positive_points_num\n\n return one_image_focal_loss\n\n def compute_one_image_giou_loss(self, per_image_reg_preds,\n per_image_targets):\n \"\"\"\n compute one image giou loss(reg loss)\n per_image_reg_preds:[points_num,4]\n per_image_targets:[anchor_num,8]\n \"\"\"\n # only use positive points sample to compute reg loss\n device = per_image_reg_preds.device\n per_image_reg_preds = per_image_reg_preds[per_image_targets[:, 4] > 0]\n per_image_targets = per_image_targets[per_image_targets[:, 4] > 0]\n positive_points_num = per_image_targets.shape[0]\n\n if positive_points_num == 0:\n return torch.tensor(0.).to(device)\n\n center_ness_targets = per_image_targets[:, 5]\n\n pred_bboxes_xy_min = per_image_targets[:,\n 6:8] - per_image_reg_preds[:,\n 0:2]\n pred_bboxes_xy_max = per_image_targets[:,\n 6:8] + per_image_reg_preds[:,\n 2:4]\n gt_bboxes_xy_min = per_image_targets[:, 6:8] - per_image_targets[:,\n 0:2]\n gt_bboxes_xy_max = per_image_targets[:, 6:8] + per_image_targets[:,\n 2:4]\n\n pred_bboxes = torch.cat([pred_bboxes_xy_min, pred_bboxes_xy_max],\n axis=1)\n gt_bboxes = torch.cat([gt_bboxes_xy_min, gt_bboxes_xy_max], axis=1)\n\n overlap_area_top_left = torch.max(pred_bboxes[:, 0:2], gt_bboxes[:,\n 0:2])\n overlap_area_bot_right = torch.min(pred_bboxes[:, 2:4], gt_bboxes[:,\n 2:4])\n overlap_area_sizes = torch.clamp(overlap_area_bot_right -\n overlap_area_top_left,\n min=0)\n overlap_area = overlap_area_sizes[:, 0] * overlap_area_sizes[:, 1]\n\n # anchors and annotations convert format to [x1,y1,w,h]\n pred_bboxes_w_h = pred_bboxes[:, 2:4] - pred_bboxes[:, 0:2] + 1\n gt_bboxes_w_h = gt_bboxes[:, 2:4] - gt_bboxes[:, 0:2] + 1\n\n # compute anchors_area and annotations_area\n pred_bboxes_area = pred_bboxes_w_h[:, 0] * pred_bboxes_w_h[:, 1]\n gt_bboxes_area = gt_bboxes_w_h[:, 0] * gt_bboxes_w_h[:, 
1]\n\n # compute union_area\n union_area = pred_bboxes_area + gt_bboxes_area - overlap_area\n union_area = torch.clamp(union_area, min=1e-4)\n # compute ious between one image anchors and one image annotations\n ious = overlap_area / union_area\n\n enclose_area_top_left = torch.min(pred_bboxes[:, 0:2], gt_bboxes[:,\n 0:2])\n enclose_area_bot_right = torch.max(pred_bboxes[:, 2:4], gt_bboxes[:,\n 2:4])\n enclose_area_sizes = torch.clamp(enclose_area_bot_right -\n enclose_area_top_left,\n min=0)\n enclose_area = enclose_area_sizes[:, 0] * enclose_area_sizes[:, 1]\n enclose_area = torch.clamp(enclose_area, min=1e-4)\n\n gious_loss = 1. - ious + (enclose_area - union_area) / enclose_area\n gious_loss = torch.clamp(gious_loss, min=-1.0, max=1.0)\n # use center_ness_targets as the weight of gious loss\n gious_loss = gious_loss * center_ness_targets\n gious_loss = gious_loss.sum() / positive_points_num\n gious_loss = 2. * gious_loss\n\n return gious_loss\n\n def compute_one_image_center_ness_loss(self, per_image_center_preds,\n per_image_targets):\n \"\"\"\n compute one image center_ness loss(center ness loss)\n per_image_center_preds:[points_num,4]\n per_image_targets:[anchor_num,8]\n \"\"\"\n # only use positive points sample to compute center_ness loss\n device = per_image_center_preds.device\n per_image_center_preds = per_image_center_preds[\n per_image_targets[:, 4] > 0]\n per_image_targets = per_image_targets[per_image_targets[:, 4] > 0]\n positive_points_num = per_image_targets.shape[0]\n\n if positive_points_num == 0:\n return torch.tensor(0.).to(device)\n\n center_ness_targets = per_image_targets[:, 5:6]\n\n center_ness_loss = -(\n center_ness_targets * torch.log(per_image_center_preds) +\n (1. - center_ness_targets) *\n torch.log(1. - per_image_center_preds))\n center_ness_loss = center_ness_loss.sum() / positive_points_num\n\n return center_ness_loss\n\n def get_batch_position_annotations(self, cls_heads, reg_heads,\n center_heads, batch_positions,\n annotations):\n \"\"\"\n Assign a ground truth target for each position on feature map\n \"\"\"\n device = annotations.device\n batch_mi = []\n for reg_head, mi in zip(reg_heads, self.mi):\n mi = torch.tensor(mi).to(device)\n B, H, W, _ = reg_head.shape\n per_level_mi = torch.zeros(B, H, W, 2).to(device)\n per_level_mi = per_level_mi + mi\n batch_mi.append(per_level_mi)\n\n cls_preds,reg_preds,center_preds,all_points_position,all_points_mi=[],[],[],[],[]\n for cls_pred, reg_pred, center_pred, per_level_position, per_level_mi in zip(\n cls_heads, reg_heads, center_heads, batch_positions, batch_mi):\n cls_pred = cls_pred.view(cls_pred.shape[0], -1, cls_pred.shape[-1])\n reg_pred = reg_pred.view(reg_pred.shape[0], -1, reg_pred.shape[-1])\n center_pred = center_pred.view(center_pred.shape[0], -1,\n center_pred.shape[-1])\n per_level_position = per_level_position.view(\n per_level_position.shape[0], -1, per_level_position.shape[-1])\n per_level_mi = per_level_mi.view(per_level_mi.shape[0], -1,\n per_level_mi.shape[-1])\n\n cls_preds.append(cls_pred)\n reg_preds.append(reg_pred)\n center_preds.append(center_pred)\n all_points_position.append(per_level_position)\n all_points_mi.append(per_level_mi)\n\n cls_preds = torch.cat(cls_preds, axis=1)\n reg_preds = torch.cat(reg_preds, axis=1)\n center_preds = torch.cat(center_preds, axis=1)\n all_points_position = torch.cat(all_points_position, axis=1)\n all_points_mi = torch.cat(all_points_mi, axis=1)\n\n batch_targets = []\n for per_image_position, per_image_mi, per_image_annotations in zip(\n 
all_points_position, all_points_mi, annotations):\n        per_image_annotations = per_image_annotations[\n            per_image_annotations[:, 4] >= 0]\n        points_num = per_image_position.shape[0]\n\n        if per_image_annotations.shape[0] == 0:\n            # 6:l,t,r,b,class_index,center-ness_gt\n            per_image_targets = torch.zeros([points_num, 6], device=device)\n        else:\n            annotation_num = per_image_annotations.shape[0]\n            per_image_gt_bboxes = per_image_annotations[:, 0:4]\n            candidates = torch.zeros([points_num, annotation_num, 4],\n                                     device=device)\n            candidates = candidates + per_image_gt_bboxes.unsqueeze(0)\n            per_image_position = per_image_position.unsqueeze(1).repeat(\n                1, annotation_num, 2)\n            candidates[:, :,\n                       0:2] = per_image_position[:, :,\n                                                 0:2] - candidates[:, :,\n                                                                   0:2]\n            candidates[:, :,\n                       2:4] = candidates[:, :,\n                                         2:4] - per_image_position[:, :,\n                                                                   2:4]\n\n            candidates_min_value, _ = candidates.min(axis=-1, keepdim=True)\n            sample_flag = (candidates_min_value[:, :, 0] >\n                           0).int().unsqueeze(-1)\n            # zero out candidates whose point center falls outside the gt box\n            candidates = candidates * sample_flag\n\n            # zero out candidates whose assigned ground truth is not in the range of mi\n            candidates_max_value, _ = candidates.max(axis=-1, keepdim=True)\n            per_image_mi = per_image_mi.unsqueeze(1).repeat(\n                1, annotation_num, 1)\n            m1_negative_flag = (candidates_max_value[:, :, 0] >\n                                per_image_mi[:, :, 0]).int().unsqueeze(-1)\n            candidates = candidates * m1_negative_flag\n            m2_negative_flag = (candidates_max_value[:, :, 0] <\n                                per_image_mi[:, :, 1]).int().unsqueeze(-1)\n            candidates = candidates * m2_negative_flag\n\n            final_sample_flag = candidates.sum(axis=-1).sum(axis=-1)\n            final_sample_flag = final_sample_flag > 0\n            positive_index = (final_sample_flag == True).nonzero().squeeze(\n                dim=-1)\n\n            # if no positive sample is assigned\n            if len(positive_index) == 0:\n                del candidates\n                # 6:l,t,r,b,class_index,center-ness_gt\n                per_image_targets = torch.zeros([points_num, 6],\n                                                device=device)\n            else:\n                positive_candidates = candidates[positive_index]\n\n                del candidates\n\n                sample_box_gts = per_image_annotations[:, 0:4].unsqueeze(0)\n                sample_box_gts = sample_box_gts.repeat(\n                    positive_candidates.shape[0], 1, 1)\n                sample_class_gts = per_image_annotations[:, 4].unsqueeze(\n                    -1).unsqueeze(0)\n                sample_class_gts = sample_class_gts.repeat(\n                    positive_candidates.shape[0], 1, 1)\n\n                # 6:l,t,r,b,class_index,center-ness_gt\n                per_image_targets = torch.zeros([points_num, 6],\n                                                device=device)\n\n                if positive_candidates.shape[1] == 1:\n                    # if only one candidate for each positive sample\n                    # assign l,t,r,b,class_index,center_ness_gt ground truth\n                    # class_index values from 1 to 80 represent the 80 positive classes\n                    # class_index value 0 represents the negative class\n                    positive_candidates = positive_candidates.squeeze(1)\n                    sample_class_gts = sample_class_gts.squeeze(1)\n                    per_image_targets[positive_index,\n                                      0:4] = positive_candidates\n                    per_image_targets[positive_index,\n                                      4:5] = sample_class_gts + 1\n\n                    l, t, r, b = per_image_targets[\n                        positive_index, 0:1], per_image_targets[\n                            positive_index, 1:2], per_image_targets[\n                                positive_index,\n                                2:3], per_image_targets[positive_index,\n                                                        3:4]\n                    per_image_targets[positive_index, 5:6] = torch.sqrt(\n                        (torch.min(l, r) / torch.max(l, r)) *\n                        (torch.min(t, b) / torch.max(t, b)))\n                else:\n                    # if a positive point sample has several object candidates, choose the smallest-area object candidate as the ground truth for this positive point sample\n                    gts_w_h = sample_box_gts[:, :,\n                                             2:4] - sample_box_gts[:, :,\n                                                                   0:2]\n                    gts_area = gts_w_h[:, :, 0] * gts_w_h[:, :, 1]\n                    positive_candidates_value = positive_candidates.sum(\n                        axis=2)\n\n                    # set all negative candidate areas to 100000000, so the .min() operation won't choose negative candidates\n                    INF = 100000000\n                    inf_tensor = torch.ones_like(gts_area) * INF\n                    gts_area = torch.where(\n                        torch.eq(positive_candidates_value, 0.),\n                        inf_tensor, gts_area)\n\n                    # get the smallest object candidate index\n                    _, min_index = gts_area.min(axis=1)\n                    candidate_indexes = (\n                        torch.linspace(1, positive_candidates.shape[0],\n                                       positive_candidates.shape[0]) -\n                        1).long()\n                    final_candidate_reg_gts = positive_candidates[\n                        candidate_indexes, min_index, :]\n                    final_candidate_cls_gts = sample_class_gts[\n                        candidate_indexes, min_index]\n\n                    # assign l,t,r,b,class_index,center_ness_gt ground truth\n                    per_image_targets[positive_index,\n                                      0:4] = final_candidate_reg_gts\n                    per_image_targets[positive_index,\n                                      4:5] = final_candidate_cls_gts + 1\n\n                    l, t, r, b = per_image_targets[\n                        positive_index, 0:1], per_image_targets[\n                            positive_index, 1:2], per_image_targets[\n                                positive_index,\n                                2:3], per_image_targets[positive_index,\n                                                        3:4]\n                    per_image_targets[positive_index, 5:6] = torch.sqrt(\n                        (torch.min(l, r) / torch.max(l, r)) *\n                        (torch.min(t, b) / torch.max(t, b)))\n\n            per_image_targets = per_image_targets.unsqueeze(0)\n            batch_targets.append(per_image_targets)\n\n        batch_targets = torch.cat(batch_targets, axis=0)\n        batch_targets = torch.cat([batch_targets, all_points_position], axis=2)\n\n        # batch_targets shape: [batch_size, points_num, 8], 8: l,t,r,b,class_index,center-ness_gt,point_ctr_x,point_ctr_y\n        return cls_preds, reg_preds, center_preds, batch_targets\n\n\nclass CenterNetLoss(nn.Module):\n    def __init__(self, alpha=2., beta=4., epsilon=1e-4, max_object_num=100):\n        super(CenterNetLoss, self).__init__()\n        self.alpha = alpha\n        self.beta = beta\n        self.epsilon = epsilon\n        self.max_object_num = max_object_num\n\n    def forward(self, heatmap_heads, offset_heads, wh_heads, annotations):\n        batch_heatmap_targets, batch_wh_targets, batch_offset_targets, batch_reg_to_heatmap_index, batch_positive_targets_mask = self.get_batch_targets(\n            heatmap_heads, annotations)\n\n        heatmap_heads = torch.sigmoid(heatmap_heads)\n        B, num_classes = heatmap_heads.shape[0], heatmap_heads.shape[1]\n        heatmap_heads = heatmap_heads.permute(0, 2, 3, 1).contiguous().view(\n            B, -1, num_classes)\n        batch_heatmap_targets = batch_heatmap_targets.permute(\n            0, 2, 3, 1).contiguous().view(B, -1, num_classes)\n\n        wh_heads = wh_heads.permute(0, 2, 3, 1).contiguous().view(B, -1, 2)\n        offset_heads = offset_heads.permute(0, 2, 3,\n                                            1).contiguous().view(B, -1, 2)\n\n        heatmap_loss, offset_loss, wh_loss = [], [], []\n        valid_image_num = 0\n        device = annotations.device\n        for per_heatmap_heads, per_wh_heads, per_offset_heads, per_heatmap_targets, per_wh_targets, per_offset_targets, per_reg_to_heatmap_index, per_positive_targets_mask in zip(\n                heatmap_heads, wh_heads, offset_heads, batch_heatmap_targets,\n                batch_wh_targets, batch_offset_targets,\n                batch_reg_to_heatmap_index, batch_positive_targets_mask):\n            # if there are no centers on heatmap_targets, this image is not valid\n            valid_center_num = (\n                per_heatmap_targets[per_heatmap_targets == 1.]).shape[0]\n\n            if valid_center_num == 0:\n                heatmap_loss.append(torch.tensor(0.).to(device))\n                offset_loss.append(torch.tensor(0.).to(device))\n                wh_loss.append(torch.tensor(0.).to(device))\n            else:\n                valid_image_num += 1\n                one_image_focal_loss = self.compute_one_image_focal_loss(\n                    per_heatmap_heads, per_heatmap_targets)\n                one_image_offsetl1_loss = self.compute_one_image_offsetl1_loss(\n                    per_offset_heads, per_offset_targets,\n                    per_reg_to_heatmap_index, per_positive_targets_mask)\n                one_image_whl1_loss = self.compute_one_image_whl1_loss(\n                    per_wh_heads, per_wh_targets, per_reg_to_heatmap_index,\n                    per_positive_targets_mask)\n\n                heatmap_loss.append(one_image_focal_loss)\n                offset_loss.append(one_image_offsetl1_loss)\n                wh_loss.append(one_image_whl1_loss)\n\n        # guard against division by zero when no image in the batch has a valid center\n        valid_image_num = max(valid_image_num, 1)\n        heatmap_loss = sum(heatmap_loss) / valid_image_num\n        offset_loss = sum(offset_loss) / valid_image_num\n        wh_loss = sum(wh_loss) / valid_image_num\n\n        return heatmap_loss, offset_loss, wh_loss\n\n    def compute_one_image_focal_loss(self, per_image_heatmap_heads,\n                                     per_image_heatmap_targets):\n        per_image_heatmap_heads = torch.clamp(per_image_heatmap_heads,\n                                              min=self.epsilon,\n                                              max=1. - self.epsilon)\n        # all center points\n        positive_indexes = (per_image_heatmap_targets == 1.)\n        # all non center points\n        negative_indexes = (per_image_heatmap_targets < 1.)\n\n        positive_loss = torch.log(per_image_heatmap_heads) * torch.pow(\n            1 - per_image_heatmap_heads, self.alpha) * positive_indexes\n        negative_loss = torch.log(1 - per_image_heatmap_heads) * torch.pow(\n            per_image_heatmap_heads, self.alpha) * torch.pow(\n                1 - per_image_heatmap_targets, self.beta) * negative_indexes\n\n        valid_center_num = (per_image_heatmap_targets[per_image_heatmap_targets\n                                                      == 1.]).shape[0]\n        loss = -(positive_loss.sum() + negative_loss.sum()) / valid_center_num\n\n        return loss\n\n    def compute_one_image_offsetl1_loss(self,\n                                        per_image_offset_heads,\n                                        per_image_offset_targets,\n                                        per_image_reg_to_heatmap_index,\n                                        per_image_positive_targets_mask,\n                                        factor=1.0 / 9.0):\n        per_image_reg_to_heatmap_index = per_image_reg_to_heatmap_index.unsqueeze(\n            -1).repeat(1, 2)\n        per_image_offset_heads = torch.gather(\n            per_image_offset_heads, 0, per_image_reg_to_heatmap_index.long())\n\n        valid_object_num = (per_image_positive_targets_mask[\n            per_image_positive_targets_mask == 1.]).shape[0]\n\n        per_image_positive_targets_mask = per_image_positive_targets_mask.unsqueeze(\n            -1).repeat(1, 2)\n        per_image_offset_heads = per_image_offset_heads * per_image_positive_targets_mask\n        per_image_offset_targets = per_image_offset_targets * per_image_positive_targets_mask\n\n        x = torch.abs(per_image_offset_heads - per_image_offset_targets)\n        loss = torch.where(torch.ge(x, factor), x - 0.5 * factor,\n                           0.5 * (x**2) / factor)\n        loss = loss.sum() / valid_object_num\n\n        return loss\n\n    def compute_one_image_whl1_loss(self,\n                                    per_image_wh_heads,\n                                    per_image_wh_targets,\n                                    per_image_reg_to_heatmap_index,\n                                    per_image_positive_targets_mask,\n                                    factor=1.0 / 9.0):\n        per_image_reg_to_heatmap_index = per_image_reg_to_heatmap_index.unsqueeze(\n            -1).repeat(1, 2)\n        per_image_wh_heads = torch.gather(\n            per_image_wh_heads, 0, per_image_reg_to_heatmap_index.long())\n\n        valid_object_num = (per_image_positive_targets_mask[\n            per_image_positive_targets_mask == 1.]).shape[0]\n\n        per_image_positive_targets_mask = per_image_positive_targets_mask.unsqueeze(\n            -1).repeat(1, 2)\n        per_image_wh_heads = per_image_wh_heads * per_image_positive_targets_mask\n        per_image_wh_targets = per_image_wh_targets * per_image_positive_targets_mask\n\n        x = torch.abs(per_image_wh_heads - per_image_wh_targets)\n        loss = torch.where(torch.ge(x, factor), x - 0.5 * factor,\n                           0.5 * (x**2) / factor)\n        loss = loss.sum() / valid_object_num\n        loss = 0.1 * loss\n\n        return loss\n\n    def get_batch_targets(self, heatmap_heads, annotations):\n        B, num_classes, H, W = heatmap_heads.shape[0], heatmap_heads.shape[\n            1], heatmap_heads.shape[2], heatmap_heads.shape[3]\n        device = annotations.device\n\n        batch_heatmap_targets, batch_wh_targets, batch_offset_targets, batch_reg_to_heatmap_index, batch_positive_targets_mask=[],[],[],[],[]\n        for per_image_annots in annotations:\n            # limit the max number of annotations per image\n            per_image_annots = per_image_annots[per_image_annots[:, 4] >= 0]\n            num_objs = min(per_image_annots.shape[0], self.max_object_num)\n\n            per_image_heatmap_targets = torch.zeros((num_classes, H, W),\n                                                    device=device)\n            per_image_wh_targets = torch.zeros((self.max_object_num, 2),\n                                               device=device)\n            per_image_offset_targets = torch.zeros((self.max_object_num, 2),\n                                                   device=device)\n            per_image_positive_targets_mask = torch.zeros(\n                (self.max_object_num, ), device=device)\n            per_image_reg_to_heatmap_index = torch.zeros(\n                (self.max_object_num, ), device=device)\n            gt_bboxes, gt_classes = per_image_annots[:,\n                                                     0:4], per_image_annots[:,\n                                                                            4]\n            # map gt boxes to the heatmap scale (CenterNet uses an output stride of 4)\n            gt_bboxes = gt_bboxes / 4\n            # compute gt box heights and widths\n            all_h, all_w = gt_bboxes[:,\n                                     3] - gt_bboxes[:,\n                                                    1], gt_bboxes[:,\n                                                                  2] - gt_bboxes[:,\n                                                                                 0]\n\n            per_image_wh_targets[0:num_objs, 0] = all_w\n            per_image_wh_targets[0:num_objs, 1] = all_h\n\n            centers = torch.cat(\n                [((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2).unsqueeze(-1),\n                 ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2).unsqueeze(-1)],\n                axis=1)\n            centers_int = torch.trunc(centers)\n            centers_decimal = torch.frac(centers)\n\n            per_image_offset_targets[0:num_objs, :] = centers_decimal\n            per_image_positive_targets_mask[0:num_objs] = 1\n\n            per_image_reg_to_heatmap_index[\n                0:num_objs] = centers_int[:, 1] * W + centers_int[:, 0]\n\n            all_radius = self.compute_objects_gaussian_radius((all_h, all_w))\n            per_image_heatmap_targets = self.draw_umich_gaussian(\n                per_image_heatmap_targets, gt_classes, centers_int, all_radius)\n\n            batch_heatmap_targets.append(\n                per_image_heatmap_targets.unsqueeze(0))\n            batch_wh_targets.append(per_image_wh_targets.unsqueeze(0))\n            batch_reg_to_heatmap_index.append(\n                per_image_reg_to_heatmap_index.unsqueeze(0))\n            batch_offset_targets.append(per_image_offset_targets.unsqueeze(0))\n            batch_positive_targets_mask.append(\n                per_image_positive_targets_mask.unsqueeze(0))\n\n        batch_heatmap_targets = torch.cat(batch_heatmap_targets, axis=0)\n        batch_wh_targets = torch.cat(batch_wh_targets, axis=0)\n        batch_offset_targets = torch.cat(batch_offset_targets, axis=0)\n        batch_reg_to_heatmap_index = torch.cat(batch_reg_to_heatmap_index,\n                                               axis=0)\n        batch_positive_targets_mask = torch.cat(batch_positive_targets_mask,\n                                                axis=0)\n\n        return batch_heatmap_targets, batch_wh_targets, batch_offset_targets, batch_reg_to_heatmap_index, batch_positive_targets_mask\n\n    def compute_objects_gaussian_radius(self, objects_size, min_overlap=0.7):\n        # three quadratic cases as in CornerNet's gaussian_radius: the final radius is\n        # the minimum over the three, chosen so a box jittered by the radius still has\n        # IoU >= min_overlap with the gt box\n        all_h, all_w = objects_size\n        all_h, all_w = torch.ceil(all_h), torch.ceil(all_w)\n\n        a1 = 1\n        b1 = (all_h + all_w)\n        c1 = all_w * all_h * (1 - min_overlap) / (1 + min_overlap)\n        sq1 = torch.sqrt(b1**2 - 4 * a1 * c1)\n        r1 = (b1 + sq1) / 2\n\n        a2 = 4\n        b2 = 2 * (all_h + all_w)\n        c2 = (1 - min_overlap) * all_w * all_h\n        sq2 = torch.sqrt(b2**2 - 4 * a2 * c2)\n        r2 = (b2 + sq2) / 2\n\n        a3 = 4 * min_overlap\n        b3 = -2 * min_overlap * (all_h + all_w)\n        c3 = (min_overlap - 1) * all_w * all_h\n        sq3 = torch.sqrt(b3**2 - 4 * a3 * c3)\n        r3 = (b3 + sq3) / 2\n\n        radius = torch.min(r1, r2)\n        radius = torch.min(radius, r3)\n        radius = torch.max(torch.zeros_like(radius), torch.trunc(radius))\n\n        return radius\n\n    def gaussian2D(self, shape, sigma=1):\n        m, n = [(ss - 1.) / 2. for ss in shape]\n        y, x = np.ogrid[-m:m + 1, -n:n + 1]\n\n        h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n        h[h < np.finfo(h.dtype).eps * h.max()] = 0\n\n        return h\n\n    def draw_umich_gaussian(self,\n                            per_image_heatmap_targets,\n                            gt_classes,\n                            all_centers,\n                            all_radius,\n                            k=1):\n        height, width = per_image_heatmap_targets.shape[\n            1], per_image_heatmap_targets.shape[2]\n        device = per_image_heatmap_targets.device\n\n        for per_class, per_center, per_radius in zip(gt_classes, all_centers,\n                                                     all_radius):\n            per_diameter = 2 * per_radius + 1\n            per_diameter = int(per_diameter.item())\n            gaussian = self.gaussian2D((per_diameter, per_diameter),\n                                       sigma=per_diameter / 6)\n            gaussian = torch.FloatTensor(gaussian).to(device)\n\n            x, y = per_center[0], per_center[1]\n            left, right = min(x, per_radius), min(width - x, per_radius + 1)\n            top, bottom = min(y, per_radius), min(height - y, per_radius + 1)\n\n            masked_heatmap = per_image_heatmap_targets[per_class.long(), (\n                y - top).long():(y +\n                                 bottom).long(), (x -\n                                                  left).long():(x +\n                                                                right).long()]\n            masked_gaussian = gaussian[(per_radius -\n                                        top).long():(per_radius +\n                                                     bottom).long(),\n                                       (per_radius -\n                                        left).long():(per_radius +\n                                                      right).long()]\n\n            if min(masked_gaussian.shape) > 0 and min(\n                    masked_heatmap.shape) > 0:\n                # if Gaussian maps overlap, keep the maximum value at overlapping points\n                masked_heatmap = torch.max(masked_heatmap, masked_gaussian * k)\n\n            per_image_heatmap_targets[per_class.long(),\n                                      (y - top).long():(y + bottom).long(),\n                                      (x - left).long():(\n                                          x + right).long()] = masked_heatmap\n\n        return per_image_heatmap_targets\n\n\nif __name__ == '__main__':\n    from retinanet import RetinaNet\n    net = RetinaNet(resnet_type=\"resnet50\")\n    image_h, image_w = 600, 600\n    cls_heads, reg_heads, batch_anchors = net(\n        torch.autograd.Variable(torch.randn(3, 3, image_h, image_w)))\n    annotations = torch.FloatTensor([[[113, 120, 183, 255, 5],\n                                      [13, 45, 175, 210, 2]],\n                                     [[11, 18, 223, 225, 1],\n                                      [-1, -1, -1, -1, -1]],\n                                     [[-1, -1, -1, -1, -1],\n                                      [-1, -1, -1, -1, -1]]])\n    loss = RetinaLoss(image_w, image_h)\n    cls_loss, reg_loss = loss(cls_heads, reg_heads, batch_anchors, annotations)\n    print(\"1111\", cls_loss, reg_loss)\n\n    from fcos import FCOS\n    net = FCOS(resnet_type=\"resnet50\")\n    image_h, image_w = 600, 600\n    cls_heads, reg_heads, center_heads, batch_positions = net(\n        torch.autograd.Variable(torch.randn(3, 3, image_h, image_w)))\n    annotations = torch.FloatTensor([[[113, 120, 183, 255, 5],\n                                      [13, 45, 175, 210, 2]],\n                                     [[11, 18, 223, 225, 1],\n                                      [-1, -1, -1, -1, -1]],\n                                     [[-1, -1, -1, -1, -1],\n                                      [-1, -1, -1, -1, -1]]])\n    loss = FCOSLoss(image_w, image_h)\n    cls_loss, reg_loss, center_loss = loss(cls_heads, reg_heads, center_heads,\n                                           batch_positions, annotations)\n    print(\"2222\", cls_loss, reg_loss, center_loss)\n\n    from centernet import CenterNet\n    net = CenterNet(resnet_type=\"resnet50\")\n    image_h, image_w = 640, 640\n    heatmap_output, offset_output, wh_output = net(\n        torch.autograd.Variable(torch.randn(3, 3, image_h, image_w)))\n    annotations = torch.FloatTensor([[[113, 120, 183, 255, 5],\n                                      [13, 45, 175, 210, 2]],\n                                     [[11, 18, 223, 225, 1],\n                                      [-1, -1, -1, -1, -1]],\n                                     [[-1, -1, -1, -1, -1],\n                                      [-1, -1, -1, -1, -1]]])\n    loss = CenterNetLoss()\n    heatmap_loss, offset_loss, wh_loss = loss(heatmap_output, offset_output,\n                                              wh_output, annotations)\n    print(\"3333\", heatmap_loss, offset_loss, wh_loss)\n" ]
[ [ "torch.min", "torch.frac", "torch.trunc", "torch.sqrt", "torch.eq", "torch.log", "torch.max", "torch.cat", "torch.randn", "torch.linspace", "torch.sigmoid", "torch.ones_like", "torch.ceil", "torch.ones", "torch.ge", "torch.tensor", "numpy.finfo", "torch.pow", "torch.FloatTensor", "torch.zeros_like", "torch.exp", "numpy.exp", "torch.abs", "torch.zeros", "torch.clamp" ] ]
bandang0/astro_reduce
[ "d499daa071ab9e20086522e81f7d7f049e9e9268" ]
[ "helpers.py" ]
[ "'''helpers module for astro_reduce -- UI constants and handy functions.'''\n\nfrom hashlib import md5\nfrom json import dump\nfrom os.path import basename\nfrom re import sub\n\nimport click\nimport matplotlib.colors as colors\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.visualization import ImageNormalize, ZScaleInterval\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\n# List of all options.\nOPT_LIST = ['setup', 'clear', 'interpolate', 'verbose', 'tmppng', 'stkpng',\n 'sex', 'psfex', 'sexagain', 'scamp', 'nomaster', 'nostack']\n\n# List of astromatic commands.\nASTROMATIC_LIST = ['sex', 'psfex', 'sexagain', 'scamp']\n\n# Comment for header keywords.\nHC = 'Exposure time in seconds'\n\n# Paths and extensions.\n# User image directories.\nUDARK = 'DARK'\nUFLAT = 'FLAT'\nUOBJ = 'ORIGINAL'\n\n# Astro-reduce working directories.\nOBJ = 'ar_objects'\nDARK = 'ar_darks'\nFLAT = 'ar_flats'\nMASTER = 'ar_masters'\nTMP = 'ar_tmp'\n\n# Astro-reduce results directories.\nSTK = 'stacked'\nRED = 'reduced'\n\n# File names and extensions.\nDI = 'dark'\nFI = 'flat'\nAUX = 'aux'\n\n# Astromatic result directories.\nSEX_RES = 'SEXRES'\nPSFEX_RES = 'PSFRES'\nSCAMP_RES = 'SCAMPRES'\n\n# Astromatic configuration files.\nAR = 'astro_reduce'\nDATA = 'data'\nT120_SEX = 't120.sex'\nT120_PARAM = 't120.param'\nT120_PSFEX = 't120.psfex'\nT120_PARAMPSFEX = 't120.parampsfex'\nDEFAULT_CONV = 'default.conv'\nT120_SCAMP = 't120.scamp'\nT120_AHEAD = 't120-andor1024.ahead'\n\n# Astromatic command templates.\nSEX_TMP = 'sex {} -c {} -PARAMETERS_NAME {} -FILTER_NAME {} '\\\n + '-CATALOG_NAME {}/{} '\\\n + '-CHECKIMAGE_TYPE BACKGROUND,OBJECTS '\\\n + '-CHECKIMAGE_NAME {}/{},{}/{} -XML_NAME {}/{} '\nPSFEX_TMP = 'psfex -c {} SEXRES/*-c.ldac -XML_NAME PSFRES/{} '\\\n + '-CHECKIMAGE_TYPE CHI,PROTOTYPES,SAMPLES,RESIDUALS,SNAPSHOTS '\\\n + '-CHECKIMAGE_NAME PSFRES/chi,PSFRES/proto,PSFRES/samp,'\\\n + 'PSFRES/resi,PSFRES/snap '\\\n + '-CHECKPLOT_TYPE FWHM,ELLIPTICITY,COUNTS,COUNT_FRACTION,CHI2,RESIDUALS '\\\n + '-CHECKPLOT_NAME PSFRES/fwhm,PSFRES/ellipticity,PSFRES/counts,PSFRES/'\\\n + 'countfrac,PSFRES/chi,PSFRES/resi '\nSEXAGAIN_OPT_TMP = '-PSF_NAME PSFRES/{} '\nSCAMP_TMP = 'scamp {} -c {} -AHEADER_GLOBAL {}'\n\n# Simple hashing function for file names.\nhsh = lambda x: md5(x.encode('utf-8')).hexdigest()\n\n\ndef init_astro_header(aux_header):\n '''Initialize the header with astrometric data.\n\n If the header does not contain coordinates of center of field, return -1.\n '''\n # Check if contains the c-o-f coordinates, return -1 if not:\n if not ('OBJCTRA' in aux_header and 'OBJCTDEC' in aux_header):\n return -1\n\n # Coordinates are there, proceed.\n # Update header and put basic astrometry information.\n crpix1 = int(aux_header['NAXIS1'] / 2.0)\n crpix2 = int(aux_header['NAXIS2'] / 2.0)\n aux_header['CRPIX1'] = (crpix1, 'Reference pixel on this axis')\n aux_header['CRPIX2'] = (crpix2, 'Reference pixel on this axis')\n # Read and set RADEC center of FOV.\n skycoo = SkyCoord(aux_header['OBJCTRA'],\n aux_header['OBJCTDEC'], unit=[u.hourangle, u.deg])\n aux_header['CRVAL1'] = (skycoo.ra.to('deg').value,\n 'World coordinate on this axis')\n aux_header['CRVAL2'] = (skycoo.dec.to('deg').value,\n 'World coordinate on this axis')\n aux_header['CTYPE1'] = ('RA---TAN', 'WCS projection type for this axis')\n aux_header['CTYPE2'] = ('DEC--TAN', 'WCS projection type for this axis')\n aux_header['CUNIT1'] = ('DEG', 'Axis unit')\n aux_header['CUNIT2'] = ('DEG', 'Axis unit')\n aux_header['EQUINOX'] = 
(2000.0, 'Mean equinox')\n aux_header['RADESYS'] = ('ICRS ', 'Astrometric system')\n aux_header['CD1_1'] = (-2.138738809045E-04, 'Linear projection matrix')\n aux_header['CD1_2'] = (2.180959444292E-06, 'Linear projection matrix')\n aux_header['CD2_1'] = (-2.331002019312E-06, 'Linear projection matrix')\n aux_header['CD2_2'] = (-2.138176347970E-04, 'Linear projection matrix')\n return aux_header\n\n\ndef dark_read_header(fname):\n '''Return exposure and standard file name for a dark field image.'''\n head = fits.getheader(fname)\n if 'EXPTIME' in head.keys():\n exp = int(1000 * head['EXPTIME'])\n elif 'EXPOSURE' in head.keys():\n exp = int(1000 * head['EXPOSURE'])\n elif 'EXP (MS)' in head.keys():\n exp = int(head['EXP (MS)'])\n else:\n raise IOError('No exposure keyword in header of `{}`.'.format(fname))\n return exp, '{}_{}_{}.fits'.format(DI, exp, hsh(fname))\n\n\ndef flat_read_header(fname):\n '''Return filter, exposure, standard file name for a flat field image.'''\n head = fits.getheader(fname)\n # Filter.\n if 'FILTER' in head.keys():\n fil = sub('[- _]', '', head['FILTER'])\n else:\n raise IOError('No filter keyword in header of `{}`.'.format(fname))\n\n # Exposure.\n if 'EXPTIME' in head.keys():\n exp = int(1000 * head['EXPTIME'])\n elif 'EXPOSURE' in head.keys():\n exp = int(1000 * head['EXPOSURE'])\n elif 'EXP (MS)' in head.keys():\n exp = int(head['EXP (MS)'])\n else:\n raise IOError('No exposure keyword in header of `{}`.'.format(fname))\n\n return fil, exp, '{}_{}_{}_{}.fits'.format(FI, fil, exp, hsh(fname))\n\n\ndef obj_read_header(fname):\n '''Return object, filter, exposure and standard file name for object image.\n '''\n # Add flag to only warn once for empty object names.\n if 'warn_flag' not in obj_read_header.__dict__:\n obj_read_header.warn_flag = True\n\n # Retrieve object image header.\n head = fits.getheader(fname)\n\n # Object.\n if 'OBJECT' in head.keys():\n obj = sub('[ _]', '-', head['OBJECT'])\n else:\n raise IOError('No object keyword in header of `{}`.'.format(fname))\n\n # Warn for empty object name.\n if obj == '' and obj_read_header.warn_flag:\n click.secho('\\nW: The object keyword in file `{}` and similar is empty.'\n '\\nW: Undefined behavior.'.format(fname), fg='magenta')\n obj_read_header.warn_flag = False\n\n # Filter.\n if 'FILTER' in head.keys():\n fil = sub('[- _]', '', head['FILTER'])\n else:\n raise IOError('No filter keyword in header of `{}`.'.format(fname))\n\n # Exposure.\n if 'EXPTIME' in head.keys():\n exp = int(1000 * head['EXPTIME'])\n elif 'EXPOSURE' in head.keys():\n exp = int(1000 * head['EXPOSURE'])\n elif 'EXP (MS)' in head.keys():\n exp = int(head['EXP (MS)'])\n else:\n raise IOError('No exposure keyword in header of `{}`.'.format(fname))\n\n return obj, fil, exp, '{}_{}_{}_{}.fits'.format(obj, fil, exp, hsh(fname))\n\n\ndef write_conf_file(objects, exposures, filters, conf_file_name):\n '''Write configuration file from list of objects, exposures, filters.'''\n conf_dic = {'objects': objects,\n 'exposures': exposures,\n 'filters': filters}\n with open(conf_file_name, 'w') as cdfile:\n dump(conf_dic, cdfile, indent=2)\n\n\ndef fname_bits(fname):\n '''Return the filter and exposure from standard file name of an object.\n\n 'NGC1000_V_1000_0.fits' gives ('V', '1000').\n '''\n pieces = fname.split('.fit')[0].split('_')\n return (pieces[-3], pieces[-2])\n\n\ndef write_png(fname, plt):\n '''Generate PNG version of image read in a fits file.\n\n Try to use a zscale normalization, and fall back to a classical\n linear scale between the min 
and max of 1000 randomly picked pixels of the\n image. Save the PNG image in same directory as the fits file.\n '''\n image = fits.getdata(fname)\n norm = ImageNormalize(image, ZScaleInterval())\n plt.figure(42)\n plt.imshow(image, norm=norm, cmap='jet')\n try:\n # If the zscale algorithm doesn't converge, an UnboundLocalError is\n # raised by astropy.visualization ...\n plt.colorbar()\n except UnboundLocalError:\n # ... in this case, just pick 1000 random pixels and linearly scale\n # between them.\n plt.clf()\n sample = np.random.choice(image.ravel(), 1000)\n norm = colors.Normalize(np.min(sample), np.max(sample), clip=True)\n plt.imshow(image, norm=norm, cmap='jet')\n plt.colorbar()\n plt.title(basename(fname).split('.fit')[0])\n plt.savefig('{}.png'.format(fname.split('.fit')[0]), bbox_inches='tight')\n plt.close(42)\n" ]
[ [ "numpy.max", "numpy.min" ] ]
sjfreed21/DataAnalysis
[ "b4b852e2faca6633161513ecbfc4295068a6cd78" ]
[ "Class Files/Lab 5/pca_sst.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 15 11:01:01 2020\n\nIn this notebook I give a very simple (and rather uncommented) example of\n how to use scikit-learn to perform an Empirical Orthogonal Function \n decomposition (EOF analysis, often referred to as well as Principal \n Component Analysis or PCA) of a climate field, in this case the monthly\n Sea Surface Temperature (SST) anomalies in the Pacific.\nhttp://nicolasfauchereau.github.io/climatecode/posts/eof-analysis-with-scikit-learn/\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib import pyplot as plt\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n#Import for colortables\nfrom metpy.plots import colortables\n\n'''\nfrom mpl_toolkits.basemap import Basemap as bm\n\n#\n#A small function to plot a 2D field on a map with basemap\n#\ndef plot_field(m, X, lats, lons, vmin, vmax, step, cmap=plt.get_cmap('jet'), \\\n ax=False, title=False, grid=False):\n if not ax: \n f, ax = plt.subplots(figsize=(8, (X.shape[0] / float(X.shape[1])) * 8))\n m.ax = ax\n im = m.contourf(lons, lats, X, np.arange(vmin, vmax+step, step), \\\n latlon=True, cmap=cmap, extend='both', ax=ax)\n m.drawcoastlines()\n if grid: \n m.drawmeridians(np.arange(0, 360, 30), labels=[0,0,0,1])\n m.drawparallels(np.arange(-80, 80, 20), labels=[1,0,0,0])\n m.colorbar(im)\n if title: \n ax.set_title(title)\n'''\n \n'''\nload the SST data\n'''\n\nwdir='/Users/sjfre/Documents/DataAnalysis/Class Files/Lab 5/'\ndset = xr.open_dataset(wdir+'sst.mnmean.v3.nc')\n\nprint(dset)\nlat = dset['lat'].values\nlon = dset['lon'].values\nsst = dset['sst'].values\nprint(lat.shape)\n'''\nSelects the period 1950 - 2013 and the tropical Pacific domain\n\n'''\ndsub = dset.sel(time=slice('1950','2013'), lat=slice(40,-40), lon=slice(120,290))\nlat = dsub['lat'].values\nlon = dsub['lon'].values\nsst = dsub['sst'].values\n# calculate the anomaly here (sst_anom aliases sst; the loop below fills both in place)\nsst_anom = sst\nprint(sst_anom[0:10,0,0])\nlons, lats = np.meshgrid(lon, lat)\n#print(dsub)\nprint(sst.shape)\ndim=sst.shape\n#print(len(lat))\n#print(sst[:,0,0])\n#print(sst[:,0,0].mean())\nilon=len(lon)\nilat=len(lat)\nprint(ilon,ilat)\nprint(type(sst))\nMean_season=np.ndarray(shape=(12,ilat,ilon),dtype=float)  # (month, lat, lon)\nprint(Mean_season.shape)\nMean_season[:,:,:]=np.nan\nfor i in range(len(lat)):\n    for j in range(len(lon)):\n        for imon in range(12):\n            data=[]\n            for iyear in range(64):\n                ii=iyear*12+imon\n                #\n                if np.isnan(sst[ii,i,j]): \n                    continue\n                else:\n                    data.append(sst[ii,i,j])\n                    #print(ii,sst[ii,i,j])\n            if len(data) > 0:\n                Mean_season[imon,i,j]=sum(data)/len(data)\n                \nprint(Mean_season[0:10,0,0])\nprint(sst[0:10,0,0])\n\nfor i in range(len(lat)):\n    for j in range(len(lon)):\n        for imon in range(12):\n            for iyear in range(64):\n                ii=iyear*12+imon\n                sst_anom[ii,i,j]=sst[ii,i,j]-Mean_season[imon,i,j]\n\nprint(sst_anom[0:10,0,0])\n'''\nreshape in 2D (time, space)\n'''\nX = np.reshape(sst, (sst.shape[0], len(lat) * len(lon)), order='F')\nnp.any(np.isnan(X))\n\n'''\nMask the land points\n'''\ntype(X)\nX = ma.masked_array(X, np.isnan(X))\ntype(X)\nland = X.sum(0).mask\nocean = ~ land \n\n'''\nkeep only oceanic grid-points\n'''\nX = X[:,ocean]\n\n'''\nStandardize SST using the fit and transform methods of the sklearn.preprocessing.scaler.StandardScaler\n'''\n\nfrom sklearn import preprocessing\nscaler = preprocessing.StandardScaler()\nscaler_sst = scaler.fit(X)\n\n'''\nOnce the scaler object has been 'trained' on the data, we can save it as a pickle object\n'''\nimport joblib\njoblib.dump(scaler_sst, './scaler_sst.pkl', compress=9)\nscaler_sst = joblib.load('./scaler_sst.pkl')\n\n'''\nscales: use the transform method of the scaler object\n'''\nX = scaler_sst.transform(X)\n\n'''\nverify that mean = 0 and std = 1\n'''\nprint(X.mean())\nprint(X.std())\nX.shape\n\n'''\nEOF decomposition\n'''\nfrom sklearn.decomposition import PCA\nskpca = PCA() #instantiates the PCA object\nskpca.fit(X) #fit\n\n'''\nNow saves the (fitted) PCA object for reuse in operations\n'''\njoblib.dump(skpca, '../EOF.pkl', compress=9)\n\nf, ax = plt.subplots(figsize=(5,5))\nax.plot(skpca.explained_variance_ratio_[0:10]*100)\nax.plot(skpca.explained_variance_ratio_[0:10]*100,'ro')\nax.set_title(\"% of variance explained\", fontsize=14)\nax.grid()\n\n'''\nkeep number of PC sufficient to explain 70 % of the original variance\n'''\nipc = np.where(skpca.explained_variance_ratio_.cumsum() >= 0.70)[0][0]\nprint(ipc)\n\n'''\nThe Principal Components (PCs) are obtained by using the transform method of the pca object (skpca)\n'''\nPCs = skpca.transform(X)\nPCs = PCs[:,:ipc]\n\n'''\nThe Empirical Orthogonal Functions (EOFs) are contained in the components_ attribute of the pca object (skpca)\n'''\nEOFs = skpca.components_\n#EOFs = EOFs[:ipc,:]\nEOFs.shape\n\n'''\nwe can then reconstruct the 2D fields (maps)\n'''\nEOF_recons = np.ones((ipc, len(lat) * len(lon))) * -999.\nfor i in range(ipc): \n    EOF_recons[i,ocean] = EOFs[i,:]\nEOF_recons = ma.masked_values(np.reshape(EOF_recons, (ipc, len(lat), len(lon)), order='F'), -999.)\nEOF_recons.shape\n\ntype(EOF_recons)\nEOF_recons *= 100\n\nfig = plt.figure(figsize=(10,8))\ncentral_lon, central_lat = 180, 0\nextent=[lons.min(), lons.max(), -40, 40]\nax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree(central_lon))\nax.coastlines()\nax.gridlines()\nax.set_extent(extent)\nim = ax.imshow(EOF_recons[0,:,:],extent=(lons.min(), lons.max(), lats.min(), lats.max())\n               ,vmin=-5, vmax=5,transform=ccrs.PlateCarree(),origin='upper',interpolation='bilinear')\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.subplots", "numpy.ndarray", "matplotlib.pyplot.show", "numpy.isnan", "sklearn.preprocessing.StandardScaler", "numpy.meshgrid", "sklearn.decomposition.PCA" ] ]
patchloc/VulnLoc
[ "c7a7d4dac092e3b6302aa8a954518a3348d51d3e" ]
[ "code/utils.py" ]
[ "import pickle\nimport string\nimport numpy as np\nimport multiprocessing\n\n# system setup\nProcessNum=np.min((10, multiprocessing.cpu_count()))\n\n# Used for generating the random filename\nFileNameChars = list(string.letters + string.digits)\nFileNameLen = 30\n\n'''\nProcess the binary file\n'''\ndef read_bin(path):\n\twith open(path, 'rb') as f:\n\t\ttemp = f.readlines()\n\ttemp = ''.join(temp)\n\tcontent = [ord(i) for i in temp]\n\treturn content\n\ndef write_bin(path, inputs):\n\twith open(path, 'wb') as f:\n\t\tf.write(bytearray(list(inputs)))\n\n\n'''\nProcess the normal text file\n'''\ndef read_txt(path):\n\twith open(path, 'r') as f:\n\t\tcontent = f.readlines()\n\treturn content\n\ndef write_txt(path, content):\n\twith open(path, 'w') as f:\n\t\tf.writelines(content)\n\n'''\nProcess the pickle file\n'''\ndef write_pkl(path, info):\n\twith open(path, 'w') as f:\n\t\tpickle.dump(info, f)\n\ndef read_pkl(path):\n\twith open(path) as f:\n\t\tinfo = pickle.load(f)\n\treturn info\n\n\n'''\nGenerating the temp filename\n'''\ndef gen_temp_filename():\n\treturn ''.join(np.random.choice(FileNameChars, FileNameLen))\n" ]
[ [ "numpy.random.choice" ] ]
StefHerregods/ConfidenceBounds
[ "5399647aecf46bf4adee047cbee2b883ac5f14a2" ]
[ "Experiment_2/Exp2_DotMotion.py" ]
[ "# November 2021\n# Internship project: Confidence bounds\n\n# RANDOM DOT MOTION TASK (variant with 6 confidence options)\n# (Does the majority of dots move left or right? How confident are you about your choice?)\n# Participants complete multiple blocks of consecutive dot motion trials\n# 3 training blocks with increasing complexity introduce participants to the task\n# Manipulations: coherence (~ difficulty), accuracy/speed of the decision and accuracy/speed of the confidence ratings\n# Resulting data: trial number, block, coherence, participant response, correct response, RT,\n# confidence ratings, confidence RT, manipulation, label order\n# subject numbers: minimum 1, maximum 40\n\n# Importing modules\nfrom psychopy import visual as vis\nfrom psychopy import event, core, gui, data\nimport os\nimport random\nimport numpy as np\nfrom time import sleep\n\npilot = 0  # Set to 0 for the real experiment\n\n# Number of blocks/trials\nnb_training_trials = 24  # Trials per training block; divisible by 3 (because 3 coherence manipulations) (24)\nnb_main_trials = 60  # Trials per main block; divisible by 3 (because 3 coherence manipulations) (60)\nnb_main_blocks = 12  # Without training blocks; divisible by 4 (because speed/accuracy manipulations) (12)\n\n# Block 1\nmin_accuracy_1 = 0.85  # Minimum accuracy necessary to continue to the next block (0.85)\np_coherence_1 = 0.5  # Percentage coherence (0.5)\n\n# Block 2\nmin_accuracy_2 = 0.6  # (0.6)\n\n# Block 2 (and every block afterwards)\np_coherence_a = 0.10  # Low coherence manipulation (0.10)\np_coherence_b = 0.20  # Medium coherence manipulation (0.20)\np_coherence_c = 0.40  # High coherence manipulation (0.40)\n\n# Timing\ntime_cross = 1  # Time fixation cross (+ 0.5s when manipulations are shown) (1)\ntime_fb = 1  # Time feedback (1)\n\n\n# Create a Data folder if it doesn't exist yet\nmy_directory = os.getcwd()\nif not os.path.isdir('Exp2_DotMotion_Data'):\n    os.mkdir('Exp2_DotMotion_Data')\n\n# GUI\nif pilot:\n    sub = 0; age = 30; gender = 'Man'; handedness = 'Right'\n    file_name = \"Exp2_DotMotion_Data/DotsTask_sub%d\" % sub\nelse:\n    info = {\"Subject number\": 0, \"gender\": ['Woman', 'Man', 'X'], \"age\": 0, \"handedness\": ['Left', 'Right']}\n    myDlg = gui.DlgFromDict(dictionary=info, title=\"DotsTask\", show=True)\n    sub = info['Subject number']; age = info['age']; gender = info['gender']; handedness = info['handedness']\n    file_name = \"Exp2_DotMotion_Data/DotsTask_sub%d\" % sub\n    if os.path.isfile(file_name):\n        print('This subject number already exists!')\n        core.quit()\n\n# TrialHandler: make a data file\ninfo = {\"sub\": sub, \"age\": age, \"gender\": gender, \"handedness\": handedness}\nthisExp = data.ExperimentHandler(dataFileName=file_name, extraInfo=info)  # saving extra info\n\n# Counterbalancing confidence labels\n# 0 for high-low confidence; 1 for low-high confidence\nif sub % 2 == 0:\n    confidence_labels = 0\nelse:\n    confidence_labels = 1\n\n# Counterbalancing manipulation order\n# Number from 1 to 4; refers to the order of accurate/fast manipulations (Latin square counterbalanced)\nif 0 < sub <= 10:\n    manipulation_order = 1\nelif 10 < sub <= 20:\n    manipulation_order = 2\nelif 20 < sub <= 30:\n    manipulation_order = 3\nelif 30 < sub <= 40:\n    manipulation_order = 4\nelse:\n    print(\"incorrect subject number\")\n    core.quit()\n\n# Clock\nclock = core.Clock()\n\n# Visual features\nwin = vis.Window(size=[1920, 1080], color='black', allowGUI=False, units='norm', fullscr=False)\nwin.mouseVisible = False\nfix = vis.TextStim(win, text=\"+\", 
color='white')\ngood = vis.TextStim(win, text=\"Correct!\", color='green')\nbad = vis.TextStim(win, text=\"Wrong...\", color='red')\n\n# Introduction images\nIntroduction = vis.TextStim(win, text=\"Welcome to this experiment!\\n\\nOn each turn, you will see a series of moving \"\n \"dots in the center of the screen.\\nSome of those dots will consistently move in \"\n \"the same direction: either to the left or to the right.\\nYour job is to \"\n \"determine in which direction most of the dots are moving.\"\n \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\n \"In this example, most of the dots move to the left.\", pos=(0, 0.15), height=.05)\nIntro_block1 = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_Intro_Block1.JPG',\n pos=(0, 0.1))\nIntro_block2 = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_Intro_Block2.JPG',\n pos=(0, 0.1))\nIntro_block3a = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_Intro_Block3a.JPG',\n pos=(0, 0.1))\nIntro_block3b = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_Intro_Block3b.JPG',\n pos=(0, 0.1))\nIntro_block4 = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_Intro_Block4.JPG',\n pos=(0, 0.1))\n\n# Manipulation images\nFastFast = vis.ImageStim(win, image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_FastFast.JPG',\n pos=(0, 0))\nAccAcc = vis.ImageStim(win, image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_AccAcc.JPG',\n pos=(0, 0))\nFastAcc = vis.ImageStim(win, image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_FastAcc.JPG',\n pos=(0, 0))\nAccFast = vis.ImageStim(win, image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_AccFast.JPG',\n pos=(0, 0))\n\n# Manipulations labels\ndecision_Fast = vis.TextStim(win, text='Make FAST decisions', pos=(0, 0.3), height=.07)\ndecision_Accurate = vis.TextStim(win, text='Make ACCURATE decisions', pos=(0, 0.3), height=.07)\nconfidence_Fast = vis.TextStim(win, text='Give FAST confidence ratings', pos=(0, -0.3), height=.07)\nconfidence_Accurate = vis.TextStim(win, text='Think CAREFULLY about your confidence ratings', pos=(0, -0.3), height=.07)\n\n# Warning label\nwarning = vis.TextStim(win, text='Too slow... Please respond faster', pos=(0, 0), height=.07, color='red')\n\n# Confidence labels, counterbalance the order between participants\nif confidence_labels == 0:\n cj_labels = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_ConfidenceLabels_b.JPG',\n pos=(0, 0))\nelif confidence_labels == 1:\n cj_labels = vis.ImageStim(win,\n image=my_directory+'\\\\Exp2_DotMotion_Instructions\\\\Exp2_DotMotion_ConfidenceLabels_a.JPG',\n pos=(0, 0))\n\n# Space to continue\nspace = vis.TextStim(win, text='Press space to continue', pos=(0, -0.85), height=0.05)\n\n# Ending screen\nend = vis.TextStim(win, text='The end! 
Thank you for participating.\\n\\nPlease wait in silence until everyone is ready.',\n pos=(0, 0), height=.07)\n\n# Define keys\nchoice_keys = ['c', 'n', 'escape'] # left, right, escape\ncj_keys = ['1', '2', '3', '8', '9', '0', 'escape']\n\n# Creating DotMotion stimulus\nDotMotion = vis.DotStim(win, units='pix', nDots=120, fieldShape='circle', dotSize=4, color='white', speed=1,\n signalDots='same', noiseDots='direction', dotLife=5)\n\n# Initialize variables\nACC = 0\nn_manipulations = 0\n\n# Order of manipulations: Latin square counterbalanced across participants\nif manipulation_order == 1:\n manipulations = ['FastFast', 'AccFast', 'AccAcc', 'FastAcc', 'FastFast', 'AccFast', 'AccAcc', 'FastAcc', 'FastFast',\n 'AccFast', 'AccAcc', 'FastAcc']\nelif manipulation_order == 2:\n manipulations = ['FastAcc', 'FastFast', 'AccFast', 'AccAcc', 'FastAcc', 'FastFast', 'AccFast', 'AccAcc', 'FastAcc',\n 'FastFast', 'AccFast', 'AccAcc']\nelif manipulation_order == 3:\n manipulations = ['AccAcc', 'FastAcc', 'FastFast', 'AccFast', 'AccAcc', 'FastAcc', 'FastFast', 'AccFast', 'AccAcc',\n 'FastAcc', 'FastFast', 'AccFast']\nelif manipulation_order == 4:\n manipulations = ['AccFast', 'AccAcc', 'FastAcc', 'FastFast', 'AccFast', 'AccAcc', 'FastAcc', 'FastFast', 'AccFast',\n 'AccAcc', 'FastAcc', 'FastFast']\n\n# Practice trials 1 = 1\n# Practice trials 2 = 2 (lower coherence)\n# Practice trials 3 = 3 (adding confidence)\n# Main trials = 4\nblocks = nb_main_blocks + 4\nfor block in range(1,blocks):\n\n accuracy = 0\n\n if block == 1:\n # Drawing introduction screen + introduction dot motion trial\n resp = []\n DotMotion.coherence = 0.8\n DotMotion.dir = 180\n DotMotion.fieldSize = 300\n DotMotion.dotLife = 8\n while len(resp) == 0:\n DotMotion.draw()\n Introduction.draw()\n space.draw()\n win.flip()\n resp = event.getKeys(keyList='space')\n DotMotion.fieldSize = 500\n DotMotion.dotLife = 5\n\n # Settings block 1\n min_accuracy = min_accuracy_1\n p_coherence = p_coherence_1\n n_trials = nb_training_trials\n\n # Introduction block 1\n Intro_block1.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n\n if block == 2:\n # Settings block 2\n min_accuracy = min_accuracy_2\n n_trials = nb_training_trials\n\n # Introduction block 2\n Intro_block2.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n\n if block == 3:\n # Settings block 3\n accuracy = -1\n min_accuracy = 0\n n_trials = nb_training_trials\n\n # Introduction block 3\n if confidence_labels == 0:\n Intro_block3b.draw()\n else:\n Intro_block3a.draw()\n space.draw(); win.flip()\n event.waitKeys(keyList='space')\n\n if block == 4:\n # Introduction block 4\n Intro_block4.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n\n # Manipulations shown -> longer fixation cross time\n time_cross = time_cross + 0.5\n\n # Randomise manipulations in block 4+\n if 4 <= block <= 3 + nb_main_blocks:\n accuracy = -1\n min_accuracy = 0\n n_trials = nb_main_trials\n\n # Drawing the manipulation instruction\n if manipulations[n_manipulations] == 'FastFast':\n decision_instruction = decision_Fast\n confidence_instruction = confidence_Fast\n FastFast.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n if manipulations[n_manipulations] == 'AccAcc':\n decision_instruction = decision_Accurate\n confidence_instruction = confidence_Accurate\n AccAcc.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n if manipulations[n_manipulations] == 'FastAcc':\n decision_instruction = decision_Fast\n confidence_instruction 
= confidence_Accurate\n            FastAcc.draw(); space.draw(); win.flip()\n            event.waitKeys(keyList='space')\n        if manipulations[n_manipulations] == 'AccFast':\n            decision_instruction = decision_Accurate\n            confidence_instruction = confidence_Fast\n            AccFast.draw(); space.draw(); win.flip()\n            event.waitKeys(keyList='space')\n\n    # Randomise coherence levels, counterbalanced within the participant\n    if block > 1:\n        coherence_list = np.repeat([p_coherence_a, p_coherence_b, p_coherence_c], n_trials//3)  # // so np.repeat gets an integer count\n        random.shuffle(coherence_list)\n\n    repetition = 0\n\n    # Single block loop\n    while accuracy < min_accuracy:\n        accuracy = 0\n        RT_mean = 0\n        RTconf_mean = 0\n        repetition = repetition + 1\n        n_slowtrials = 0\n\n        # Randomise left vs. right dot movement\n        condition = np.repeat(range(2), n_trials//2)  # 0 = left correct; 1 = right correct\n        random.shuffle(condition)\n\n        # Start trial loop\n        for trial in range(n_trials):\n            SlowTrial = 0\n            if block > 1:\n                p_coherence = coherence_list[trial]\n            if condition[trial] == 0:\n                correct = 'left'\n                direction = 180\n            if condition[trial] == 1:\n                correct = 'right'\n                direction = 0\n\n            # Fixation cross\n            fix.draw()\n            if block > 3:\n                decision_instruction.draw()\n                confidence_instruction.draw()\n            win.flip()\n            sleep(time_cross)\n            clock.reset()\n\n            # Dot motion settings\n            resp = []; conf_press = []; event.clearEvents(); RT = 0; RTconf = 0\n            DotMotion.coherence = p_coherence\n            DotMotion.dir = direction\n\n            # Random dot motion task\n            while RT < 5:\n                DotMotion.draw()\n                win.flip()\n                resp = event.getKeys(keyList=choice_keys)\n                RT = clock.getTime()\n                if len(resp) > 0:\n                    break\n            if RT >= 5:\n                resp = 'x'\n                SlowTrial = 1\n                n_slowtrials = n_slowtrials + 1\n                warning.draw(); space.draw()\n                win.flip()\n                event.waitKeys(keyList='space')\n                win.flip()\n\n            if resp == ['escape']:\n                print('Participant pressed escape')\n                win.close()\n                core.quit()\n\n            # Evaluating response\n            if correct == 'left' and resp[0] == choice_keys[0]:\n                ACC = 1\n            elif correct == 'right' and resp[0] == choice_keys[0]:\n                ACC = 0\n            elif correct == 'left' and resp[0] == choice_keys[1]:\n                ACC = 0\n            elif correct == 'right' and resp[0] == choice_keys[1]:\n                ACC = 1\n            else:\n                ACC = -99\n\n            if block == 1 or block == 2:\n                if ACC == 1:\n                    good.draw()\n                elif ACC == 0:\n                    bad.draw()\n                win.flip()\n                sleep(time_fb)\n\n            # Ask for confidence about the choice from the third block on\n            if block > 2 and resp != 'x':\n                clock.reset()\n                event.clearEvents()\n                while RTconf < 5:\n                    cj_labels.draw()\n                    win.flip()\n                    conf_press = event.getKeys(keyList=cj_keys)\n                    RTconf = clock.getTime()\n                    if len(conf_press) > 0:\n                        break\n                if RTconf >= 5:\n                    conf_press = 'x'\n                    SlowTrial = 1\n                    n_slowtrials = n_slowtrials + 1\n                    warning.draw(); space.draw()\n                    win.flip()\n                    event.waitKeys(keyList='space')\n                    win.flip()\n\n                # Convert conf_press into numeric value from 1 (sure error) to 6 (sure correct)\n                if SlowTrial == 0:\n                    for temp in range(0,6):\n                        if conf_press[0] == cj_keys[temp]:\n                            cj = temp + 1\n                    # Reverse order for half\n                    if confidence_labels == 0:\n                        cj = 7 - cj\n                else:\n                    conf_press = 'none'\n                    cj = -99\n                    RTconf = -99\n\n            else:\n                conf_press = 'none'\n                cj = -99\n                RTconf = -99\n\n            # Store data of current trial\n            thisExp.addData(\"withinblocktrial\", trial)\n            thisExp.addData(\"block\", block)\n            thisExp.addData(\"block_repetition\", repetition)\n            thisExp.addData(\"rt\", RT)\n            thisExp.addData(\"resp\", resp)\n            thisExp.addData(\"cor\", ACC)\n            thisExp.addData(\"cresp\", correct)\n            thisExp.addData(\"conf_press\", conf_press)\n            thisExp.addData(\"cj\", cj)\n            thisExp.addData(\"rtconf\", RTconf)\n            
thisExp.addData(\"coherence\", p_coherence)\n thisExp.addData(\"slow_trial\", SlowTrial)\n if block > 3:\n thisExp.addData(\"manipulation\", manipulations[n_manipulations])\n else:\n thisExp.addData(\"manipulation\", 'none')\n\n # Pressing escape\n if conf_press == ['escape']:\n print('Participant pressed escape')\n win.close()\n core.quit()\n\n # Proceed to next trial\n thisExp.nextEntry()\n\n # Add data to variables\n if SlowTrial == 0:\n accuracy = accuracy + ACC\n RT_mean = RT_mean + RT\n RTconf_mean = RTconf_mean + RTconf\n\n # Mean of variables of interest\n accuracy = accuracy/n_trials\n p_accuracy = 100*accuracy\n if (n_trials - n_slowtrials) != 0:\n RT_mean = RT_mean/(n_trials - n_slowtrials)\n RTconf_mean = RTconf_mean/(n_trials - n_slowtrials)\n\n # Show accuracy, mean RT and mean confidence RT\n if block < 3:\n pause = vis.TextStim(win,text='Time for a break\\n\\n\\nResults of the last block:\\n\\nAccuracy: ' +\n str(round(p_accuracy, 2)) +\n '%\\nAverage decision reaction time: ' + str(round(RT_mean, 2)) +\n ' seconds', pos=(0,0), height=.05)\n elif block == 3:\n pause = vis.TextStim(win,text='Time for a break\\n\\n\\nResults of the last block:\\n\\nAccuracy: ' +\n str(round(p_accuracy, 2)) +\n '%\\nAverage decision reaction time: ' + str(round(RT_mean, 2)) +\n ' seconds\\nAverage confidence reaction time: ' + str(round(RTconf_mean, 2)) +\n ' seconds', pos=(0,0), height=.05)\n else:\n pause = vis.TextStim(win,text='Time for a break\\n\\n\\nResults of the last block:\\n\\nAccuracy: ' +\n str(round(p_accuracy, 2)) +\n '%\\nAverage decision reaction time: ' + str(round(RT_mean, 2)) +\n ' seconds\\nAverage confidence reaction time: ' + str(round(RTconf_mean, 2)) +\n ' seconds\\n\\n\\n' + str(nb_main_blocks-(n_manipulations+1)) +\n ' more block(s) to go\\nRemember to try and use all confidence options '\n 'equally', pos=(0, 0), height=.05)\n\n pause.draw(); space.draw(); win.flip()\n event.waitKeys(keyList='space')\n\n # Move on to the next manipulation\n if block > 3:\n n_manipulations = n_manipulations + 1\n\nend.draw();win.flip()\nevent.waitKeys(keyList='space')\n\n# End of the experiment\nwin.close()\ncore.quit()\n" ]
[ [ "numpy.repeat" ] ]
gmggroup/omf
[ "022cb6f84e3b1504555eb0964a37f60281efb03c" ]
[ "notebooks/cbi.py" ]
[ "import numpy as np\nimport properties\nimport z_order_utils\n\n\nclass BaseMetadata(properties.HasProperties):\n    name = properties.String(\"Name of the block model\", default=\"\")\n    description = properties.String(\"Description of the block model\", default=\"\")\n    # Other named metadata?\n\n\nclass BaseOrientation(properties.HasProperties):\n    corner = properties.Vector3(\n        \"Origin of the block model, where axes extend from\",\n        default=\"ZERO\",\n    )\n    axis_u = properties.Vector3(\"Vector orientation of u-direction\", default=\"X\")\n    axis_v = properties.Vector3(\"Vector orientation of v-direction\", default=\"Y\")\n    axis_w = properties.Vector3(\"Vector orientation of w-direction\", default=\"Z\")\n\n\nclass RegularBlockModel(BaseMetadata, BaseOrientation):\n    block_size = properties.Vector3(\n        \"Size of each block\",\n    )\n    block_count = properties.List(\n        \"Number of blocks in each dimension\",\n        min_length=3,\n        max_length=3,\n        prop=properties.Integer(\"\", min=1),\n    )\n\n\nclass TensorBlockModel(BaseMetadata, BaseOrientation):\n    tensor_u = properties.Array(\n        \"Tensor cell widths, u-direction\", shape=(\"*\",), dtype=float\n    )\n    tensor_v = properties.Array(\n        \"Tensor cell widths, v-direction\", shape=(\"*\",), dtype=float\n    )\n    tensor_w = properties.Array(\n        \"Tensor cell widths, w-direction\", shape=(\"*\",), dtype=float\n    )\n\n    @property\n    def block_count(self):\n        return [\n            len(self.tensor_u),\n            len(self.tensor_v),\n            len(self.tensor_w),\n        ]\n\n    @property\n    def num_blocks(self):\n        return np.prod(self.block_count)\n\n\nclass BaseCompressedBlockStorage(properties.HasProperties):\n\n    parent_block_size = properties.Vector3(\n        \"Size of each parent block\",\n    )\n    parent_block_count = properties.List(\n        \"Number of parent blocks in each dimension\",\n        min_length=3,\n        max_length=3,\n        prop=properties.Integer(\"\", min=1),\n    )\n\n    @property\n    def num_parent_blocks(self):\n        return np.prod(self.parent_block_count)\n\n    @property\n    def num_blocks(self):\n        return self.compressed_block_index[-1]\n\n    @property\n    def is_sub_blocked(self):\n        self.compressed_block_index  # assert that _cbi exists\n        return (self._cbi[1:] - self._cbi[:-1]) > 1\n\n    def _get_starting_cbi(self):\n        return np.arange(self.num_parent_blocks + 1, dtype=\"uint32\")\n\n    @property\n    def compressed_block_index(self):\n        # Need the block counts to exist\n        assert self._props[\"parent_block_count\"].assert_valid(\n            self, self.parent_block_count\n        )\n        if \"sub_block_count\" in self._props:\n            assert self._props[\"sub_block_count\"].assert_valid(\n                self, self.sub_block_count\n            )\n        # Note: We could emit warnings here if the above values change.\n        # That is probably less relevant as these are not targeted\n        # to be used in a dynamic context.\n\n        # If the sub block storage does not exist, create it\n        if not hasattr(self, \"_cbi\"):\n            # Each parent cell has a single attribute before refinement\n            self._cbi = self._get_starting_cbi()\n        return self._cbi\n\n    def _get_parent_index(self, ijk):\n        pbc = self.parent_block_count\n        assert len(ijk) == 3  # Should be a 3 length integer tuple/list\n        assert (\n            (0 <= ijk[0] < pbc[0]) & (0 <= ijk[1] < pbc[1]) & (0 <= ijk[2] < pbc[2])\n        ), \"Must be valid ijk index\"\n\n        (parent_index,) = np.ravel_multi_index(\n            [[ijk[0]], [ijk[1]], [ijk[2]]],  # Index into the block model\n            self.parent_block_count,  # shape of the parent\n            order=\"F\",  # Explicit column major ordering, \"i moves fastest\"\n        )\n        return parent_index\n\n\nclass RegularSubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):\n\n    
sub_block_count = properties.List(\n \"Number of sub blocks in each sub-blocked parent\",\n min_length=3,\n max_length=3,\n prop=properties.Integer(\"\", min=1),\n )\n\n @property\n def sub_block_size(self):\n return self.parent_block_size / np.array(self.sub_block_count)\n\n def refine(self, ijk):\n self.compressed_block_index # assert that _cbi exists\n parent_index = self._get_parent_index(ijk)\n # Adding \"num_sub_blocks\" - 1, because the parent was already counted\n self._cbi[parent_index + 1 :] += np.prod(self.sub_block_count) - 1\n # Attribute index is where to insert into attribute arrays\n attribute_index = tuple(self._cbi[parent_index : parent_index + 2])\n return parent_index, attribute_index\n\n # Note: Perhaps if there is an unrefined RSBM,\n # then OMF should serialize as a RBM?\n\n\nclass OctreeSubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):\n @property\n def z_order_curves(self):\n forest = self._get_forest()\n cbi = self.compressed_block_index\n curves = np.zeros(self.num_blocks, dtype=\"uint32\")\n for i, tree in enumerate(forest):\n curves[cbi[i] : cbi[i + 1]] = sorted(tree)\n return curves\n\n def _get_forest(self):\n \"\"\"Want a set before we create the array.\n This may not be useful for less dynamic implementations.\n \"\"\"\n if not hasattr(self, \"_forest\"):\n # Do your part for the planet:\n # Plant trees in every parent block.\n self._forest = [{0} for _ in range(self.num_parent_blocks)]\n return self._forest\n\n def _refine_child(self, ijk, ind):\n\n self.compressed_block_index # assert that _cbi exists\n parent_index = self._get_parent_index(ijk)\n tree = self._get_forest()[parent_index]\n\n if ind not in tree:\n raise IndexError(ind)\n\n p, lvl = z_order_utils.get_pointer(ind)\n w = z_order_utils.level_width(lvl + 1)\n\n children = [\n [p[0], p[1], p[2], lvl + 1],\n [p[0] + w, p[1], p[2], lvl + 1],\n [p[0], p[1] + w, p[2], lvl + 1],\n [p[0] + w, p[1] + w, p[2], lvl + 1],\n [p[0], p[1], p[2] + w, lvl + 1],\n [p[0] + w, p[1], p[2] + w, lvl + 1],\n [p[0], p[1] + w, p[2] + w, lvl + 1],\n [p[0] + w, p[1] + w, p[2] + w, lvl + 1],\n ]\n\n for child in children:\n tree.add(z_order_utils.get_index(child[:3], child[3]))\n tree.remove(ind)\n\n # Adding \"num_sub_blocks\" - 1, because the parent was already counted\n self._cbi[parent_index + 1 :] += 7\n\n return children\n\n\nclass ArbitrarySubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):\n def _get_starting_cbi(self):\n \"\"\"Unlike octree and rsbm, this has zero sub-blocks to start with.\"\"\"\n return np.zeros(self.num_parent_blocks + 1, dtype=\"uint32\")\n\n def _get_lists(self):\n \"\"\"Want a set before we create the array.\n This may not be useful for less dynamic implementations.\n \"\"\"\n if not hasattr(self, \"_lists\"):\n # Do your part for the planet:\n # Plant trees in every parent block.\n self._lists = [\n (np.zeros((0, 3)), np.zeros((0, 3)))\n for _ in range(self.num_parent_blocks)\n ]\n return self._lists\n\n def _add_sub_blocks(self, ijk, new_centroids, new_sizes):\n self.compressed_block_index # assert that _cbi exists\n parent_index = self._get_parent_index(ijk)\n centroids, sizes = self._get_lists()[parent_index]\n\n if not isinstance(new_centroids, np.ndarray):\n new_centroids = np.array(new_centroids)\n new_centroids = new_centroids.reshape((-1, 3))\n\n if not isinstance(new_sizes, np.ndarray):\n new_sizes = np.array(new_sizes)\n new_sizes = new_sizes.reshape((-1, 3))\n\n assert (\n (new_centroids.size % 3 == 0)\n & (new_sizes.size % 3 == 0)\n & 
(new_centroids.size == new_sizes.size)\n )\n\n # TODO: Check that the centroid exists in the block\n\n self._lists[parent_index] = (\n np.r_[centroids, new_centroids],\n np.r_[sizes, new_sizes],\n )\n\n self._cbi[parent_index + 1 :] += new_sizes.size // 3\n" ]
[ [ "numpy.zeros", "numpy.ravel_multi_index", "numpy.arange", "numpy.prod", "numpy.array" ] ]
mgelbart/ray
[ "4cec2286572e368a4bd64aae467751a384eff62d" ]
[ "rllib/agents/es/es.py" ]
[ "# Code in this file is copied and adapted from\n# https://github.com/openai/evolution-strategies-starter.\n\nfrom collections import namedtuple\nimport logging\nimport numpy as np\nimport random\nimport time\nfrom typing import Optional\n\nimport ray\nfrom ray.rllib.agents import Trainer, TrainerConfig\nfrom ray.rllib.agents.es import optimizers, utils\nfrom ray.rllib.agents.es.es_tf_policy import ESTFPolicy, rollout\nfrom ray.rllib.env.env_context import EnvContext\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\nfrom ray.rllib.utils import FilterManager\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.deprecation import Deprecated\nfrom ray.rllib.utils.metrics import (\n NUM_AGENT_STEPS_SAMPLED,\n NUM_AGENT_STEPS_TRAINED,\n NUM_ENV_STEPS_SAMPLED,\n NUM_ENV_STEPS_TRAINED,\n)\nfrom ray.rllib.utils.torch_utils import set_torch_seed\nfrom ray.rllib.utils.typing import TrainerConfigDict\n\nlogger = logging.getLogger(__name__)\n\nResult = namedtuple(\n \"Result\",\n [\n \"noise_indices\",\n \"noisy_returns\",\n \"sign_noisy_returns\",\n \"noisy_lengths\",\n \"eval_returns\",\n \"eval_lengths\",\n ],\n)\n\n\nclass ESConfig(TrainerConfig):\n \"\"\"Defines an ESTrainer configuration class from which an ESTrainer can be built.\n\n Example:\n >>> from ray.rllib.agents.es import ESConfig\n >>> config = ESConfig().training(sgd_stepsize=0.02, report_length=20)\\\n ... .resources(num_gpus=0)\\\n ... .rollouts(num_rollout_workers=4)\n >>> print(config.to_dict())\n >>> # Build a Trainer object from the config and run 1 training iteration.\n >>> trainer = config.build(env=\"CartPole-v1\")\n >>> trainer.train()\n\n Example:\n >>> from ray.rllib.agents.es import ESConfig\n >>> from ray import tune\n >>> config = ESConfig()\n >>> # Print out some default values.\n >>> print(config.action_noise_std)\n >>> # Update the config object.\n >>> config.training(rollouts_used=tune.grid_search([32, 64]), eval_prob=0.5)\n >>> # Set the config object's env.\n >>> config.environment(env=\"CartPole-v1\")\n >>> # Use to_dict() to get the old-style python config dict\n >>> # when running with tune.\n >>> tune.run(\n ... \"ES\",\n ... stop={\"episode_reward_mean\": 200},\n ... config=config.to_dict(),\n ... 
)\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes a ESConfig instance.\"\"\"\n super().__init__(trainer_class=ESTrainer)\n\n # fmt: off\n # __sphinx_doc_begin__\n\n # ES specific settings:\n self.action_noise_std = 0.01\n self.l2_coeff = 0.005\n self.noise_stdev = 0.02\n self.episodes_per_batch = 1000\n self.eval_prob = 0.03\n # self.return_proc_mode = \"centered_rank\" # only supported return_proc_mode\n self.stepsize = 0.01\n self.noise_size = 250000000\n self.report_length = 10\n\n # Override some of TrainerConfig's default values with ES-specific values.\n self.train_batch_size = 10000\n self.num_workers = 10\n self.observation_filter = \"MeanStdFilter\"\n # ARS will use Trainer's evaluation WorkerSet (if evaluation_interval > 0).\n # Therefore, we must be careful not to use more than 1 env per eval worker\n # (would break ARSPolicy's compute_single_action method) and to not do\n # obs-filtering.\n self.evaluation_config[\"num_envs_per_worker\"] = 1\n self.evaluation_config[\"observation_filter\"] = \"NoFilter\"\n\n # __sphinx_doc_end__\n # fmt: on\n\n @override(TrainerConfig)\n def training(\n self,\n *,\n action_noise_std: Optional[float] = None,\n l2_coeff: Optional[float] = None,\n noise_stdev: Optional[int] = None,\n episodes_per_batch: Optional[int] = None,\n eval_prob: Optional[float] = None,\n # return_proc_mode: Optional[int] = None,\n stepsize: Optional[float] = None,\n noise_size: Optional[int] = None,\n report_length: Optional[int] = None,\n **kwargs,\n ) -> \"ESConfig\":\n \"\"\"Sets the training related configuration.\n\n Args:\n action_noise_std: Std. deviation to be used when adding (standard normal)\n noise to computed actions. Action noise is only added, if\n `compute_actions` is called with the `add_noise` arg set to True.\n l2_coeff: Coefficient to multiply current weights with inside the globalg\n optimizer update term.\n noise_stdev: Std. deviation of parameter noise.\n episodes_per_batch: Minimum number of episodes to pack into the train batch.\n eval_prob: Probability of evaluating the parameter rewards.\n stepsize: SGD step-size used for the Adam optimizer.\n noise_size: Number of rows in the noise table (shared across workers).\n Each row contains a gaussian noise value for each model parameter.\n report_length: How many of the last rewards we average over.\n\n Returns:\n This updated TrainerConfig object.\n \"\"\"\n # Pass kwargs onto super's `training()` method.\n super().training(**kwargs)\n\n if action_noise_std is not None:\n self.action_noise_std = action_noise_std\n if l2_coeff is not None:\n self.l2_coeff = l2_coeff\n if noise_stdev is not None:\n self.noise_stdev = noise_stdev\n if episodes_per_batch is not None:\n self.episodes_per_batch = episodes_per_batch\n if eval_prob is not None:\n self.eval_prob = eval_prob\n # Only supported return_proc mode is \"centered_rank\" right now. 
No need to\n # configure this.\n # if return_proc_mode is not None:\n # self.return_proc_mode = return_proc_mode\n if stepsize is not None:\n self.stepsize = stepsize\n if noise_size is not None:\n self.noise_size = noise_size\n if report_length is not None:\n self.report_length = report_length\n\n return self\n\n\[email protected]\ndef create_shared_noise(count):\n \"\"\"Create a large array of noise to be shared by all workers.\"\"\"\n seed = 123\n noise = np.random.RandomState(seed).randn(count).astype(np.float32)\n return noise\n\n\nclass SharedNoiseTable:\n def __init__(self, noise):\n self.noise = noise\n assert self.noise.dtype == np.float32\n\n def get(self, i, dim):\n return self.noise[i : i + dim]\n\n def sample_index(self, dim):\n return np.random.randint(0, len(self.noise) - dim + 1)\n\n\[email protected]\nclass Worker:\n def __init__(\n self,\n config,\n policy_params,\n env_creator,\n noise,\n worker_index,\n min_task_runtime=0.2,\n ):\n\n # Set Python random, numpy, env, and torch/tf seeds.\n seed = config.get(\"seed\")\n if seed is not None:\n # Python random module.\n random.seed(seed)\n # Numpy.\n np.random.seed(seed)\n # Torch.\n if config.get(\"framework\") == \"torch\":\n set_torch_seed(seed)\n\n self.min_task_runtime = min_task_runtime\n self.config = config\n self.config.update(policy_params)\n self.config[\"single_threaded\"] = True\n self.noise = SharedNoiseTable(noise)\n\n env_context = EnvContext(config[\"env_config\"] or {}, worker_index)\n self.env = env_creator(env_context)\n # Seed the env, if gym.Env.\n if not hasattr(self.env, \"seed\"):\n logger.info(\"Env doesn't support env.seed(): {}\".format(self.env))\n # Gym.env.\n else:\n self.env.seed(seed)\n\n from ray.rllib import models\n\n self.preprocessor = models.ModelCatalog.get_preprocessor(\n self.env, config[\"model\"]\n )\n\n _policy_class = get_policy_class(config)\n self.policy = _policy_class(\n self.env.observation_space, self.env.action_space, config\n )\n\n @property\n def filters(self):\n return {DEFAULT_POLICY_ID: self.policy.observation_filter}\n\n def sync_filters(self, new_filters):\n for k in self.filters:\n self.filters[k].sync(new_filters[k])\n\n def get_filters(self, flush_after=False):\n return_filters = {}\n for k, f in self.filters.items():\n return_filters[k] = f.as_serializable()\n if flush_after:\n f.reset_buffer()\n return return_filters\n\n def rollout(self, timestep_limit, add_noise=True):\n rollout_rewards, rollout_fragment_length = rollout(\n self.policy, self.env, timestep_limit=timestep_limit, add_noise=add_noise\n )\n return rollout_rewards, rollout_fragment_length\n\n def do_rollouts(self, params, timestep_limit=None):\n # Set the network weights.\n self.policy.set_flat_weights(params)\n\n noise_indices, returns, sign_returns, lengths = [], [], [], []\n eval_returns, eval_lengths = [], []\n\n # Perform some rollouts with noise.\n task_tstart = time.time()\n while (\n len(noise_indices) == 0 or time.time() - task_tstart < self.min_task_runtime\n ):\n\n if np.random.uniform() < self.config[\"eval_prob\"]:\n # Do an evaluation run with no perturbation.\n self.policy.set_flat_weights(params)\n rewards, length = self.rollout(timestep_limit, add_noise=False)\n eval_returns.append(rewards.sum())\n eval_lengths.append(length)\n else:\n # Do a regular run with parameter perturbations.\n noise_index = self.noise.sample_index(self.policy.num_params)\n\n perturbation = self.config[\"noise_stdev\"] * self.noise.get(\n noise_index, self.policy.num_params\n )\n\n # These two sampling 
steps could be done in parallel on\n # different actors letting us update twice as frequently.\n self.policy.set_flat_weights(params + perturbation)\n rewards_pos, lengths_pos = self.rollout(timestep_limit)\n\n self.policy.set_flat_weights(params - perturbation)\n rewards_neg, lengths_neg = self.rollout(timestep_limit)\n\n noise_indices.append(noise_index)\n returns.append([rewards_pos.sum(), rewards_neg.sum()])\n sign_returns.append(\n [np.sign(rewards_pos).sum(), np.sign(rewards_neg).sum()]\n )\n lengths.append([lengths_pos, lengths_neg])\n\n return Result(\n noise_indices=noise_indices,\n noisy_returns=returns,\n sign_noisy_returns=sign_returns,\n noisy_lengths=lengths,\n eval_returns=eval_returns,\n eval_lengths=eval_lengths,\n )\n\n\ndef get_policy_class(config):\n if config[\"framework\"] == \"torch\":\n from ray.rllib.agents.es.es_torch_policy import ESTorchPolicy\n\n policy_cls = ESTorchPolicy\n else:\n policy_cls = ESTFPolicy\n return policy_cls\n\n\nclass ESTrainer(Trainer):\n \"\"\"Large-scale implementation of Evolution Strategies in Ray.\"\"\"\n\n @classmethod\n @override(Trainer)\n def get_default_config(cls) -> TrainerConfigDict:\n return ESConfig().to_dict()\n\n @override(Trainer)\n def validate_config(self, config: TrainerConfigDict) -> None:\n # Call super's validation method.\n super().validate_config(config)\n\n if config[\"num_gpus\"] > 1:\n raise ValueError(\"`num_gpus` > 1 not yet supported for ES!\")\n if config[\"num_workers\"] <= 0:\n raise ValueError(\"`num_workers` must be > 0 for ES!\")\n if config[\"evaluation_config\"][\"num_envs_per_worker\"] != 1:\n raise ValueError(\n \"`evaluation_config.num_envs_per_worker` must always be 1 for \"\n \"ES! To parallelize evaluation, increase \"\n \"`evaluation_num_workers` to > 1.\"\n )\n if config[\"evaluation_config\"][\"observation_filter\"] != \"NoFilter\":\n raise ValueError(\n \"`evaluation_config.observation_filter` must always be \"\n \"`NoFilter` for ES!\"\n )\n\n @override(Trainer)\n def setup(self, config):\n # Setup our config: Merge the user-supplied config (which could\n # be a partial config dict with the class' default).\n if isinstance(config, dict):\n self.config = self.merge_trainer_configs(\n self.get_default_config(), config, self._allow_unknown_configs\n )\n else:\n self.config = config.to_dict()\n\n # Call super's validation method.\n self.validate_config(self.config)\n\n # Generate `self.env_creator` callable to create an env instance.\n self.env_creator = self._get_env_creator_from_env_id(self._env_id)\n # Generate the local env.\n env_context = EnvContext(self.config[\"env_config\"] or {}, worker_index=0)\n env = self.env_creator(env_context)\n\n self.callbacks = self.config[\"callbacks\"]()\n\n self._policy_class = get_policy_class(self.config)\n self.policy = self._policy_class(\n obs_space=env.observation_space,\n action_space=env.action_space,\n config=self.config,\n )\n self.optimizer = optimizers.Adam(self.policy, self.config[\"stepsize\"])\n self.report_length = self.config[\"report_length\"]\n\n # Create the shared noise table.\n logger.info(\"Creating shared noise table.\")\n noise_id = create_shared_noise.remote(self.config[\"noise_size\"])\n self.noise = SharedNoiseTable(ray.get(noise_id))\n\n # Create the actors.\n logger.info(\"Creating actors.\")\n self.workers = [\n Worker.remote(self.config, {}, self.env_creator, noise_id, idx + 1)\n for idx in range(self.config[\"num_workers\"])\n ]\n\n self.episodes_so_far = 0\n self.reward_list = []\n self.tstart = time.time()\n\n 
@override(Trainer)\n def get_policy(self, policy=DEFAULT_POLICY_ID):\n if policy != DEFAULT_POLICY_ID:\n raise ValueError(\n \"ES has no policy '{}'! Use {} \"\n \"instead.\".format(policy, DEFAULT_POLICY_ID)\n )\n return self.policy\n\n @override(Trainer)\n def step_attempt(self):\n config = self.config\n\n theta = self.policy.get_flat_weights()\n assert theta.dtype == np.float32\n assert len(theta.shape) == 1\n\n # Put the current policy weights in the object store.\n theta_id = ray.put(theta)\n # Use the actors to do rollouts. Note that we pass in the ID of the\n # policy weights as these are shared.\n results, num_episodes, num_timesteps = self._collect_results(\n theta_id, config[\"episodes_per_batch\"], config[\"train_batch_size\"]\n )\n # Update our sample steps counters.\n self._counters[NUM_AGENT_STEPS_SAMPLED] += num_timesteps\n self._counters[NUM_ENV_STEPS_SAMPLED] += num_timesteps\n\n all_noise_indices = []\n all_training_returns = []\n all_training_lengths = []\n all_eval_returns = []\n all_eval_lengths = []\n\n # Loop over the results.\n for result in results:\n all_eval_returns += result.eval_returns\n all_eval_lengths += result.eval_lengths\n\n all_noise_indices += result.noise_indices\n all_training_returns += result.noisy_returns\n all_training_lengths += result.noisy_lengths\n\n assert len(all_eval_returns) == len(all_eval_lengths)\n assert (\n len(all_noise_indices)\n == len(all_training_returns)\n == len(all_training_lengths)\n )\n\n self.episodes_so_far += num_episodes\n\n # Assemble the results.\n eval_returns = np.array(all_eval_returns)\n eval_lengths = np.array(all_eval_lengths)\n noise_indices = np.array(all_noise_indices)\n noisy_returns = np.array(all_training_returns)\n noisy_lengths = np.array(all_training_lengths)\n\n # Process the returns.\n proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)\n\n # Compute and take a step.\n g, count = utils.batched_weighted_sum(\n proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],\n (self.noise.get(index, self.policy.num_params) for index in noise_indices),\n batch_size=500,\n )\n g /= noisy_returns.size\n assert (\n g.shape == (self.policy.num_params,)\n and g.dtype == np.float32\n and count == len(noise_indices)\n )\n # Compute the new weights theta.\n theta, update_ratio = self.optimizer.update(-g + config[\"l2_coeff\"] * theta)\n\n # Update our train steps counters.\n self._counters[NUM_AGENT_STEPS_TRAINED] += num_timesteps\n self._counters[NUM_ENV_STEPS_TRAINED] += num_timesteps\n\n # Set the new weights in the local copy of the policy.\n self.policy.set_flat_weights(theta)\n # Store the rewards\n if len(all_eval_returns) > 0:\n self.reward_list.append(np.mean(eval_returns))\n\n # Now sync the filters\n FilterManager.synchronize(\n {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers\n )\n\n info = {\n \"weights_norm\": np.square(theta).sum(),\n \"grad_norm\": np.square(g).sum(),\n \"update_ratio\": update_ratio,\n \"episodes_this_iter\": noisy_lengths.size,\n \"episodes_so_far\": self.episodes_so_far,\n }\n\n reward_mean = np.mean(self.reward_list[-self.report_length :])\n result = dict(\n episode_reward_mean=reward_mean,\n episode_len_mean=eval_lengths.mean(),\n timesteps_this_iter=noisy_lengths.sum(),\n info=info,\n )\n\n return result\n\n @override(Trainer)\n def compute_single_action(self, observation, *args, **kwargs):\n action, _, _ = self.policy.compute_actions([observation], update=False)\n if kwargs.get(\"full_fetch\"):\n return action[0], [], {}\n return action[0]\n\n 
@Deprecated(new=\"compute_single_action\", error=False)\n def compute_action(self, observation, *args, **kwargs):\n return self.compute_single_action(observation, *args, **kwargs)\n\n @override(Trainer)\n def _sync_weights_to_workers(self, *, worker_set=None, workers=None):\n # Broadcast the new policy weights to all evaluation workers.\n assert worker_set is not None\n logger.info(\"Synchronizing weights to evaluation workers.\")\n weights = ray.put(self.policy.get_flat_weights())\n worker_set.foreach_policy(lambda p, pid: p.set_flat_weights(ray.get(weights)))\n\n @override(Trainer)\n def cleanup(self):\n # workaround for https://github.com/ray-project/ray/issues/1516\n for w in self.workers:\n w.__ray_terminate__.remote()\n\n def _collect_results(self, theta_id, min_episodes, min_timesteps):\n num_episodes, num_timesteps = 0, 0\n results = []\n while num_episodes < min_episodes or num_timesteps < min_timesteps:\n logger.info(\n \"Collected {} episodes {} timesteps so far this iter\".format(\n num_episodes, num_timesteps\n )\n )\n rollout_ids = [\n worker.do_rollouts.remote(theta_id) for worker in self.workers\n ]\n # Get the results of the rollouts.\n for result in ray.get(rollout_ids):\n results.append(result)\n # Update the number of episodes and the number of timesteps\n # keeping in mind that result.noisy_lengths is a list of lists,\n # where the inner lists have length 2.\n num_episodes += sum(len(pair) for pair in result.noisy_lengths)\n num_timesteps += sum(sum(pair) for pair in result.noisy_lengths)\n\n return results, num_episodes, num_timesteps\n\n def __getstate__(self):\n return {\n \"weights\": self.policy.get_flat_weights(),\n \"filter\": self.policy.observation_filter,\n \"episodes_so_far\": self.episodes_so_far,\n }\n\n def __setstate__(self, state):\n self.episodes_so_far = state[\"episodes_so_far\"]\n self.policy.set_flat_weights(state[\"weights\"])\n self.policy.observation_filter = state[\"filter\"]\n FilterManager.synchronize(\n {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers\n )\n\n\n# Deprecated: Use ray.rllib.agents.es.ESConfig instead!\nclass _deprecated_default_config(dict):\n def __init__(self):\n super().__init__(ESConfig().to_dict())\n\n @Deprecated(\n old=\"ray.rllib.agents.es.es.DEFAULT_CONFIG\",\n new=\"ray.rllib.agents.es.es.ESConfig(...)\",\n error=False,\n )\n def __getitem__(self, item):\n return super().__getitem__(item)\n\n\nDEFAULT_CONFIG = _deprecated_default_config()\n" ]
[ [ "numpy.random.uniform", "numpy.sign", "numpy.random.seed", "numpy.random.RandomState", "numpy.array", "numpy.square", "numpy.mean" ] ]
AnimeshSinha1309/qaoa-optimizer
[ "2a93a46bacc99f22f49e7b5121eb3aa9f12c0163" ]
[ "qleet/simulators/pqc_trainer.py" ]
[ "\"\"\"The module which houses the Parametrized Quantum Circuit trainer class.\n\nIt generates the TensorFlow Quantum model, and allows Keras like API to\ntrain and evaluate a model.\n\"\"\"\n\nimport typing\n\nimport cirq\nimport tqdm.auto as tqdm\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\nimport tensorflow as tf\nimport tensorflow_quantum as tfq\n\nfrom ..interface.metas import AnalyzerList\nfrom ..interface.circuit import CircuitDescriptor\n\n\nclass PQCSimulatedTrainer:\n \"\"\"A class to train parametrized Quantum Circuits in Tensorflow Quantum\n Uses gradient descent over the provided parameters, using the TFQ Adjoin differentiator.\n \"\"\"\n\n def __init__(self, circuit: CircuitDescriptor):\n \"\"\"Constructs a PQC Trainer object to train the circuit.\n :type circuit: CircuitDescriptor\n :param circuit: The circuit object to train on the loss function\n \"\"\"\n self.optimizer = tf.keras.optimizers.Adam(lr=0.01)\n self.pqc_layer = tfq.layers.PQC(\n circuit.cirq_circuit,\n circuit.cirq_cost,\n differentiator=tfq.differentiators.Adjoint(),\n )\n self.model = tf.keras.models.Sequential(\n [tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string), self.pqc_layer]\n )\n self.circuit = circuit\n\n def train(\n self, n_samples=100, loggers: typing.Optional[AnalyzerList] = None\n ) -> tf.keras.Model:\n \"\"\"Trains the parameter of the circuit to minimize the loss.\n :type n_samples: int\n :param n_samples: Number of samples to train the circuit over\n :type loggers: `AnalyzerList`\n :param loggers: The AnalyzerList that tracks the training of the model\n :returns: The trained model\n :rtype: tf.keras.Model\n \"\"\"\n dummy_input = tfq.convert_to_tensor([cirq.Circuit()])\n total_error = 0.0\n with tqdm.trange(n_samples) as iterator:\n iterator.set_description(\"QAOA Optimization Loop\")\n for step in iterator:\n with tf.GradientTape() as tape:\n error = self.model(dummy_input)\n grads = tape.gradient(error, self.model.trainable_variables)\n self.optimizer.apply_gradients(\n zip(grads, self.model.trainable_variables)\n )\n error = error.numpy()[0][0]\n if loggers is not None:\n loggers.log(self, error)\n total_error += error\n iterator.set_postfix(error=total_error / (step + 1))\n return self.model\n\n def evaluate(self, n_samples: int = 1000) -> float:\n \"\"\"Evaluates the Parametrized Quantum Circuit.\n :type n_samples: int\n :param n_samples: The number of samples to evaluate the circuit over\n :returns: The average loss of the circuit over all the samples\n :rtype: float\n \"\"\"\n dummy_input = tfq.convert_to_tensor([cirq.Circuit()])\n total_error = 0.0\n with tqdm.trange(n_samples) as iterator:\n iterator.set_description(\"QAOA Evaluation Loop\")\n for step in iterator:\n error = self.model(dummy_input)\n error = error.numpy()[0][0]\n total_error += error\n iterator.set_postfix(error=total_error / (step + 1))\n return total_error / n_samples\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.GradientTape", "tensorflow.keras.layers.Input" ] ]
Adel-Moumen/speechbrain
[ "1837bbdab24d40b73f3eb354db00fbb063e5aca4" ]
[ "recipes/IEMOCAP/emotion_recognition/iemocap_prepare.py" ]
[ "\"\"\"\nDownloads and creates data manifest files for IEMOCAP\n(https://paperswithcode.com/dataset/iemocap).\n\nAuthors:\n * Mirco Ravanelli, 2021\n * Modified by Pierre-Yves Yanni, 2021\n * Abdel Heba, 2021\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport json\nimport random\nimport logging\nimport glob\nfrom scipy.io import wavfile\nfrom speechbrain.utils.data_utils import get_all_files\nfrom speechbrain.dataio.dataio import read_audio\n\nlogger = logging.getLogger(__name__)\nSAMPLERATE = 16000\nNUMBER_UTT = 5531\n\n\ndef prepare_data(\n data_original,\n data_transformed,\n save_json_train,\n save_json_valid,\n save_json_test,\n split_ratio=[80, 10, 10],\n different_speakers=False,\n seed=12,\n):\n \"\"\"\n Prepares the json files for the IEMOCAP dataset.\n\n We here use only the audio part of the dataset. The assumpion is\n that the data folder is structured as:\n\n <session_id>/<emotion>/<file:name>.wav\n\n e.g.\n session1/ang/psno1_ang_s084_orgn.wav\n\n Please, process the original IEMOCAP folder to match the expected\n folder structure.\n\n\n Arguments\n ---------\n data_original : str\n Path to the folder where the original IEMOCAP dataset is stored.\n data_transformed : str\n Path to the folder where the transformed IEMOCAP dataset will be stored.\n save_json_train : str\n Path where the train data specification file will be saved.\n save_json_valid : str\n Path where the validation data specification file will be saved.\n save_json_test : str\n Path where the test data specification file will be saved.\n split_ratio: list\n List composed of three integers that sets split ratios for train,\n valid, and test sets, respecively.\n For instance split_ratio=[80, 10, 10] will assign 80% of the sentences\n to training, 10% for validation, and 10% for test.\n seed : int\n Seed for reproducibility\n\n Example\n -------\n >>> data_original = '/path/to/iemocap/IEMOCAP_full_release/Session'\n >>> data_transformed = '/path/to/iemocap/IEMOCAP_ahsn_leave-two-speaker-out'\n >>> prepare_data(data_original, data_transformed, 'train.json', 'valid.json',\n 'test.json')\n \"\"\"\n\n # setting seeds for reproducible code.\n random.seed(seed)\n\n # Check if this phase is already done (if so, skip it)\n if skip(save_json_train, save_json_valid, save_json_test):\n logger.info(\"Preparation completed in previous run, skipping.\")\n return\n\n # Check if the transformed data folder exist, generate it otherwise.\n if not check_folders(data_transformed):\n logger.info(\n \"The data transformed folder doesn't exist. Do the transformation step.\"\n )\n transform_data(data_original, data_transformed)\n else:\n logger.info(\"Data Transformation completed in previous run, skipping.\")\n\n if (\n not len(list(glob.iglob(data_transformed + \"/*/*/*\", recursive=True)))\n == NUMBER_UTT\n ):\n logger.error(\n \"Error: The data folder is not in the expected format. Expected <session_id>/<emo_id>/<file_name>.wav (e.g., session1/ang/psno1_ang_s084_orgn.wav)\"\n )\n sys.exit(\n \"Data transformed dirctory \"\n + data_transformed\n + \"contains: \"\n + str(\n len(\n list(\n glob.iglob(data_transformed + \"/*/*/*\", recursive=True)\n )\n )\n )\n + \" file. 
Expected \"\n + str(NUMBER_UTT)\n + \".\"\n )\n\n # List files and create manifest from list\n logger.info(\n f\"Creating {save_json_train}, {save_json_valid}, and {save_json_test}\"\n )\n extension = [\".wav\"]\n\n # Randomly split the signal list into train, valid, and test sets.\n wav_list = get_all_files(data_transformed, match_and=extension)\n if different_speakers:\n data_split = split_different_speakers(wav_list)\n else:\n data_split = split_sets(wav_list, split_ratio)\n\n # Creating json files\n create_json(data_split[\"train\"], save_json_train)\n create_json(data_split[\"valid\"], save_json_valid)\n create_json(data_split[\"test\"], save_json_test)\n\n\ndef create_json(wav_list, json_file):\n \"\"\"\n Creates the json file given a list of wav files.\n\n Arguments\n ---------\n wav_list : list of str\n The list of wav files.\n json_file : str\n The path of the output json file\n \"\"\"\n # Processing all the wav files in the list\n\n json_dict = {}\n for wav_file in wav_list:\n\n # Reading the signal (to retrieve duration in seconds)\n signal = read_audio(wav_file)\n duration = signal.shape[0] / SAMPLERATE\n\n # Manipulate path to get relative path and uttid\n path_parts = wav_file.split(os.path.sep)\n uttid, _ = os.path.splitext(path_parts[-1])\n relative_path = os.path.join(\"{data_root}\", *path_parts[-3:])\n\n # Getting emotion\n emo = path_parts[-2]\n\n # Create entry for this utterance\n json_dict[uttid] = {\n \"wav\": relative_path,\n \"length\": duration,\n \"emo\": emo,\n }\n\n # Writing the dictionary to the json file\n with open(json_file, mode=\"w\") as json_f:\n json.dump(json_dict, json_f, indent=2)\n\n logger.info(f\"{json_file} successfully created!\")\n\n\ndef skip(*filenames):\n \"\"\"\n Detects if the data preparation has been already done.\n If the preparation has been done, we can skip it.\n\n Returns\n -------\n bool\n if True, the preparation phase can be skipped.\n if False, it must be done.\n \"\"\"\n for filename in filenames:\n if not os.path.isfile(filename):\n return False\n return True\n\n\ndef check_folders(*folders):\n \"\"\"Returns False if any passed folder does not exist.\"\"\"\n for folder in folders:\n if not os.path.exists(folder):\n return False\n return True\n\n\ndef split_different_speakers(wav_list):\n \"\"\"\"Constructs train, validation and test sets that do not share common\n speakers. There are two different speakers in each session. Train set is\n constituted of 3 sessions, validation set another session and test set the\n remaining session.\n\n Arguments\n ---------\n wav_list: list\n list of all signals in the dataset\n\n Returns\n ------\n dictionary containing train, valid, and test splits.\n \"\"\"\n data_split = {k: [] for k in [\"train\", \"valid\", \"test\"]}\n sessions = list(range(1, 6))\n random.shuffle(sessions)\n random.shuffle(wav_list)\n\n for path_wav in wav_list:\n session = int(os.path.split(path_wav)[-1][4])\n if session in sessions[:3]:\n data_split[\"train\"].append(path_wav)\n elif session == sessions[3]:\n data_split[\"valid\"].append(path_wav)\n else:\n data_split[\"test\"].append(path_wav)\n return data_split\n\n\ndef split_sets(wav_list, split_ratio):\n \"\"\"Randomly splits the wav list into training, validation, and test lists.\n Note that a better approach is to make sure that all the classes have the\n same proportion of samples (e.g, spk01 should have 80% of samples in\n training, 10% validation, 10% test, the same for speaker2 etc.). 
This\n is the approach followed in some recipes such as the Voxceleb one. For\n simplicity, we here simply split the full list without necessarly\n respecting the split ratio within each class.\n\n Arguments\n ---------\n wav_list : list\n list of all the signals in the dataset\n split_ratio: list\n List composed of three integers that sets split ratios for train,\n valid, and test sets, respectively.\n For instance split_ratio=[80, 10, 10] will assign 80% of the sentences\n to training, 10% for validation, and 10% for test.\n\n Returns\n ------\n dictionary containing train, valid, and test splits.\n \"\"\"\n # Random shuffle of the list\n random.shuffle(wav_list)\n tot_split = sum(split_ratio)\n tot_snts = len(wav_list)\n data_split = {}\n splits = [\"train\", \"valid\"]\n\n for i, split in enumerate(splits):\n n_snts = int(tot_snts * split_ratio[i] / tot_split)\n data_split[split] = wav_list[0:n_snts]\n del wav_list[0:n_snts]\n data_split[\"test\"] = wav_list\n\n return data_split\n\n\ndef transform_data(path_loadSession, path_structured_data):\n \"\"\"\n Process the original IEMOCAP folder to match the expected\n folder structure. This function will transform data as:\n\n <session_id>/<emotion>/<file:name>.wav\n\n e.g.\n session1/ang/psno1_ang_s084_orgn.wav\n\n Please,\n\n\n Arguments\n ---------\n path_loadSession : str\n Path to the folder where the original IEMOCAP dataset is stored.\n path_structured_data : str\n Path to the folder where the transformed IEMOCAP dataset will be stored.\n\n Example\n -------\n >>> data_original = '/path/to/iemocap/IEMOCAP_full_release/Session'\n >>> data_transformed = '/path/to/iemocap/IEMOCAP_ahsn_leave-two-speaker-out'\n >>> transform_data(data_original, data_transformed)\n \"\"\"\n\n for k in range(5):\n session_ = []\n session = load_session(\"%s%s\" % (path_loadSession, k + 1))\n for idx in range(len(session)):\n session_.append(session[idx])\n\n dic_ = count_emotion(session_)\n logger.info(\"=\" * 50)\n logger.info(\"Total Session_%d :\" % (k + 1) + \" %d\" % sum(dic_.values()))\n logger.info(dic_)\n pathName = \"%s/session%d/\" % (path_structured_data, (k + 1))\n logger.info(\"=\" * 50)\n if save_wavFile(session_, pathName) == 0:\n logger.info(\n \"Completed to save session_%d Wave files successfully.\"\n % (k + 1)\n )\n logger.info(\"=\" * 50)\n\n\ndef load_utterInfo(inputFile):\n \"\"\"\n Load utterInfo from original IEMOCAP database\n \"\"\"\n\n # this regx allow to create a list with:\n # [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D]\n # [V, A, D] means [Valence, Arousal, Dominance]\n pattern = re.compile(\n \"[\\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\\]][\\t][a-z0-9_]*[\\t][a-z]{3}[\\t][\\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\\]]\",\n re.IGNORECASE,\n ) # noqa\n with open(inputFile, \"r\") as myfile:\n data = myfile.read().replace(\"\\n\", \" \")\n result = pattern.findall(data)\n out = []\n for i in result:\n a = i.replace(\"[\", \"\")\n b = a.replace(\" - \", \"\\t\")\n c = b.replace(\"]\", \"\")\n x = c.replace(\", \", \"\\t\")\n out.append(x.split(\"\\t\"))\n return out\n\n\ndef load_session(pathSession):\n \"\"\"\n Load wav file from IEMOCAP session\n and keep only the following 4 emotions:\n [neural, happy, sad, anger].\n\n Arguments\n ---------\n pathSession: str\n Path folder of IEMOCAP session.\n Returns\n -------\n improvisedUtteranceList: list\n List of improvised utterancefor IEMOCAP session.\n \"\"\"\n pathEmo = pathSession + \"/dialog/EmoEvaluation/\"\n pathWavFolder = pathSession + 
\"/sentences/wav/\"\n\n improvisedUtteranceList = []\n for emoFile in [\n f\n for f in os.listdir(pathEmo)\n if os.path.isfile(os.path.join(pathEmo, f))\n ]:\n for utterance in load_utterInfo(pathEmo + emoFile):\n if (\n (utterance[3] == \"neu\")\n or (utterance[3] == \"hap\")\n or (utterance[3] == \"sad\")\n or (utterance[3] == \"ang\")\n or (utterance[3] == \"exc\")\n ):\n path = (\n pathWavFolder\n + utterance[2][:-5]\n + \"/\"\n + utterance[2]\n + \".wav\"\n )\n (sr, signal) = wavfile.read(path, mmap=False)\n\n if emoFile[7] != \"i\" and utterance[2][7] == \"s\":\n improvisedUtteranceList.append(\n [signal, utterance[3], utterance[2][18]]\n )\n else:\n improvisedUtteranceList.append(\n [signal, utterance[3], utterance[2][15]]\n )\n return improvisedUtteranceList\n\n\ndef count_emotion(session):\n \"\"\"\n Count number utterance per emotion for IEMOCAP session.\n\n Arguments\n ---------\n session: list\n List of utterance for IEMOCAP session.\n Returns\n -------\n dic: dict\n Number of example per emotion for IEMOCAP session.\n \"\"\"\n dic = {\n \"neu\": 0,\n \"hap\": 0,\n \"sad\": 0,\n \"ang\": 0,\n \"sur\": 0,\n \"fea\": 0,\n \"dis\": 0,\n \"fru\": 0,\n \"exc\": 0,\n \"xxx\": 0,\n }\n for i in range(len(session)):\n if session[i][1] == \"neu\":\n dic[\"neu\"] += 1\n elif session[i][1] == \"hap\":\n dic[\"hap\"] += 1\n elif session[i][1] == \"sad\":\n dic[\"sad\"] += 1\n elif session[i][1] == \"ang\":\n dic[\"ang\"] += 1\n elif session[i][1] == \"sur\":\n dic[\"sur\"] += 1\n elif session[i][1] == \"fea\":\n dic[\"fea\"] += 1\n elif session[i][1] == \"dis\":\n dic[\"dis\"] += 1\n elif session[i][1] == \"fru\":\n dic[\"fru\"] += 1\n elif session[i][1] == \"exc\":\n dic[\"exc\"] += 1\n elif session[i][1] == \"xxx\":\n dic[\"xxx\"] += 1\n return dic\n\n\ndef save_wavFile(session, pathName):\n \"\"\"\n Save wav files for each IEMOCAP session.\n\n Arguments\n ---------\n session: str\n IEMOCAP session name.\n pathName: str\n Path folder where the wav files will be saved.\n \"\"\"\n makedirs(pathName)\n for idx, utterance in enumerate(session):\n label = utterance[1]\n if label == \"exc\":\n label = \"hap\"\n directory = \"%s/%s\" % (pathName, label)\n makedirs(directory)\n filename = \"%s/psn%s%s_%s_s%03d_orgn.wav\" % (\n directory,\n pathName[-4],\n pathName[-2],\n label,\n idx,\n )\n wavfile.write(filename, 16000, utterance[0])\n\n return 0\n\n\ndef makedirs(path):\n \"\"\"\n Create directory if not exist.\n\n Arguments\n ---------\n path: str\n Path folder.\n \"\"\"\n if not os.path.exists(path):\n print(\" [*] Make directories : {}\".format(path))\n os.makedirs(path)\n" ]
[ [ "scipy.io.wavfile.write", "scipy.io.wavfile.read" ] ]
13488151126/mobilenet-yolov4-lite-tf2-main
[ "3f80aaa73d3dd305abc55fc65e26c309b19d9467" ]
[ "nets/mobilenet_v2.py" ]
[ "#-------------------------------------------------------------#\n# MobileNetV2的网络部分\n#-------------------------------------------------------------#\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import backend\nfrom tensorflow.keras.layers import (Activation, Add, BatchNormalization,\n Conv2D, Dense, DepthwiseConv2D, Dropout,\n GlobalAveragePooling2D,\n GlobalMaxPooling2D, Input, MaxPooling2D,\n ZeroPadding2D)\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing import image\n\n\n# relu6!\ndef relu6(x):\n return backend.relu(x, max_value=6)\n \n# 用于计算padding的大小\ndef correct_pad(inputs, kernel_size):\n img_dim = 1\n input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]\n\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n\n if input_size[0] is None:\n adjust = (1, 1)\n else:\n adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)\n\n correct = (kernel_size[0] // 2, kernel_size[1] // 2)\n\n return ((correct[0] - adjust[0], correct[0]),\n (correct[1] - adjust[1], correct[1]))\n\n# 使其结果可以被8整除,因为使用到了膨胀系数α\ndef _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\ndef _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):\n in_channels = backend.int_shape(inputs)[-1]\n pointwise_conv_filters = int(filters * alpha)\n pointwise_filters = _make_divisible(pointwise_conv_filters, 8)\n\n x = inputs\n prefix = 'block_{}_'.format(block_id)\n # part1 数据扩张\n if block_id:\n # Expand\n x = Conv2D(expansion * in_channels,\n kernel_size=1,\n padding='same',\n use_bias=False,\n activation=None,\n name=prefix + 'expand')(x)\n x = BatchNormalization(epsilon=1e-3,\n momentum=0.999,\n name=prefix + 'expand_BN')(x)\n x = Activation(relu6, name=prefix + 'expand_relu')(x)\n else:\n prefix = 'expanded_conv_'\n\n if stride == 2:\n x = ZeroPadding2D(padding=correct_pad(x, 3),\n name=prefix + 'pad')(x)\n \n # part2 可分离卷积\n x = DepthwiseConv2D(kernel_size=3,\n strides=stride,\n activation=None,\n use_bias=False,\n padding='same' if stride == 1 else 'valid',\n name=prefix + 'depthwise')(x)\n x = BatchNormalization(epsilon=1e-3,\n momentum=0.999,\n name=prefix + 'depthwise_BN')(x)\n\n x = Activation(relu6, name=prefix + 'depthwise_relu')(x)\n\n # part3压缩特征,而且不使用relu函数,保证特征不被破坏\n x = Conv2D(pointwise_filters,\n kernel_size=1,\n padding='same',\n use_bias=False,\n activation=None,\n name=prefix + 'project')(x)\n\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)\n\n if in_channels == pointwise_filters and stride == 1:\n return Add(name=prefix + 'add')([inputs, x])\n return x\n\ndef MobileNetV2(inputs, alpha=1.0):\n if alpha not in [0.5, 0.75, 1.0, 1.3]:\n raise ValueError('Unsupported alpha - `{}` in MobilenetV2, Use 0.5, 0.75, 1.0, 1.3'.format(alpha))\n # stem部分\n first_block_filters = _make_divisible(32 * alpha, 8)\n x = ZeroPadding2D(padding=correct_pad(inputs, 3),\n name='Conv1_pad')(inputs)\n # 416,416,3 -> 208,208,32\n x = Conv2D(first_block_filters,\n kernel_size=3,\n strides=(2, 2),\n padding='valid',\n use_bias=False,\n name='Conv1')(x)\n x = BatchNormalization(epsilon=1e-3,\n momentum=0.999,\n name='bn_Conv1')(x)\n x = Activation(relu6, name='Conv1_relu')(x)\n\n # 208,208,32 -> 208,208,16\n x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,\n expansion=1, block_id=0)\n # 208,208,16 -> 
104,104,24\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,\n expansion=6, block_id=1)\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,\n expansion=6, block_id=2)\n\n # 104,104,24 -> 52,52,32\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,\n expansion=6, block_id=3)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=4)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=5)\n feat1 = x\n\n # 52,52,32 -> 26,26,96\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,\n expansion=6, block_id=6)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,\n expansion=6, block_id=7)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,\n expansion=6, block_id=8)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,\n expansion=6, block_id=9)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,\n expansion=6, block_id=10)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,\n expansion=6, block_id=11)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,\n expansion=6, block_id=12)\n feat2 = x\n\n # 26,26,96 -> 13,13,320\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,\n expansion=6, block_id=13)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,\n expansion=6, block_id=14)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,\n expansion=6, block_id=15)\n x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,\n expansion=6, block_id=16)\n feat3 = x\n\n return feat1,feat2,feat3\n\n\n" ]
[ [ "tensorflow.keras.backend.int_shape", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.DepthwiseConv2D", "tensorflow.keras.backend.relu", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Conv2D" ] ]
OObasuyi/evidential-deep-learning
[ "995764dd3a1923ec3b0f35392d2e25e8a6831bd9" ]
[ "neurips2020/models/depth/bbbp.py" ]
[ "import tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, \\\n UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n\nimport functools\n\ndef create(input_shape, num_class=1, activation=tf.nn.relu):\n opts = locals().copy()\n\n # model = Depth_BBBP(num_class, activation)\n # return model, opts\n\n concat_axis = 3\n inputs = tf.keras.layers.Input(shape=input_shape)\n\n Conv2D_ = functools.partial(tfp.layers.Convolution2DReparameterization, activation=activation, padding='same')\n\n conv1 = Conv2D_(32, (3, 3))(inputs)\n conv1 = Conv2D_(32, (3, 3))(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D_(64, (3, 3))(pool1)\n conv2 = Conv2D_(64, (3, 3))(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D_(128, (3, 3))(pool2)\n conv3 = Conv2D_(128, (3, 3))(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D_(256, (3, 3))(pool3)\n conv4 = Conv2D_(256, (3, 3))(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D_(512, (3, 3))(pool4)\n conv5 = Conv2D_(512, (3, 3))(conv5)\n\n up_conv5 = UpSampling2D(size=(2, 2))(conv5)\n ch, cw = get_crop_shape(conv4, up_conv5)\n crop_conv4 = Cropping2D(cropping=(ch,cw))(conv4)\n up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)\n conv6 = Conv2D_(256, (3, 3))(up6)\n conv6 = Conv2D_(256, (3, 3))(conv6)\n\n up_conv6 = UpSampling2D(size=(2, 2))(conv6)\n ch, cw = get_crop_shape(conv3, up_conv6)\n crop_conv3 = Cropping2D(cropping=(ch,cw))(conv3)\n up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)\n conv7 = Conv2D_(128, (3, 3))(up7)\n conv7 = Conv2D_(128, (3, 3))(conv7)\n\n up_conv7 = UpSampling2D(size=(2, 2))(conv7)\n ch, cw = get_crop_shape(conv2, up_conv7)\n crop_conv2 = Cropping2D(cropping=(ch,cw))(conv2)\n up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)\n conv8 = Conv2D_(64, (3, 3))(up8)\n conv8 = Conv2D_(64, (3, 3))(conv8)\n\n up_conv8 = UpSampling2D(size=(2, 2))(conv8)\n ch, cw = get_crop_shape(conv1, up_conv8)\n crop_conv1 = Cropping2D(cropping=(ch,cw))(conv1)\n up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)\n conv9 = Conv2D_(32, (3, 3))(up9)\n conv9 = Conv2D_(32, (3, 3))(conv9)\n\n ch, cw = get_crop_shape(inputs, conv9)\n conv9 = ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)\n conv10 = Conv2D(num_class, (1, 1))(conv9)\n conv10 = 1e-6 * conv10\n\n model = tf.keras.models.Model(inputs=inputs, outputs=conv10)\n return model, opts\n\ndef get_crop_shape(target, refer):\n # width, the 3rd dimension\n cw = (target.get_shape()[2] - refer.get_shape()[2])\n assert (cw >= 0)\n if cw % 2 != 0:\n cw1, cw2 = int(cw/2), int(cw/2) + 1\n else:\n cw1, cw2 = int(cw/2), int(cw/2)\n # height, the 2nd dimension\n ch = (target.get_shape()[1] - refer.get_shape()[1])\n assert (ch >= 0)\n if ch % 2 != 0:\n ch1, ch2 = int(ch/2), int(ch/2) + 1\n else:\n ch1, ch2 = int(ch/2), int(ch/2)\n\n return (ch1, ch2), (cw1, cw2)\n#\n# # import numpy as np\n# # model = create((64,64,3), 2)\n# # x = np.ones((1,64,64,3), dtype=np.float32)\n# # output = model(x)\n# # import pdb; pdb.set_trace()\n" ]
[ [ "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.Cropping2D", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.concatenate", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Input" ] ]
OlivierGarciaDev/fanalysis
[ "9aa2cc8b1e5cc5600a05813144973b77143cfe42" ]
[ "fanalysis/mca.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\" mca module\n\"\"\"\n\n# Author: Olivier Garcia <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom fanalysis.base import Base\n\n\nclass MCA(Base):\n \"\"\" Multiple Correspondence Analysis (MCA)\n \n This class inherits from the Base class.\n \n MCA performs a Multiple Correspondence Analysis, given a table of\n categorical variables ; shape= n_rows x n_vars.\n Here n_columns = n_categories = the number of categories that are\n extracted from the data table.\n\n This implementation only works for dense arrays.\n\n Parameters\n ----------\n n_components : int, float or None\n Number of components to keep.\n - If n_components is None, keep all the components.\n - If 0 <= n_components < 1, select the number of components such\n that the amount of variance that needs to be explained is\n greater than the percentage specified by n_components.\n - If 1 <= n_components :\n - If n_components is int, select a number of components\n equal to n_components.\n - If n_components is float, select the higher number of\n components lower than n_components.\n \n row_labels : array of strings or None\n - If row_labels is an array of strings : this array provides the\n row labels.\n If the shape of the array doesn't match with the number of\n rows : labels are automatically computed for each row.\n - If row_labels is None : labels are automatically computed for\n each row.\n \n var_labels : array of strings or None\n - If var_labels is an array of strings : this array provides the \n variable labels.\n If the shape of the array doesn't match with the number of \n variables : labels are automatically computed for each\n variable.\n - If var_labels is None : labels are automatically computed for\n each variable.\n \n stats : bool\n - If stats is true : stats are computed : contributions and\n square cosines for rows and columns\n (here columns = categories).\n - If stats is false : stats are not computed.\n\n Attributes\n ----------\n n_components_ : int\n The estimated number of components.\n \n row_labels_ : array of strings\n Labels for the rows.\n \n var_labels : array of strings\n Labels for the variables.\n\n col_labels_ : array of strings\n Labels for the columns (here columns = categories).\n They are prefixed by the names of the variables.\n \n prefixes_ : array of strings\n Prefixes for the elements of col_labels_\n\n col_labels_short_ : array of strings\n Short labels for the columns (here columns = categories).\n They are not prefixed by the names of the variables).\n \n eig_ : array of float\n A 3 x n_components_ matrix containing all the eigenvalues\n (1st row), the percentage of variance (2nd row) and the\n cumulative percentage of variance (3rd row).\n \n row_coord_ : array of float\n A n_rows x n_components_ matrix containing the row coordinates.\n \n col_coord_ : array of float\n A n_categories_ x n_components_ matrix containing the column \n coordinates (= the categories coordinates).\n \n row_contrib_ : array of float\n A n_rows x n_components_ matrix containing the row\n contributions.\n \n col_contrib_ : array of float\n A n_categories_ x n_components_ matrix containing the column \n contributions (= the categories contributions).\n \n row_cos2_ : array of float\n A n_rows x n_components_ matrix containing the row cosines.\n \n col_cos2_ : array of float\n A n_categories_ x n_components_ matrix containing the columns\n cosines (= the categories cosines).\n\n n_vars_ : float\n 
Number of variables in the data table.\n    \n    n_categories_ : float\n        Number of categories that are extracted from the data table.\n\n    n_ : float\n        Here n_ = n_rows x n_vars\n    \n    r_ : float\n        Absolute frequencies for the rows = n_vars\n    \n    c_ : float\n        Absolute frequencies for the categories.\n    \n    model_ : string\n        The model fitted = 'mca'\n    \"\"\"\n
    def __init__(self, n_components=None, row_labels=None, var_labels=None,\n                 stats=True):\n        Base.__init__(self, n_components, row_labels, None, stats)\n        self.var_labels = var_labels\n    \n
    def fit(self, X, y=None):\n        \"\"\" Fit the model to X.\n        \n        Parameters\n        ----------\n        X : array of string, int or float, shape (n_rows, n_vars)\n            Training data, where n_rows is the number of rows and n_vars\n            is the number of variables.\n            X is a data table containing a category in each cell.\n            Categories can be coded by strings or numeric values.\n        \n        y : None\n            y is ignored.\n\n        Returns\n        -------\n        self : object\n            Returns the instance itself.\n        \"\"\"\n
        # Create a dummy variables table\n        X_dummies = self._binarization(X)\n        \n        # Fit a Factorial Analysis to the dummy variables table\n        self.r_ = np.sum(X_dummies, axis=1).reshape(-1, 1)\n        Base.fit(self, X_dummies, y=None)\n        \n
        # Adjustment of the number of components\n        n_eigen = self.n_categories_ - self.n_vars_\n        if (self.n_components_ > n_eigen):\n            self.n_components_ = n_eigen\n            self.eig_ = self.eig_[:, :self.n_components_]\n            self.row_coord_ = self.row_coord_[:, :self.n_components_]\n            self.col_coord_ = self.col_coord_[:, :self.n_components_]\n
            if self.stats:\n                self.row_contrib_ = self.row_contrib_[:, :self.n_components_]\n                self.col_contrib_ = self.col_contrib_[:, :self.n_components_]\n                self.row_cos2_ = self.row_cos2_[:, :self.n_components_]\n                self.col_cos2_ = self.col_cos2_[:, :self.n_components_]\n\n
        # Set col_labels_short_\n        self.col_labels_short_ = self.col_labels_short_temp_\n        \n        # Set col_labels_\n        self.col_labels_ = self.col_labels_temp_\n        \n        self.model_ = \"mca\"\n        \n        return self\n\n
    def transform(self, X, y=None):\n        \"\"\" Apply the dimensionality reduction on X. X is projected on\n        the first axes previously extracted from a training set.\n\n
        Parameters\n        ----------\n        X : array of string, int or float, shape (n_rows_sup, n_vars)\n            New data, where n_rows_sup is the number of supplementary\n            row points and n_vars is the number of variables.\n            X is a data table containing a category in each cell.\n            Categories can be coded by strings or numeric values.\n            X rows correspond to supplementary row points that are\n            projected onto the axes.\n        \n        y : None\n            y is ignored.\n\n
        Returns\n        -------\n        X_new : array of float, shape (n_rows_sup, n_components_)\n            X_new : coordinates of the projections of the supplementary\n            row points onto the axes.\n        \"\"\"\n
        # Build dummy variables for the supplementary rows table\n        nrows = X.shape[0]\n        #ncols = self.col_labels_.shape[0]\n        ncols = len(self.col_labels_)\n        Y = np.zeros(shape=(nrows, ncols))\n
        for i in np.arange(0, nrows, 1):\n            values = [self.prefixes_[k] + str(X[i, k])\n                      for k in np.arange(0, self.n_vars_)]\n            for j in np.arange(0, ncols, 1):\n                if self.col_labels_[j] in values:\n                    Y[i, j] = 1\n        \n
        # Apply the transform method to Y\n        return Base.transform(self, Y)\n\n
    def _binarization(self, X):\n        \"\"\" Create a dummy variables table\n        \n        This function also sets column prefixes and\n        self.col_labels_short_, which is useful for some graphs.\n        \n
        Parameters\n        ----------\n        X : array of string, int or float, shape (n_rows, n_vars)\n            X is a data table containing a category in each cell.\n            Categories can be coded by strings or numeric values.\n\n
        Returns\n        -------\n        X_d : object\n            Returns the dummy variables table.\n        \n        \"\"\"\n
        # Set column prefixes\n        self.n_vars_ = X.shape[1]\n        if self.var_labels is None:\n            self.prefixes_ = [\"col\" + str(x) + \"_\"\n                              for x in np.arange(0, self.n_vars_)]\n        elif len(self.var_labels) != self.n_vars_:\n            self.prefixes_ = [\"col\" + str(x) + \"_\"\n                              for x in np.arange(0, self.n_vars_)]\n        else:\n            self.prefixes_ = [str(x) + \"_\" for x in self.var_labels]\n        \n
        # Dummy variables creation\n        X_d = np.empty(shape=(X.shape[0], 0))\n        self.col_labels_short_temp_ = np.empty(shape=(0,))\n        self.col_labels_temp_ = np.empty(shape=(0,))\n
        for i in range(X.shape[1]):\n            lb = LabelBinarizer()\n            lb.fit(X[:, i])\n            X_di = lb.transform(X[:, i])\n
            if lb.classes_.shape[0] == 2:\n                if X_di[0,0] == (X[0, i] == lb.classes_[0]):\n                    X_di = np.c_[X_di, 1 - X_di]\n                else:\n                    X_di = np.c_[1 - X_di, X_di]\n
            X_d = np.append(X_d, X_di, axis=1)\n            self.col_labels_short_temp_ = np.append(\n                self.col_labels_short_temp_,\n                lb.classes_)\n            self.col_labels_temp_ = np.append(self.col_labels_temp_,\n                                              [self.prefixes_[i]\n                                               + str(x)\n                                               for x in lb.classes_]\n                                              )\n\n
        self.n_categories_ = X_d.shape[1]\n        \n        # Return the dummy variables table\n        return X_d\n" ]
[ [ "sklearn.preprocessing.LabelBinarizer", "numpy.sum", "numpy.append", "numpy.empty", "numpy.zeros", "numpy.arange" ] ]
MarinusHeindl/chesscog
[ "9022e19b1ad125481b561c765a886285abc16eb0" ]
[ "tests/core/test_coordinates.py" ]
[ "import numpy as np\n\nfrom chesscog.core.coordinates import from_homogenous_coordinates, to_homogenous_coordinates\n\n\ndef test_from_homogenous_coordinates():\n coords = np.array([2., 4., 2.])\n expected = np.array([1., 2.])\n assert np.allclose(from_homogenous_coordinates(coords), expected)\n\n\ndef test_to_homogenous_coordinates():\n coords = np.array([1., 2.])\n actual = to_homogenous_coordinates(coords)\n assert actual[2] != 0\n assert np.allclose(from_homogenous_coordinates(actual), coords)\n" ]
[ [ "numpy.array" ] ]
cse-bristol/smeter-ml
[ "db97902eec77c555c1dd030a676e7ebf5da40314" ]
[ "src/ml/model/ensemble.py" ]
[ "\"\"\"Module for generating an ensemble model from a given config file.\n\nExports a single class, Ensemble, which provides methods for training the ensemble,\nsaving the individual model files to disk and making predictions.\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as k\nfrom typing import Callable\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.backend import clear_session\nfrom ml.model.cnn import (\n base_cnn,\n base_cnn_with_static_features,\n base_cnn_single_output,\n base_cnn_with_static_features_and_single_output\n)\nfrom ml.model.loss import make_qd_loss_fn\nfrom ml.features import (\n make_X_and_y,\n make_sqm_X,\n make_htc_proxy_X,\n unscale_outputs\n)\nfrom ml.common.paths import MODELS_DIR, MODEL_CONFIG_DIR\nfrom ml.model.helpers import read_config_file\n\n\n_ENSEMBLE_PATH = os.path.join(MODELS_DIR, \"ensemble\")\n\n\nclass Ensemble:\n \"\"\"Class that wraps an ensemble model.\n\n This consists of the specified number of individual models which are trained\n and saved to disk.\n\n Ensemble predictions are simply the average of all the individual models' predictions.\n\n The only argument to the constructor is the name of the ensemble, which must correspend\n to a config file in model-config/.\n\n Config options are:\n\n Option Type Description Default\n ------ ---- ----------- -------\n ensemble_size int Number of ensemble members None\n epochs int Number of training epochs None\n input_days int Number of days to run model on None\n training_repeats int Number of training samples to make per house None\n loss_fn_lambda float lambda param for loss function None\n loss_fn_s float s param for loss function None\n training_htc_lower_bound float Only train on houses with htc > value None\n training_htc_upper_bound float Only train on houses with htc > value None\n channels list Which channels the model will use [mean_temp, outdoor_temp,\n gas_kwh, elec_kwh]\n validation_split float validation_split param in keras None\n early_stopping_variable str For keras EarlyStopping, e.g. val_loss None\n early_stopping_patience int For keras EarlyStopping, how many iterations None\n to wait before stopping early\n ensemble_type str Either regular or bagged None\n static_features list Either floor_area or proxy None\n (proxy is sum(gas) / mean(in_temp - out_temp)\n over whole data series)\n scale_inputs bool Whether or not to scale inputs into (0,1) False\n scale_targets bool Whether or not to scale targets into (0,1) False\n single_output_only bool Whether or not to only output HTC, rather False\n than upper and lower bounds\n building_types list Subset of SEMI, MID_TERRACE,END_TERRACE, []\n DETACHED, BUNGALOW. Only these building types\n will be used in training data.\n \"\"\"\n # Set default values for ensemble options\n ensemble_size: int = None\n epochs: int = None\n input_days: int = None\n training_repeats: int = None\n loss_fn_lambda: float = None\n loss_fn_s: float = None\n training_htc_lower_bound: float = None\n training_htc_upper_bound: float = None\n channels: list = [\"mean_temp\", \"outdoor_temp\", \"gas_kwh\", \"elec_kwh\"]\n validation_split: float = None\n early_stopping_variable: str = None\n early_stopping_patience: int = None\n ensemble_type: str = None\n static_features: list = None\n scale_inputs: bool = False\n scale_targets: bool = False\n single_output_only: bool = False\n building_types: list = []\n\n def __init__(self, name: str):\n \"\"\"Constructor. 
Read all the config options from the file and set them on self.\"\"\"\n self.name = name\n config_path = os.path.join(MODEL_CONFIG_DIR, f\"{ensemble_name}.txt\")\n options = read_config_file(config_path)\n\n for option, value in options.items():\n setattr(self, option, value)\n\n def train(self):\n \"\"\"Train all the ensemble members, save them in models/ensemble/<ensemble_name>\n and print out some stats about the training.\"\"\"\n # Make the basic X and y\n X_train, y_train = self.__make_X_and_y()\n\n # Filter channels and add static features if necessary\n X_train, y_train, X_static = self.__modify_training_features(X_train, y_train)\n\n # Add early stopping condition if specified\n if self.early_stopping_variable:\n es = EarlyStopping(monitor=self.early_stopping_variable, patience=self.early_stopping_patience)\n\n # Make loss function\n loss_fn = make_qd_loss_fn(lam=self.loss_fn_lambda, s=self.loss_fn_s)\n\n # Make dir to save models in\n self.ensemble_path = os.path.join(_ENSEMBLE_PATH, ensemble_name)\n os.makedirs(self.ensemble_path)\n\n # Make the individual ensemble members\n for i in range(self.ensemble_size):\n self.__train_ensemble_member(i, X_train, y_train, X_static, loss_fn, es)\n\n self.__create_training_statistics(X_train, y_train, X_static)\n\n def predict(self, X: np.ndarray):\n \"\"\"Make predictions on X.\"\"\"\n ensemble_predictions = []\n\n # Load each of the ensemble members and make predictions. Append predictions to ensemble_predictions.\n for f in os.scandir(self.ensemble_path):\n if \"json\" in f.name:\n member_name = f.name[:-5]\n model = self.__load_ensemble_member(member_name)\n pred = model.predict(X)\n ensemble_predictions.append(pred)\n\n predictions = np.array(ensemble_predictions).mean(axis=0)\n # The prediction intervals may not be the right way round, so sort them to make sure they are.\n predictions.sort(axis=1)\n\n return predictions\n\n def __create_training_statistics(self, X_train: np.ndarray, y_train: np.ndarray, X_static: np.ndarray):\n \"\"\"\n Computes, some statistics about how the model was trained, prints them out and also\n writes them to models/ensemble/<model name>/trainingsummary.txt.\n\n For models with a single output feature, just computes the training RMSE.\n For models with upper and lower bounds as output, computes the RMSE + the average\n prediction interval width and prediction interval coverage.\n \"\"\"\n # Write some statistics about the trained model to file\n has_static_features = self.static_features and len(self.static_features) > 0\n X_pred = [X_train, X_static] if has_static_features else X_train\n\n if self.single_output_only:\n preds = self.predict(X_pred)\n if self.scale_targets:\n pi_preds = unscale_outputs(preds)\n y_train = unscale_outputs(y_train)\n\n rmse = np.sqrt(np.mean((preds - y_train) ** 2))\n print(f\"Train RMSE: {round(rmse)}\")\n\n with open(os.path.join(self.ensemble_path, \"trainingsummary.txt\"), \"w+\") as f:\n f.write(f\"Train RMSE: {round(rmse)}\")\n else:\n pi_preds = self.predict(X_pred)\n if self.scale_targets:\n pi_preds = unscale_outputs(pi_preds)\n y_train = unscale_outputs(y_train)\n\n htc_preds = pi_preds.mean(axis=1)\n rmse = np.sqrt(np.mean((htc_preds - y_train) ** 2))\n pi_coverage = np.mean(((y_train >= pi_preds[:,0]) & (y_train <= pi_preds[:,1])).astype(int))\n mean_pi_width = np.mean(pi_preds[:,1] - pi_preds[:,0])\n\n print(\"\\nTraining summary\")\n print(\"----------------\")\n print(f\"Train RMSE: {round(rmse)}\")\n print(f\"PI coverage: {round(pi_coverage * 100)}%\")\n 
print(f\"Mean PI width: {round(mean_pi_width)}\")\n\n with open(os.path.join(self.ensemble_path, \"trainingsummary.txt\"), \"w+\") as f:\n f.write(f\"Train RMSE: {round(rmse)}\\n\")\n f.write(f\"PI coverage: {round(pi_coverage * 100)}%\\n\")\n f.write(f\"Mean PI width: {round(mean_pi_width)}\")\n\n def __make_X_and_y(self):\n \"\"\"Makes X and y for training.\"\"\"\n # Make the basic X and y\n X_train, y_train = make_X_and_y(\n days=self.input_days,\n repeats=self.training_repeats,\n scale_inputs=self.scale_inputs,\n scale_targets=self.scale_targets,\n building_types=self.building_types,\n htc_upper_bound=self.training_htc_upper_bound,\n htc_lower_bound=self.training_htc_lower_bound\n )\n\n return X_train, y_train\n\n def __modify_training_features(self, X_train: np.ndarray, y_train: np.ndarray):\n \"\"\"Takes X and y and filters channels and adds static features if necessary.\"\"\"\n has_static_features = self.static_features and len(self.static_features) > 0\n\n # If static features are to be used in the model, make the training data arrays\n if has_static_features:\n # This assumes there is only ever one static features\n # TODO deal with the case where there are multiple\n if self.static_features == [\"floor_area\"]:\n X_static = make_sqm_X(repeats=self.training_repeats, scale=self.scale_inputs)\n elif self.static_features == [\"proxy\"]:\n X_static = make_htc_proxy_X(X_train)\n else:\n X_static = np.array([])\n\n # Filter channels if specified\n channels = self.channels.copy()\n if len(channels) < 4:\n all_channels = [\"mean_temp\", \"outdoor_temp\", \"gas_kwh\", \"elec_kwh\"]\n\n if \"temp_diff\" in channels:\n temp_diff_X = X_train[:,:,0] - X_train[:,:,1]\n temp_diff_X = temp_diff_X.reshape((temp_diff_X.shape[0], temp_diff_X.shape[1], 1))\n channels.remove(\"temp_diff\")\n other_channel_indices = [all_channels.index(c) for c in channels]\n other_channels_X = X_train[:,:,other_channel_indices]\n\n X_train = np.concatenate((temp_diff_X, other_channels_X), axis=2)\n else:\n channel_indices = [all_channels.index(c) for c in channels]\n X_train = X_train[:,:,channel_indices]\n\n return X_train, y_train, X_static\n\n def __train_ensemble_member(\n self,\n i: int,\n X_train: np.ndarray,\n y_train: np.ndarray,\n X_static: np.ndarray,\n loss_fn: Callable,\n es: EarlyStopping\n ):\n \"\"\"Trains an individual member of the ensemble and saves it to file.\"\"\"\n tf.set_random_seed(i)\n\n # Instantiate the model\n has_static_features = self.static_features and len(self.static_features) > 0\n\n if has_static_features and self.single_output_only:\n model = base_cnn_with_static_features_and_single_output(\n cnn_input_shape=X_train[0].shape,\n static_features_input_shape=X_static[0].shape,\n )\n elif has_static_features and not self.single_output_only:\n model = base_cnn_with_static_features(\n cnn_input_shape=X_train[0].shape,\n static_features_input_shape=X_static[0].shape,\n loss_fn=loss_fn\n )\n elif not has_static_features and self.single_output_only:\n model = base_cnn_single_output(input_shape=X_train[0].shape)\n else:\n model = base_cnn(input_shape=X_train[0].shape, loss_fn=loss_fn)\n\n # Train the model\n # Construct the arguments to fit() as a list and a dict as it makes the logic slightly less messy\n kwargs = {\"epochs\": self.epochs, \"validation_split\": self.validation_split, \"verbose\": 0}\n if self.early_stopping_variable:\n kwargs[\"callbacks\"] = [es]\n\n # Create training data based on whether we are using bagging and whether there are static features\n # Bagged AND static 
features\n if self.ensemble_type == \"bagged\" and has_static_features:\n X_train_, X_static_, y_train_, = _make_bagged_training_sets(X_train, y_train, X_static, seed=i)\n args = [{\"timeseries\": X_train_, \"static_features\": X_static_}, {\"output\": y_train}]\n\n # Bagged AND NO static features\n elif self.ensemble_type == \"bagged\" and not has_static_features:\n args = _make_bagged_training_sets(X_train, y_train, seed=i)\n\n # Regular AND static features\n elif self.ensemble_type == \"regular\" and has_static_features:\n args = [{\"timeseries\": X_train, \"static_features\": X_static}, {\"output\", y_train}]\n\n # Regular AND NO static features\n elif self.ensemble_type == \"regular\" and not has_static_features:\n args = [X_train, y_train]\n\n model.fit(*args, **kwargs)\n\n # Save model\n model.save_weights(os.path.join(self.ensemble_path, f\"model{i}.h5\"), save_format=\"h5\")\n with open(os.path.join(self.ensemble_path, f\"model{i}.json\"), \"w+\") as f:\n f.write(model.to_json())\n\n print(f\"Trained model {i + 1} of {self.ensemble_size}\", flush=True)\n\n # Clear model from memory\n clear_session()\n\n def __load_ensemble_member(self, member_name: str):\n \"\"\"Load CNN from saved serialized architecture and weights.\"\"\"\n with open(os.path.join(self.ensemble_path, f\"{member_name}.json\")) as f:\n json_str = f.read()\n model = k.models.model_from_json(json_str, custom_objects={\"GlorotUniform\": k.initializers.glorot_uniform})\n\n model.load_weights(os.path.join(self.ensemble_path, f\"{member_name}.h5\"))\n\n return model\n\n\ndef _make_bagged_training_sets(\n X: np.ndarray,\n y: np.ndarray,\n X_static: np.ndarray = None,\n seed: int = 0\n):\n \"\"\"Generate new copies of X and y by randomly choosing with replacement.\"\"\"\n rng = np.random.default_rng(seed)\n indices = rng.choice(X.shape[0], X.shape[0], replace=True)\n\n if X_static is None:\n return X[indices,:,:], y[indices]\n else:\n return X[indices,:,:], X_static[indices,:], y[indices]\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n raise ValueError(\"Correct usage via make is: make ensemble config=<config file name>\")\n else:\n # Allow user to pass in either e.g. `model1.txt` or just `model1`\n ensemble_name = sys.argv[1].replace(\".txt\", \"\")\n ensemble = Ensemble(ensemble_name)\n ensemble.train()\n" ]
[ [ "numpy.random.default_rng", "tensorflow.keras.backend.clear_session", "tensorflow.set_random_seed", "tensorflow.keras.callbacks.EarlyStopping", "numpy.array", "numpy.concatenate", "tensorflow.keras.models.model_from_json", "numpy.mean" ] ]
katetolstaya/graph_rl
[ "f48dcfd4b9b0872de49dc5e91b8950fe825d0f0d" ]
[ "eval_exp_field.py" ]
[ "import numpy as np\nimport gym\nimport gym_flock\nimport glob\nimport sys\nimport rl_comm.gnn_fwd as gnn_fwd\nfrom rl_comm.ppo2 import PPO2\nfrom stable_baselines.common.vec_env import SubprocVecEnv\nfrom stable_baselines.common.base_class import BaseRLModel\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\n\n\ndef make_env():\n env_name = \"CoverageARL-v1\"\n my_env = gym.make(env_name)\n my_env = gym.wrappers.FlattenDictWrapper(my_env, dict_keys=my_env.env.keys)\n return my_env\n\n\ndiameters = [15,20,25,30,40,50,60,70,80,90,100]\n\ndef eval_model(env, model, n_episodes):\n \"\"\"\n Evaluate a model against an environment over N games.\n \"\"\"\n results = {'reward': np.zeros(n_episodes), 'diameter': np.zeros(n_episodes)}\n for k in range(n_episodes):\n\n env.env.env.subgraph_size = env.env.env.range_xy / np.random.uniform(1.9, 4.0)\n done = False\n obs = env.reset()\n\n env.env.env.controller(random=False, greedy=True)\n diameter = env.env.env.graph_diameter\n print(diameter)\n if diameter in diameters:\n results['diameter'][k] = env.env.env.graph_diameter\n # Run one game.\n while not done:\n action, states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action)\n results['reward'][k] += rewards\n\n return results\n\n\ndef load_model(model_name, vec_env, new_model=None):\n model_params, params = BaseRLModel._load_from_file(model_name)\n\n if new_model is None:\n new_model = PPO2(\n policy=gnn_fwd.MultiGnnFwd,\n policy_kwargs=model_params['policy_kwargs'],\n env=vec_env)\n\n # update new model's parameters\n new_model.load_parameters(params)\n return new_model\n\n\nif __name__ == '__main__':\n fnames = ['nl2_1_3_16', 'nl2_1_19_16']\n labels = ['K = 3', 'K = 19']\n colors = ['tab:blue', 'tab:orange']\n\n env = make_env()\n vec_env = SubprocVecEnv([make_env])\n fig = plt.figure(figsize=(6, 4))\n\n for fname, label, color in zip(fnames, labels, colors):\n print('Evaluating ' + fname)\n\n ckpt_dir = 'models/' + fname + '/ckpt'\n new_model = None\n\n try:\n ckpt_list = sorted(glob.glob(str(ckpt_dir) + '/*.pkl'))\n ckpt_idx = int(ckpt_list[-2][-7:-4])\n except IndexError:\n print('Invalid experiment folder name!')\n raise\n\n model_name = ckpt_dir + '/ckpt_' + str(ckpt_idx).zfill(3) + '.pkl'\n new_model = load_model(model_name, vec_env, None)\n n_episodes = 2000\n results = eval_model(env, new_model, n_episodes)\n\n # x = results['diameter']\n # y = results['reward']\n # z = np.polyfit(results['diameter'], results['reward'], 1)\n # f = np.poly1d(z)\n # x_new = np.linspace(np.min(x), np.max(x), 50)\n # y_new = f(x_new)\n # plt.plot(x, y, 'o', x_new, y_new, label=label, color=color)\n\n means = []\n sems = []\n cur_diameters = []\n for d in diameters:\n rewards = results['reward'][results['diameter'] == d][0:10]\n if len(rewards) > 0:\n means.append(np.mean(rewards))\n sems.append(np.std(rewards)/np.sqrt(len(rewards)))\n cur_diameters.append(d)\n\n plt.errorbar(cur_diameters, means, yerr=sems, label=label)\n\n mean_reward = np.mean(results['reward'])\n std_reward = np.std(results['reward'])\n print('Reward over {} episodes: mean = {:.1f}, std = {:.1f}'.format(n_episodes, mean_reward, std_reward))\n plt.xlabel('Graph Diameter')\n plt.ylabel('Episode Reward')\n plt.legend()\n plt.savefig('field2.eps', format='eps')\n plt.show()\n" ]
[ [ "numpy.random.uniform", "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.std", "matplotlib.pyplot.xlabel", "numpy.mean" ] ]
a7532ariel/grid-feats-vqa
[ "907828898457cfbd099dff347b58488c1642986b" ]
[ "grid_feats/roi_heads.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.modeling.roi_heads import (\n build_box_head,\n build_mask_head,\n select_foreground_proposals,\n ROI_HEADS_REGISTRY,\n ROIHeads,\n Res5ROIHeads,\n StandardROIHeads,\n)\nfrom detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers\nfrom detectron2.modeling.poolers import ROIPooler\nfrom detectron2.structures import Boxes\n\n\nclass AttributePredictor(nn.Module):\n \"\"\"\n Head for attribute prediction, including feature/score computation and\n loss computation.\n\n \"\"\"\n def __init__(self, cfg, input_dim):\n super().__init__()\n\n # fmt: off\n self.num_objs = cfg.MODEL.ROI_HEADS.NUM_CLASSES\n self.obj_embed_dim = cfg.MODEL.ROI_ATTRIBUTE_HEAD.OBJ_EMBED_DIM\n self.fc_dim = cfg.MODEL.ROI_ATTRIBUTE_HEAD.FC_DIM\n self.num_attributes = cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_CLASSES\n self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS\n self.loss_weight = cfg.MODEL.ROI_ATTRIBUTE_HEAD.LOSS_WEIGHT\n # fmt: on\n\n # object class embedding, including the background class\n self.obj_embed = nn.Embedding(self.num_objs + 1, self.obj_embed_dim)\n input_dim += self.obj_embed_dim\n self.fc = nn.Sequential(\n nn.Linear(input_dim, self.fc_dim),\n nn.ReLU()\n )\n self.attr_score = nn.Linear(self.fc_dim, self.num_attributes)\n nn.init.normal_(self.attr_score.weight, std=0.01)\n nn.init.constant_(self.attr_score.bias, 0)\n\n def forward(self, x, obj_labels):\n attr_feat = torch.cat((x, self.obj_embed(obj_labels)), dim=1)\n return self.attr_score(self.fc(attr_feat))\n\n def loss(self, score, label):\n n = score.shape[0]\n score = score.unsqueeze(1)\n score = score.expand(n, self.max_attr_per_ins, self.num_attributes).contiguous()\n score = score.view(-1, self.num_attributes)\n inv_weights = (\n (label >= 0).sum(dim=1).repeat(self.max_attr_per_ins, 1).transpose(0, 1).flatten()\n )\n weights = inv_weights.float().reciprocal()\n weights[weights > 1] = 0.\n n_valid = len((label >= 0).sum(dim=1).nonzero())\n label = label.view(-1)\n attr_loss = F.cross_entropy(score, label, reduction=\"none\", ignore_index=-1)\n attr_loss = (attr_loss * weights).view(n, -1).sum(dim=1)\n\n if n_valid > 0:\n attr_loss = attr_loss.sum() * self.loss_weight / n_valid\n else:\n attr_loss = attr_loss.sum() * 0.\n return {\"loss_attr\": attr_loss}\n\n\nclass AttributeROIHeads(ROIHeads):\n \"\"\"\n An extension of ROIHeads to include attribute prediction.\n \"\"\"\n def forward_attribute_loss(self, proposals, box_features):\n proposals, fg_selection_attributes = select_foreground_proposals(\n proposals, self.num_classes\n )\n attribute_features = box_features[torch.cat(fg_selection_attributes, dim=0)]\n obj_labels = torch.cat([p.gt_classes for p in proposals])\n attribute_labels = torch.cat([p.gt_attributes for p in proposals], dim=0)\n attribute_scores = self.attribute_predictor(attribute_features, obj_labels)\n return self.attribute_predictor.loss(attribute_scores, attribute_labels)\n\n\n@ROI_HEADS_REGISTRY.register()\nclass AttributeRes5ROIHeads(AttributeROIHeads, Res5ROIHeads):\n \"\"\"\n An extension of Res5ROIHeads to include attribute prediction.\n \"\"\"\n def __init__(self, cfg, input_shape):\n super(Res5ROIHeads, self).__init__(cfg, input_shape)\n\n assert len(self.in_features) == 1\n\n # fmt: off\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n pooler_scales = (1.0 / 
input_shape[self.in_features[0]].stride, )\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n self.mask_on = cfg.MODEL.MASK_ON\n self.attribute_on = cfg.MODEL.ATTRIBUTE_ON\n if self.attribute_on:\n self.attribute_thre = cfg.MODEL.ATTRIBUTE_THRE\n # fmt: on\n assert not cfg.MODEL.KEYPOINT_ON\n\n self.pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n\n self.res5, out_channels = self._build_res5_block(cfg)\n self.box_predictor = FastRCNNOutputLayers(\n cfg, ShapeSpec(channels=out_channels, height=1, width=1)\n )\n\n if self.mask_on:\n self.mask_head = build_mask_head(\n cfg,\n ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),\n )\n\n if self.attribute_on:\n self.attribute_predictor = AttributePredictor(cfg, out_channels)\n\n def forward(self, images, features, proposals, targets=None):\n del images\n\n if self.training:\n assert targets\n proposals = self.label_and_sample_proposals(proposals, targets)\n del targets\n\n proposal_boxes = [x.proposal_boxes for x in proposals]\n box_features = self._shared_roi_transform(\n [features[f] for f in self.in_features], proposal_boxes\n )\n feature_pooled = box_features.mean(dim=[2, 3])\n predictions = self.box_predictor(feature_pooled)\n\n if self.training:\n del features\n losses = self.box_predictor.losses(predictions, proposals)\n if self.mask_on:\n proposals, fg_selection_masks = select_foreground_proposals(\n proposals, self.num_classes\n )\n mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]\n del box_features\n losses.update(self.mask_head(mask_features, proposals))\n if self.attribute_on:\n losses.update(self.forward_attribute_loss(proposals, feature_pooled))\n return [], losses\n else:\n pred_instances, chose_indices = self.box_predictor.inference(predictions, proposals)\n pred_instances = self.forward_with_given_boxes(features, pred_instances)\n pred_instances[0].pred_attributes = [[] for _ in range(chose_indices[0].size(0))]\n pred_instances[0].attr_scores = [[] for _ in range(chose_indices[0].size(0))]\n\n if self.attribute_on and chose_indices[0].size(0) != 0:\n attr_labels, attr_scores = self.predict_attrs(\n feature_pooled[chose_indices], \n predictions[0][chose_indices],\n self.attribute_thre\n )\n pred_instances[0].pred_attributes = attr_labels\n pred_instances[0].attr_scores = attr_scores\n\n return pred_instances, {}\n\n def predict_attrs(self, features, obj_probs, score_thresh=0.5):\n obj_labels = torch.argmax(obj_probs, dim=1)\n\n attribute_scores = self.attribute_predictor(features, obj_labels)\n attr_labels = torch.argmax(attribute_scores, dim=1) \n attr_scores = attribute_scores.gather(1, attr_labels.unsqueeze(1))\n\n return attr_labels, attr_scores\n\n def get_conv5_features(self, features):\n features = [features[f] for f in self.in_features]\n return self.res5(features[0])\n\n\n@ROI_HEADS_REGISTRY.register()\nclass AttributeStandardROIHeads(AttributeROIHeads, StandardROIHeads):\n \"\"\"\n An extension of StandardROIHeads to include attribute prediction.\n \"\"\"\n def __init__(self, cfg, input_shape):\n super(StandardROIHeads, self).__init__(cfg, input_shape)\n self._init_box_head(cfg, input_shape)\n self._init_mask_head(cfg, input_shape)\n self._init_keypoint_head(cfg, input_shape)\n\n def _init_box_head(self, cfg, input_shape):\n # fmt: off\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / input_shape[k].stride for k in 
self.in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES\n self.attribute_on = cfg.MODEL.ATTRIBUTE_ON\n # fmt: on\n\n in_channels = [input_shape[f].channels for f in self.in_features]\n assert len(set(in_channels)) == 1, in_channels\n in_channels = in_channels[0]\n\n self.box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n self.box_head = build_box_head(\n cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)\n )\n self.box_predictor = FastRCNNOutputLayers(cfg, self.box_head.output_shape)\n\n if self.attribute_on:\n self.attribute_predictor = AttributePredictor(cfg, self.box_head.output_shape.channels)\n\n def _forward_box(self, features, proposals):\n features = [features[f] for f in self.in_features]\n box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])\n box_features = self.box_head(box_features)\n predictions = self.box_predictor(box_features)\n\n if self.training:\n if self.train_on_pred_boxes:\n with torch.no_grad():\n pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(\n predictions, proposals\n )\n for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):\n proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)\n losses = self.box_predictor.losses(predictions, proposals)\n if self.attribute_on:\n losses.update(self.forward_attribute_loss(proposals, box_features))\n del box_features\n\n return losses\n else:\n pred_instances, _ = self.box_predictor.inference(predictions, proposals)\n return pred_instances\n\n def get_conv5_features(self, features):\n assert len(self.in_features) == 1\n\n features = [features[f] for f in self.in_features]\n return features[0]\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.init.constant_", "torch.argmax", "torch.no_grad", "torch.nn.Embedding", "torch.nn.init.normal_", "torch.nn.ReLU", "torch.nn.functional.cross_entropy", "torch.cat" ] ]
Stevanus-Christian/tensorflow
[ "d44afcf5ca16c5d704c66f891b99eac804e7cd14" ]
[ "tensorflow/compiler/xla/python/xla_client.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An XLA client in Python.\"\"\"\n\nimport atexit\nimport contextlib\nimport enum # pylint: disable=g-bad-import-order\nimport gzip\nimport inspect\nimport os\nfrom typing import List, Sequence, Tuple, Union\n\nfrom . import xla_extension as _xla\n\nimport numpy as np\n\n# Note this module does *not* depend on any Python protocol buffers. The XLA\n# Python bindings are currently packaged both as part of jaxlib and as part\n# of TensorFlow. If we use protocol buffers here, then importing both jaxlib\n# and TensorFlow may fail with duplicate protocol buffer message definitions.\n\n# Most functions are snake_case for consistency with other modules, some\n# method names are CamelCase for consistency with XLA.\n# pylint: disable=invalid-name\n\n# Pylint has false positives for type annotations.\n# pylint: disable=invalid-sequence-index\n\nops = _xla.ops\nprofiler = _xla.profiler\n\n# Just an internal arbitrary increasing number to help with backward-compatible\n# changes.\n_version = 69\n\n# Version number for MLIR:Python components.\nmlir_api_version = 18\n\nxla_platform_names = {\n 'cpu': 'Host',\n 'gpu': 'CUDA',\n}\n\n\ndef make_interpreter_client():\n return _xla.get_interpreter_client()\n\n\ndef make_cpu_client(*, use_tfrt: bool = True) -> ...:\n if use_tfrt:\n return _xla.get_tfrt_cpu_client(asynchronous=True)\n else:\n return _xla.get_cpu_client(asynchronous=True)\n\n\ndef make_gpu_client(distributed_client=None, node_id=0, platform_name=None):\n \"\"\"Returns a GPU client. BFC allocator is used by default.\"\"\"\n allocator = os.getenv('XLA_PYTHON_CLIENT_ALLOCATOR', 'default').lower()\n memory_fraction = os.getenv('XLA_PYTHON_CLIENT_MEM_FRACTION')\n preallocate = os.getenv('XLA_PYTHON_CLIENT_PREALLOCATE')\n if allocator not in ('default', 'platform', 'bfc', 'cuda_async'):\n raise ValueError(\n 'XLA_PYTHON_CLIENT_ALLOCATOR env var must be \"default\", \"platform\", '\n '\"bfc\", or \"cuda_async\", got \"%s\"' % allocator)\n config = _xla.GpuAllocatorConfig()\n if allocator == 'default':\n config.kind = _xla.GpuAllocatorConfig.Kind.DEFAULT\n if allocator == 'platform':\n config.kind = _xla.GpuAllocatorConfig.Kind.PLATFORM\n if allocator == 'bfc':\n config.kind = _xla.GpuAllocatorConfig.Kind.BFC\n if allocator == 'cuda_async':\n config.kind = _xla.GpuAllocatorConfig.Kind.CUDA_ASYNC\n if memory_fraction:\n config.memory_fraction = float(memory_fraction)\n config.preallocate = preallocate not in ('0', 'false', 'False')\n\n return _xla.get_gpu_client(\n asynchronous=True,\n allocator_config=config,\n distributed_client=distributed_client,\n node_id=node_id,\n platform_name=platform_name)\n\n\ndef make_tpu_client():\n \"\"\"Returns a TPU client. 
Defaults to allowing 32 in-flight computations.\"\"\"\n max_inflight_computations = os.getenv(\n 'JAX_TPU_MAX_INFLIGHT_COMPUTATIONS', '32')\n try:\n max_inflight_computations = int(max_inflight_computations)\n except ValueError as e:\n raise ValueError(\n f'JAX_TPU_MAX_INFLIGHT_COMPUTATIONS env var must be an int, '\n f'got {max_inflight_computations}') from e\n return _xla.get_tpu_client(\n max_inflight_computations=max_inflight_computations)\n\n\nclass OpMetadata:\n \"\"\"Python representation of a xla.OpMetadata protobuf.\"\"\"\n __slots__ = ('op_type', 'op_name', 'source_file', 'source_line')\n\n def __init__(self, op_type='', op_name='', source_file='', source_line=0):\n self.op_type = op_type\n self.op_name = op_name\n self.source_file = source_file\n self.source_line = source_line\n\n\ndef CurrentSourceInfoMetadata(op_type=None, op_name=None, skip_frames=1):\n \"\"\"Helper for use in source mapping that returns an OpMetadata object.\"\"\"\n full_filename, lineno = inspect.stack()[skip_frames][1:3]\n filename = os.path.basename(full_filename)\n return OpMetadata(\n op_type=op_type,\n op_name=op_name,\n source_file=filename,\n source_line=lineno)\n\n\nPrimitiveType = _xla.PrimitiveType\n\nbfloat16 = _xla.bfloat16_dtype()\n\nXLA_ELEMENT_TYPE_TO_DTYPE = {\n PrimitiveType.PRED: np.dtype('bool'),\n PrimitiveType.S8: np.dtype('int8'),\n PrimitiveType.S16: np.dtype('int16'),\n PrimitiveType.S32: np.dtype('int32'),\n PrimitiveType.S64: np.dtype('int64'),\n PrimitiveType.U8: np.dtype('uint8'),\n PrimitiveType.U16: np.dtype('uint16'),\n PrimitiveType.U32: np.dtype('uint32'),\n PrimitiveType.U64: np.dtype('uint64'),\n PrimitiveType.BF16: np.dtype(bfloat16),\n PrimitiveType.F16: np.dtype('float16'),\n PrimitiveType.F32: np.dtype('float32'),\n PrimitiveType.F64: np.dtype('float64'),\n PrimitiveType.C64: np.dtype('complex64'),\n PrimitiveType.C128: np.dtype('complex128'),\n PrimitiveType.TUPLE: np.dtype(np.object_),\n PrimitiveType.TOKEN: np.dtype(np.object_),\n}\n\n# Note the conversion on the key. Numpy has a known issue wherein dtype hashing\n# doesn't work as expected (https://github.com/numpy/numpy/issues/7242). 
Thus,\n# when keying by dtype in this dict, we use the string form of dtypes.\nDTYPE_TO_XLA_ELEMENT_TYPE = {\n str(dt): et for et, dt in XLA_ELEMENT_TYPE_TO_DTYPE.items()\n}\n\n\ndef dtype_to_etype(dtype):\n \"\"\"Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.\"\"\"\n return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]\n\n\nShape = _xla.Shape\nShape.__doc__ = \"\"\"\nA Shape is an object defined in C++ that duck types like the following class:\n\nclass Shape:\n '''Represents an XLA shape.\n\n A shape is either an array shape, having rank-many integer\n dimensions and an element type (represented by a Numpy dtype), or it\n is a tuple shape, having a shape for every tuple component:\n\n type shape =\n TupleShape of shape list\n | ArrayShape of { dimensions: int list; element_type: dtype }\n '''\n\n @staticmethod\n def tuple_shape(tuple_shapes) -> Shape:\n \"Construct a tuple shape.\"\n\n @staticmethod\n def array_shape(element_type, dimensions, minor_to_major=None) -> Shape:\n\n @staticmethod\n def from_pyval(pyval) -> Shape:\n \"Returns a Shape that describes a tuple-tree of Numpy arrays.\"\n\n def __init__(self, str) -> Shape:\n \"Parses a shape string.\"\n def __eq__(self, other: Shape) -> bool:\n def __ne__(self, other: Shape) -> bool:\n def __hash__(self):\n def __repr__(self):\n def is_tuple(self) -> bool:\n def is_array(self) -> bool:\n def tuple_shapes(self) -> [Shape]:\n def numpy_dtype(self) -> np.dtype:\n \"Like element_type(), but returns dtype('O') for a tuple shape.\"\n def xla_element_type(self) -> PrimitiveType:\n def element_type(self) -> np.dtype:\n def dimensions(self) -> (int, int, ...):\n def rank(self) -> int:\n def with_major_to_minor_layout_if_absent(self) -> Shape:\n \"Returns a copy with missing layouts set to major-to-minor.\"\n\n def to_serialized_proto(self) -> bytes:\n \"Returns 'shape' as a serialized proto.\"\n\"\"\"\n\nProgramShape = _xla.ProgramShape\nProgramShape.__doc__ = \"\"\"\nA ProgramShape is a C++ object that duck types like the following class.\n\nclass ProgramShape:\n def __init__(self, parameter_shapes, result_shape):\n def parameter_shapes(self) -> [Shape]:\n def result_shape(self) -> Shape:\n def __repr__(self):\n\"\"\"\n\nShapeIndex = _xla.ShapeIndex\nShapeIndex.__doc__ = \"\"\"\nA Shape is an object defined in C++ that duck types like the following class:\n\nclass ShapeIndex:\n '''Represents an XLA ShapeIndex.\n\n An index for specifying a particular nested subshape within a shape. Used in\n ShapeUtil::GetSubshape and other interfaces. ShapeIndex defines a path through\n the Shape tree where each element of ShapeIndex indexes into a tuple (or\n nested tuple) within the shape. 
For a non-nested tuple, an index has a single\n element.\n '''\n\n def __init__(self, List[int]) -> ShapeIndex:\n def __eq__(self, other: Shape) -> bool:\n def __ne__(self, other: Shape) -> bool:\n def __hash__(self):\n def __repr__(self):\n\"\"\"\n\n\ndef shape_from_pyval(pyval):\n \"\"\"Returns a Shape that describes a tuple-tree of Numpy arrays.\"\"\"\n\n def convert(pyval):\n if isinstance(pyval, tuple):\n return Shape.tuple_shape(tuple(convert(elt) for elt in pyval))\n else:\n return Shape.array_shape(pyval.dtype, np.shape(pyval))\n\n return convert(pyval)\n\n\nDeviceAssignment = _xla.DeviceAssignment\nDeviceAssignment.__doc__ = \"\"\"\nA DeviceAssignment is a C++ object with the following signature.\n\ndef create(assignment):\n '''Builds a device assignment.\n\n Args:\n assignment: a 2D numpy array of device ordinal integers, indexed by\n [replica][computation_in_replica].\n Returns:\n A device assignment.\n '''\n\ndef replica_count():\n '''Returns the number of replicas.'''\ndef computation_count():\n '''Returns the number of computations per replica.'''\n\"\"\"\n\nDevice = _xla.Device\nCompileOptions = _xla.CompileOptions\n\nHostBufferSemantics = _xla.HostBufferSemantics\n\n# An Executable is a C++ class that duck types with the following API:\n# class Executable:\n# def local_devices(self) -> [Device]:\n# def execute(self, arguments : [Buffer]) -> Buffer:\n# \"\"\"Execute on one replica with Buffer arguments and return value.\"\"\"\n#\n# def size_of_generated_code_in_bytes(self) -> int:\n# \"\"\"Return generated binary size, or -1 if not known.\"\"\"\n#\n# def execute_sharded_on_local_devices(self, arguments: [[Buffer]])\n# -> [Buffer]:\n# \"\"\"Execute on many replicas with Buffer arguments and return value.\n#\n# Args:\n# arguments: A sequence of sequences of Buffers. 
The i'th element of each\n# sequence comprises the arguments for execution on the i'th local\n# device.\n#\n# Returns:\n# A list of the computation's outputs as a list of Buffers for each\n# device.\n# \"\"\"\n#\n# There are different implementations of Executable for different backends.\n\n\ndef execute_with_python_values(executable, arguments, backend):\n \"\"\"Execute on one replica with Python values as arguments and output.\"\"\"\n\n def put(arg):\n return backend.buffer_from_pyval(arg, device=executable.local_devices()[0])\n\n arguments = [put(arg) for arg in arguments]\n outputs = executable.execute(arguments)\n return [x.to_py() for x in outputs]\n\n\ndef execute_with_python_values_replicated(executable, arguments, backend):\n \"\"\"Execute on many replicas with Python values as arguments and output.\n\n Args:\n executable: the program to run.\n arguments: a list of lists of Python values indexed by `[replica][arg_num]`\n to pass as inputs.\n backend: the backend we are targeting.\n\n Returns:\n A list of python values, one per replica.\n \"\"\"\n devices = executable.local_devices()\n\n # pylint: disable=g-complex-comprehension\n def copy_to_devices(pyvals):\n return [backend.buffer_from_pyval(v, d) for v, d in zip(pyvals, devices)]\n\n inputs = [copy_to_devices(pyvals) for pyvals in zip(*arguments)]\n outputs = executable.execute_sharded_on_local_devices(inputs)\n return [[x.to_py() for x in xs] for xs in zip(*outputs)]\n\n\nclass PaddingType(enum.Enum):\n VALID = 1\n SAME = 2\n\n\ndef window_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims,\n window_strides):\n \"\"\"Maps PaddingType or string to pad values (list of pairs of ints).\"\"\"\n if not isinstance(padding_type, (str, PaddingType)):\n msg = 'padding_type must be str or PaddingType, got {}.'\n raise TypeError(msg.format(type(padding_type)))\n\n if isinstance(padding_type, str):\n if padding_type.upper() == 'VALID':\n padding_type = PaddingType.VALID\n elif padding_type.upper() == 'SAME':\n padding_type = PaddingType.SAME\n else:\n msg = 'Unknown padding type string: expected \"VALID\" or \"SAME\", got {}.'\n raise ValueError(msg.format(padding_type))\n\n if padding_type == PaddingType.VALID:\n return [(0, 0)] * len(window_strides)\n elif padding_type == PaddingType.SAME:\n out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int)\n pad_sizes = [\n max((out_size - 1) * stride + filter_size - in_size, 0)\n for out_size, stride, filter_size, in_size in zip(\n out_shape, window_strides, rhs_dims, lhs_dims)\n ]\n return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]\n else:\n msg = 'Unexpected PaddingType value: {}'\n raise ValueError(msg.format(padding_type))\n\n\nXlaBuilder = _xla.XlaBuilder\nXlaComputation = _xla.XlaComputation\nXlaOp = _xla.XlaOp\nFftType = _xla.FftType\nClient = _xla.Client\nBuffer = _xla.Buffer\nDeviceArrayBase = _xla.DeviceArrayBase\nExecutable = _xla.Executable\nOpSharding = _xla.OpSharding\n\n\ndef register_custom_call_target(name, fn, platform='cpu'):\n \"\"\"Registers a custom call target.\n\n Args:\n name: bytes containing the name of the function.\n fn: a PyCapsule object containing the function pointer.\n platform: the target platform.\n \"\"\"\n # To support AMD GPUs, we need to have xla_platform_names[\"gpu\"] == \"ROCM\"\n # Since that is hardcoded to CUDA, we are using the following as workaround.\n _xla.register_custom_call_target(name, fn,\n xla_platform_names.get(platform, platform))\n\n\n# Deprecated. 
Use register_custom_call_target instead.\nregister_cpu_custom_call_target = register_custom_call_target\n\n\nclass PaddingConfigDimension:\n \"\"\"Python representation of a xla.PaddingConfigDimension protobuf.\"\"\"\n __slots__ = ('edge_padding_low', 'edge_padding_high', 'interior_padding')\n\n edge_padding_low: int\n edge_padding_high: int\n interior_padding: int\n\n def __init__(self):\n self.edge_padding_low = 0\n self.edge_padding_high = 0\n self.interior_padding = 0\n\n\nclass PaddingConfig:\n \"\"\"Python representation of a xla.PaddingConfig protobuf.\"\"\"\n __slots__ = ('dimensions',)\n\n def __init__(self):\n self.dimensions = []\n\n\ndef make_padding_config(\n padding_config: Union[PaddingConfig, Sequence[Tuple[int, int, int]]]\n) -> PaddingConfig:\n \"\"\"Create PaddingConfig proto from list of triples of integers.\n\n Args:\n padding_config: either a PaddingConfig or a list of integer triples\n (edge_padding_low, edge_padding_high, interior_padding) representing the\n configuration of the padding operation.\n\n Returns:\n A `PaddingConfig` object.\n \"\"\"\n if not isinstance(padding_config, PaddingConfig):\n triples = padding_config\n padding_config = PaddingConfig()\n for lo, hi, interior in triples:\n dimension = PaddingConfigDimension()\n dimension.edge_padding_low = lo\n dimension.edge_padding_high = hi\n dimension.interior_padding = interior\n padding_config.dimensions.append(dimension)\n return padding_config\n\n\nclass DotDimensionNumbers:\n \"\"\"Python representation of a xla.DotDimensionNumbers protobuf.\"\"\"\n __slots__ = ('lhs_contracting_dimensions', 'rhs_contracting_dimensions',\n 'lhs_batch_dimensions', 'rhs_batch_dimensions')\n\n def __init__(self):\n self.lhs_contracting_dimensions = []\n self.rhs_contracting_dimensions = []\n self.lhs_batch_dimensions = []\n self.rhs_batch_dimensions = []\n\n\ndef make_dot_dimension_numbers(\n dimension_numbers: Union[DotDimensionNumbers,\n Tuple[Tuple[List[int], List[int]],\n Tuple[List[int], List[int]]]]\n) -> DotDimensionNumbers:\n \"\"\"Builds a DotDimensionNumbers object from a specification.\n\n Args:\n dimension_numbers: either a `DotDimensionNumbers` or a nested tuple\n `((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))` of lists of\n integers representing the dimensions to treat as contracting dimensions\n and batch dimensions on each input operand.\n\n Returns:\n A `DotDimensionNumbers` object.\n \"\"\"\n if isinstance(dimension_numbers, (list, tuple)):\n (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers\n dot_dims_proto = DotDimensionNumbers()\n dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract)\n dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract)\n dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch)\n dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch)\n return dot_dims_proto\n else:\n return dimension_numbers\n\n\nclass ConvolutionDimensionNumbers:\n \"\"\"Python representation of a xla.ConvolutionDimensionNumbers protobuf.\"\"\"\n __slots__ = ('input_batch_dimension', 'input_feature_dimension',\n 'input_spatial_dimensions', 'kernel_input_feature_dimension',\n 'kernel_output_feature_dimension', 'kernel_spatial_dimensions',\n 'output_batch_dimension', 'output_feature_dimension',\n 'output_spatial_dimensions')\n\n def __init__(self):\n self.input_batch_dimension = 0\n self.input_feature_dimension = 0\n self.input_spatial_dimensions = []\n self.kernel_input_feature_dimension = 0\n self.kernel_output_feature_dimension = 0\n self.kernel_spatial_dimensions = 
[]\n self.output_batch_dimension = 0\n self.output_feature_dimension = 0\n self.output_spatial_dimensions = []\n\n\ndef make_convolution_dimension_numbers(\n dimension_numbers: Union[None, ConvolutionDimensionNumbers, Tuple[str, str,\n str]],\n num_spatial_dimensions: int) -> ConvolutionDimensionNumbers:\n \"\"\"Builds a ConvolutionDimensionNumbers object from a specification.\n\n Args:\n dimension_numbers: optional, either a ConvolutionDimensionNumbers object or\n a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of\n length N+2 identifying by position: (1) batch dimensions in lhs, rhs, and\n the output with the character 'N', (2) feature dimensions in lhs and the\n output with the character 'C', (3) input and output feature dimensions\n in rhs with the characters 'I' and 'O' respectively, and (4) spatial\n dimension correspondences between lhs, rhs, and the output using any\n distinct characters. For example, to indicate dimension numbers\n consistent with the Conv operation with two spatial dimensions, one\n could use ('NCHW', 'OIHW', 'NCHW'). As another example, to indicate\n dimension numbers consistent with the TensorFlow Conv2D operation, one\n could use ('NHWC', 'HWIO', 'NHWC'). When using the latter form of\n convolution dimension specification, window strides are associated with\n spatial dimension character labels according to the order in which the\n labels appear in the rhs_spec string, so that window_strides[0] is\n matched with the dimension corresponding to the first character\n appearing in rhs_spec that is not 'I' or 'O'. By default, use the same\n dimension numbering as Conv and ConvWithGeneralPadding.\n num_spatial_dimensions: the number of spatial dimensions.\n\n Returns:\n A `ConvolutionDimensionNumbers` object.\n \"\"\"\n if dimension_numbers is None:\n nd = num_spatial_dimensions\n dimension_numbers = ConvolutionDimensionNumbers()\n dimension_numbers.input_batch_dimension = 0\n dimension_numbers.input_feature_dimension = 1\n dimension_numbers.output_batch_dimension = 0\n dimension_numbers.output_feature_dimension = 1\n dimension_numbers.kernel_output_feature_dimension = 0\n dimension_numbers.kernel_input_feature_dimension = 1\n dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd))\n dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + nd))\n dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd))\n elif isinstance(dimension_numbers, tuple):\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n dimension_numbers = ConvolutionDimensionNumbers()\n\n dimension_numbers.input_batch_dimension = lhs_spec.index('N')\n dimension_numbers.input_feature_dimension = lhs_spec.index('C')\n dimension_numbers.output_batch_dimension = out_spec.index('N')\n dimension_numbers.output_feature_dimension = out_spec.index('C')\n dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O')\n dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I')\n\n dimension_numbers.kernel_spatial_dimensions.extend(\n i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'})\n dimension_numbers.input_spatial_dimensions.extend(\n sorted((i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}),\n key=lambda i: rhs_spec.index(lhs_spec[i])))\n dimension_numbers.output_spatial_dimensions.extend(\n sorted((i for i, c in enumerate(out_spec) if c not in {'N', 'C'}),\n key=lambda i: rhs_spec.index(out_spec[i])))\n return dimension_numbers\n\n\nclass PrecisionConfig:\n \"\"\"Python representation of a xla.PrecisionConfig protobuf.\"\"\"\n 
__slots__ = ('operand_precision',)\n\n Precision = _xla.PrecisionConfig_Precision\n\n def __init__(self):\n self.operand_precision = []\n\n\nclass GatherDimensionNumbers:\n \"\"\"Python representation of a xla.GatherDimensionNumbers protobuf.\"\"\"\n __slots__ = ('offset_dims', 'collapsed_slice_dims', 'start_index_map',\n 'index_vector_dim')\n\n def __init__(self):\n self.offset_dims = []\n self.collapsed_slice_dims = []\n self.start_index_map = []\n self.index_vector_dim = 0\n\n\nclass ScatterDimensionNumbers:\n \"\"\"Python representation of a xla.ScatterDimensionNumbers protobuf.\"\"\"\n __slots__ = ('update_window_dims', 'inserted_window_dims',\n 'scatter_dims_to_operand_dims', 'index_vector_dim')\n\n def __init__(self):\n self.update_window_dims = []\n self.inserted_window_dims = []\n self.scatter_dims_to_operand_dims = []\n self.index_vector_dim = 0\n\n\nclass ReplicaGroup:\n \"\"\"Python representation of a xla.ReplicaGroup protobuf.\"\"\"\n __slots__ = ('replica_ids',)\n\n def __init__(self):\n self.replica_ids = []\n\n\ndef _make_replica_group_proto(replica_group):\n replica_group_proto = ReplicaGroup()\n replica_group_proto.replica_ids.extend(replica_group)\n return replica_group_proto\n\n\ndef make_replica_groups(replica_groups):\n if replica_groups is None:\n replica_groups_protos = [] # special value for XLA API\n else:\n replica_groups = list(replica_groups)\n replica_groups_protos = [\n _make_replica_group_proto(group) for group in replica_groups\n ]\n return replica_groups_protos\n\n\nTraceback = _xla.Traceback\nFrame = _xla.Frame\n\n\[email protected]\ndef tracebacks(enabled=True):\n \"\"\"Context manager that enables or disables traceback collection.\"\"\"\n saved = Traceback.enabled\n Traceback.enabled = enabled\n try:\n yield\n finally:\n Traceback.enabled = saved\n\n\ndef heap_profile(client: Client) -> bytes:\n \"\"\"Returns a gzipped pprof protocol buffer containing a heap profile.\"\"\"\n return gzip.compress(client.heap_profile())\n\n\nXlaRuntimeError = _xla.XlaRuntimeError\n\n# Perform one last garbage collection of deferred Python references. This is\n# mostly to keep ASAN happy.\natexit.register(_xla.collect_garbage)\n" ]
[ [ "numpy.dtype", "numpy.shape", "numpy.true_divide" ] ]
simran2905/pytorch-lightning
[ "024cf23c67fb92fabb2d238bd33d73b24aafa7a9" ]
[ "pytorch_lightning/trainer/training_loop.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager, suppress\nfrom functools import partial, update_wrapper\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch.optim import Optimizer\n\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType\nfrom pytorch_lightning.utilities.distributed import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.finite_checks import detect_nan_parameters\nfrom pytorch_lightning.utilities.grads import grad_norm\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(\n self,\n trainer,\n max_epochs: Optional[int],\n min_epochs: Optional[int],\n max_steps: Optional[int],\n min_steps: Optional[int],\n num_sanity_val_steps: int,\n ):\n self.trainer = trainer\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self.running_loss = TensorRunningAccum(window_length=20)\n self._skip_backward = False\n self._optimizer_freq_cumsum = None\n self._hiddens = None\n\n self.global_step = 0\n self.current_epoch = 0\n self.trainer.should_stop = False\n\n # the total batch index across all epochs\n self.total_batch_idx = 0\n # the current batch index in the loop that runs over the dataloader(s)\n self.batch_idx = 0\n # the current split index when the batch gets split into chunks in truncated backprop through time\n self.split_idx = None\n\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.max_steps = max_steps\n self.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n self.results = ResultCollection(training=True)\n\n @property\n def num_active_optimizers(self) -> int:\n return len(self.get_active_optimizers())\n\n @property\n def optimizer_freq_cumsum(self):\n if self._optimizer_freq_cumsum is None:\n self._optimizer_freq_cumsum = 
np.cumsum(self.trainer.optimizer_frequencies)\n return self._optimizer_freq_cumsum\n\n def should_skip_training(self) -> bool:\n should_by_max_steps = self.max_steps is not None and self.global_step >= self.max_steps\n should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n self.results.to(device=self.trainer.lightning_module.device)\n\n self.trainer.call_hook(\"on_train_start\")\n\n def on_train_end(self):\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n # reset bookkeeping\n self.trainer.state.stage = None\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last and cb.verbose for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.lightning_module\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.current_epoch = epoch\n\n model = self.trainer.lightning_module\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # hook\n self.trainer.logger_connector.on_epoch_start()\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]\n\n processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)\n\n # hook\n self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n self.trainer.logger_connector.on_batch_end()\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n def reset_train_val_dataloaders(self, model) -> None:\n \"\"\"\n Resets train and val dataloaders if none are attached to the trainer.\n\n The val 
dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n \"\"\"\n if self.trainer.train_dataloader is None:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n hook_overridden = self._should_add_batch_output_to_epoch_output()\n if not hook_overridden:\n return\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if (\n isinstance(opt_outputs, list) and len(opt_outputs) == 1\n and not isinstance(opt_outputs[0], ResultCollection)\n ):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def _should_add_batch_output_to_epoch_output(self) -> bool:\n # We add to the epoch outputs if\n # 1. The model defines training_epoch_end OR\n # 2. The model overrides on_train_epoch_end which has `outputs` in the signature\n # TODO: in v1.5 this only needs to check if training_epoch_end is overridden\n lightning_module = self.trainer.lightning_module\n if is_overridden(\"training_epoch_end\", lightning_module):\n return True\n\n if is_overridden(\"on_train_epoch_end\", lightning_module):\n model_hook_fx = getattr(lightning_module, \"on_train_epoch_end\")\n if is_param_in_hook_signature(model_hook_fx, \"outputs\"):\n return True\n\n return False\n\n def get_active_optimizers(self, batch_idx: Optional[int] = None) -> List[Tuple[int, Optimizer]]:\n \"\"\"\n Returns the currently active optimizers. When multiple optimizers are used with different frequencies,\n only one of the optimizers is active at a time.\n\n Returns:\n A list of tuples (opt_idx, optimizer) of currently active optimizers.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n batch_idx = self.total_batch_idx if batch_idx is None else batch_idx\n optimizers_loop_length = self.optimizer_freq_cumsum[-1]\n current_place_in_loop = batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = int(np.argmax(self.optimizer_freq_cumsum > current_place_in_loop))\n return [(opt_idx, self.trainer.optimizers[opt_idx])]\n\n def on_after_backward(self, batch_idx, untouched_loss):\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n elif self.trainer.lightning_module.automatic_optimization:\n if not any((\n isinstance(training_step_output, torch.Tensor),\n (isinstance(training_step_output, Mapping)\n and 'loss' in training_step_output), training_step_output is None\n )):\n raise MisconfigurationException(\n \"In automatic optimization, `training_step` must either return a Tensor, \"\n \"a dict with key 'loss' or 
None (where the step will be skipped).\"\n )\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.lightning_module\n\n with self.trainer.profiler.profile(\"model_forward\"):\n step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator.training_step(step_kwargs)\n self.trainer.accelerator.post_training_step()\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self._process_training_step_output(training_step_output)\n if training_step_output is None:\n return\n\n closure_loss = None\n loss = None\n if self.trainer.lightning_module.automatic_optimization:\n # accumulate loss. if accumulate_grad_batches==1, no effect\n closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches\n # the loss will get scaled for amp. avoid any modifications to it\n loss = closure_loss.detach().clone()\n return AttributeDict(closure_loss=closure_loss, loss=loss, training_step_output=training_step_output)\n\n def _process_training_step_output(self, training_step_output):\n if training_step_output is None:\n return None\n\n results = self.results\n loss = None\n hiddens = None\n results.extra = {}\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n if hiddens is not None:\n hiddens = hiddens.detach()\n results.extra = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n # map to results under the hood\n results.minimize = loss\n self._hiddens = hiddens\n\n if self.trainer.move_metrics_to_cpu:\n results.cpu()\n return results\n\n @staticmethod\n def _prepare_outputs(\n outputs: List[List[List['ResultCollection']]],\n batch_mode: bool,\n ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:\n \"\"\"\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``ResultCollection`` objects with dimensions:\n ``[optimizer outs][batch outs][tbptt steps]``.\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``ResultCollection`` objects converted to dictionaries.\n All list dimensions of size one will be collapsed.\n \"\"\"\n processed_outputs = []\n for opt_outputs in outputs:\n # handle an edge case where an optimizer output is the empty list\n if len(opt_outputs) == 0:\n continue\n\n processed_batch_outputs = []\n\n if batch_mode:\n opt_outputs = [opt_outputs]\n\n for batch_outputs in opt_outputs:\n processed_tbptt_outputs = []\n\n if isinstance(batch_outputs, ResultCollection):\n batch_outputs = [batch_outputs]\n\n for tbptt_output in batch_outputs:\n out = tbptt_output.extra\n if tbptt_output.minimize is not None:\n out['loss'] = tbptt_output.minimize.detach()\n processed_tbptt_outputs.append(out)\n\n # if there was only one tbptt step then we can collapse that dimension\n if len(processed_tbptt_outputs) == 1:\n processed_tbptt_outputs = processed_tbptt_outputs[0]\n processed_batch_outputs.append(processed_tbptt_outputs)\n\n # batch_outputs should be just 
one dict (or a list of dicts if using tbptt) per optimizer\n if batch_mode:\n processed_batch_outputs = processed_batch_outputs[0]\n processed_outputs.append(processed_batch_outputs)\n\n # if there is only one optimiser then we collapse that dimension\n if len(processed_outputs) == 1:\n processed_outputs = processed_outputs[0]\n return processed_outputs\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.lightning_module\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer) -> dict:\n # track gradient norms\n grad_norm_dict = {}\n if (self.global_step + 1) % self.trainer.log_every_n_steps == 0 and float(self.trainer.track_grad_norm) > 0:\n grad_norm_dict = grad_norm(self.trainer.lightning_module, self.trainer.track_grad_norm)\n\n # clip gradients\n self.trainer.accelerator.clip_gradients(\n optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm\n )\n return grad_norm_dict\n\n def _tbptt_split_batch(self, batch: Any) -> List[Any]:\n splits = [batch]\n truncated_bptt_enabled = self._truncated_bptt_enabled()\n if truncated_bptt_enabled:\n model_ref = self.trainer.lightning_module\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_active_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n batch_idx = None\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n self.batch_idx = batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n # hook\n self.on_train_batch_end(\n epoch_output,\n batch_output.training_step_output,\n batch,\n batch_idx,\n dataloader_idx,\n )\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS AND PROGRESS_BAR\n # 
-----------------------------------------\n self.trainer.logger_connector.update_train_step_metrics()\n\n # -----------------------------------------\n # VALIDATE IF NEEDED\n # -----------------------------------------\n should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.validating = True\n self.trainer._run_evaluation()\n self.trainer.training = True\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n self.update_lr_schedulers('step')\n self.trainer.checkpoint_connector.has_trained = True\n\n self.total_batch_idx += 1\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n max_steps_reached = (self.max_steps is not None and self.max_steps <= self.global_step)\n if max_steps_reached or self.trainer.should_stop or self._num_training_batches_reached(is_last_batch):\n break\n\n if batch_idx is None:\n # dataloader/iterator did not produce a batch\n return\n\n # handle epoch_output on epoch end\n self.on_train_epoch_end(epoch_output)\n\n # the global step is manually decreased here due to backwards compatibility with existing loggers\n # as they expect that the same step is used when logging epoch end metrics even when the batch loop has\n # finished. this means the attribute does not exactly track the number of optimizer steps applied.\n # TODO(@carmocca): deprecate and rename so users don't get confused\n self.global_step -= 1\n # log epoch metrics\n self.trainer.logger_connector.update_train_epoch_metrics()\n self.global_step += 1\n\n self.update_lr_schedulers('epoch')\n\n did_train_only = self.trainer.disable_validation or self.trainer.evaluation_loop.should_skip_evaluation(\n self.trainer.num_val_batches\n )\n if did_train_only:\n self.global_step -= 1\n self.check_checkpoint_callback(True)\n self.global_step += 1\n\n def on_train_epoch_end(self, epoch_output: List[List[List['ResultCollection']]]) -> None:\n # inform logger the batch loop has finished\n self.trainer.logger_connector.epoch_end_reached()\n\n # prepare epoch output\n processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)\n\n # get the model and call model.training_epoch_end\n model = self.trainer.lightning_module\n\n if is_overridden('training_epoch_end', model):\n # run training_epoch_end\n # refresh the result for custom logging at the epoch level\n model._current_fx_name = 'training_epoch_end'\n training_epoch_end_output = model.training_epoch_end(processed_epoch_output)\n\n if training_epoch_end_output is not None:\n raise MisconfigurationException(\n 'training_epoch_end expects a return of None. 
'\n 'HINT: remove the return statement in training_epoch_end'\n )\n\n # call train epoch end hooks\n self._on_train_epoch_end_hook(processed_epoch_output)\n self.trainer.call_hook('on_epoch_end')\n self.trainer.logger_connector.on_epoch_end()\n\n def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:\n # We cannot rely on Trainer.call_hook because the signatures might be different across\n # lightning module and callback\n # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`\n\n # This implementation is copied from Trainer.call_hook\n hook_name = \"on_train_epoch_end\"\n prev_fx_name = self.trainer.lightning_module._current_fx_name\n self.trainer.lightning_module._current_fx_name = hook_name\n\n # always profile hooks\n with self.trainer.profiler.profile(hook_name):\n\n # first call trainer hook\n if hasattr(self.trainer, hook_name):\n trainer_hook = getattr(self.trainer, hook_name)\n trainer_hook(processed_epoch_output)\n\n # next call hook in lightningModule\n model_ref = self.trainer.lightning_module\n if is_overridden(hook_name, model_ref):\n hook_fx = getattr(model_ref, hook_name)\n if is_param_in_hook_signature(hook_fx, \"outputs\"):\n self.warning_cache.warn(\n \"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3.\"\n \" `outputs` parameter has been deprecated.\"\n \" Support for the old signature will be removed in v1.5\", DeprecationWarning\n )\n model_ref.on_train_epoch_end(processed_epoch_output)\n else:\n model_ref.on_train_epoch_end()\n\n # call the accelerator hook\n if hasattr(self.trainer.accelerator, hook_name):\n accelerator_hook = getattr(self.trainer.accelerator, hook_name)\n accelerator_hook()\n\n # restore current_fx when nested context\n self.trainer.lightning_module._current_fx_name = prev_fx_name\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # bookkeeping\n self._hiddens = None\n\n optimizers = list(enumerate(self.trainer.optimizers))\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(optimizers))]\n\n if batch is None:\n self.warning_cache.warn(\"train_dataloader yielded None. 
If this was on purpose, ignore this warning...\")\n return AttributeDict(signal=0, training_step_output=batch_outputs)\n\n # hook\n self.trainer.logger_connector.on_batch_start()\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1)\n\n # lightning module hook\n splits = self._tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n self.split_idx = split_idx\n\n # let logger connector extract batch size\n self.trainer.logger_connector.on_train_split_start(batch_idx, split_idx, split_batch)\n\n if self.trainer.lightning_module.automatic_optimization:\n for opt_idx, optimizer in self.get_active_optimizers(batch_idx):\n result = self._run_optimization(batch_idx, split_batch, opt_idx, optimizer)\n if result:\n batch_outputs[opt_idx].append(result.training_step_output)\n else:\n # in manual optimization, there is no looping over optimizers\n result = self._run_optimization(batch_idx, split_batch)\n if result:\n batch_outputs[0].append(result.training_step_output)\n\n return AttributeDict(signal=0, training_step_output=batch_outputs)\n\n def _run_optimization(self, batch_idx, split_batch, opt_idx=0, optimizer=None):\n # TODO: In v1.5, when optimizer_idx gets removed from training_step in manual_optimization, change\n # opt_idx=0 to opt_idx=None in the signature here\n\n # toggle model params\n self.run_optimization_start(opt_idx, optimizer)\n\n result = AttributeDict()\n closure = self.make_closure(split_batch, batch_idx, opt_idx, optimizer, self._hiddens, result)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n # automatic_optimization=True: perform ddp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n closure()\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n else:\n if self.trainer.lightning_module.automatic_optimization:\n self.optimizer_step(optimizer, opt_idx, batch_idx, closure)\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.lightning_module.untoggle_optimizer(opt_idx)\n else:\n result = self.training_step(split_batch, batch_idx, opt_idx, self._hiddens)\n\n if not result:\n # user decided to skip optimization\n return result\n\n # update running loss + reset accumulated loss\n self.update_running_loss(result.loss)\n\n self._process_closure_result(result)\n return result\n\n def training_step_and_backward_closure(\n self,\n split_batch: Any,\n batch_idx: int,\n opt_idx: int,\n optimizer: Optimizer,\n hiddens,\n return_result: AttributeDict,\n ) -> Optional[torch.Tensor]:\n\n result = self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)\n if result is not None:\n return_result.update(result)\n return return_result.loss\n\n def make_closure(self, *closure_args, **closure_kwargs: Any) -> Callable:\n \"\"\" Wraps the training step closure into a partial object which will be called within ``optimizer.step``. 
\"\"\"\n partial_func = partial(self.training_step_and_backward_closure, *closure_args, **closure_kwargs)\n return update_wrapper(partial_func, self.training_step_and_backward_closure)\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.trainer.lightning_module.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, opt_closure_result: Optional[AttributeDict]) -> None:\n if not opt_closure_result:\n return\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(opt_closure_result.loss)\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"Wrap forward, zero_grad and backward in a closure so second order methods work\"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n\n if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:\n is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0\n\n if is_first_batch_to_accumulate:\n self.on_before_zero_grad(optimizer)\n self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n\n # backward pass\n if result is not None:\n with self.trainer.profiler.profile(\"backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(result.loss)\n\n else:\n self.warning_cache.warn(\n \"training_step returned None. 
If this was on purpose, ignore this warning...\"\n )\n\n return result\n\n def _check_finite(self, loss: torch.Tensor) -> None:\n if not torch.isfinite(loss).all():\n raise ValueError(f'The loss returned in `training_step` is {loss}.')\n model = self.trainer.lightning_module\n detect_nan_parameters(model)\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n grad_norm_dict = self.track_and_norm_grad(optimizer=optimizer)\n if grad_norm_dict:\n self.trainer.lightning_module._current_fx_name = \"on_after_backward\"\n self.trainer.lightning_module.log_grad_norm(grad_norm_dict)\n\n def update_lr_schedulers(self, interval: str) -> None:\n if interval == \"step\":\n finished_accumulation = self._accumulated_batches_reached()\n finished_epoch = self._num_training_batches_reached()\n if not finished_accumulation and not finished_epoch:\n return\n self.trainer.optimizer_connector.update_learning_rates(\n interval=interval,\n opt_indices=[opt_idx for opt_idx, _ in self.get_active_optimizers()],\n )\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.global_step = self.trainer.accelerator.update_global_step(self.total_batch_idx, self.global_step)\n\n def _accumulated_batches_reached(self):\n return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:\n \"\"\" Decide if we should run validation. 
\"\"\"\n if not self.trainer.enable_validation:\n return False\n\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n if not is_val_check_epoch:\n return False\n\n # val_check_batch is inf for iterable datasets with no length defined\n is_infinite_dataset = self.trainer.val_check_batch == float('inf')\n if is_last_batch and is_infinite_dataset:\n return True\n\n if self.trainer.should_stop:\n return True\n\n # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch\n is_val_check_batch = is_last_batch\n if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset:\n is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0\n elif self.trainer.val_check_batch != float('inf'):\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n return is_val_check_batch\n\n def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])\n\n lightning_module = self.trainer.lightning_module\n\n if len(self.trainer.optimizers) > 1:\n training_step_fx = getattr(lightning_module, \"training_step\")\n has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, \"optimizer_idx\")\n if has_opt_idx_in_train_step:\n if not lightning_module.automatic_optimization:\n self.warning_cache.warn(\n \"`training_step` hook signature has changed in v1.3.\"\n \" `optimizer_idx` argument has been removed in case of manual optimization. Support for\"\n \" the old signature will be removed in v1.5\", DeprecationWarning\n )\n step_kwargs['optimizer_idx'] = opt_idx\n elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but\"\n ' `training_step` is missing the `optimizer_idx` argument.'\n )\n\n # pass hiddens if using tbptt\n if self._truncated_bptt_enabled():\n step_kwargs['hiddens'] = hiddens\n\n return step_kwargs\n\n def _truncated_bptt_enabled(self) -> bool:\n \"\"\" Temporary tbptt utilities until this flag is fully migrated to the lightning module. 
\"\"\"\n return self._truncated_bptt_steps() > 0\n\n def _truncated_bptt_steps(self) -> int:\n lightning_module = self.trainer.lightning_module\n # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5\n if lightning_module.truncated_bptt_steps > 0:\n return lightning_module.truncated_bptt_steps\n return self.trainer.truncated_bptt_steps or 0\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def run_optimization_start(self, opt_idx, optimizer):\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.lightning_module\n model.toggle_optimizer(optimizer, opt_idx)\n\n def update_running_loss(self, current_loss: torch.Tensor) -> None:\n if self.trainer.lightning_module.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(current_loss)\n\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n" ]
[ [ "torch.isfinite", "numpy.cumsum", "numpy.argmax" ] ]
vutuanhai237/QuantumTomography
[ "52916096482d7e7cd29782c049478bbba901d9bd" ]
[ "codes/tomography/shadow_tomography251.py" ]
[ "\nimport sys\nfrom itertools import combinations\nimport qiskit\nimport numpy as np\nimport tqix\n\n\nsys.path.insert(1, '../')\nimport qtm.base\nimport qtm.nqubit\nimport qtm.fubini_study\n\ndef self_tensor(matrix, n):\n product = matrix\n for i in range(1, n):\n product = np.kron(product, matrix)\n return product\n \nnum_qubits = 5\npsi = [0.26424641, 0.23103536, 0.11177099, 0.17962657, 0.18777508, 0.07123707,\n 0.20165063, 0.27101361, 0.21302122, 0.11930997, 0.09439792, 0.1763813,\n 0.28546319, 0.0394065, 0.19575109, 0.09014811, 0.12315693, 0.03726953,\n 0.10579994, 0.26516434, 0.21545716, 0.11265348, 0.20488736, 0.10268576,\n 0.27819402, 0.0785904, 0.09997989, 0.17438181, 0.16625928, 0.23213874,\n 0.01231226, 0.18198155]\n\nnum_layers = 2\nthetas = np.ones(num_layers*num_qubits*4)\n\nqc = qiskit.QuantumCircuit(num_qubits, num_qubits)\nqc.initialize(psi, range(0, num_qubits))\n\nloss_values = []\nthetass = []\n\nfor i in range(0, 400):\n grad_loss = qtm.base.grad_loss(\n qc,\n qtm.nqubit.create_Wchain_layerd_state,\n thetas, r=1/2, s=np.pi/2, num_layers=num_layers)\n if i == 0:\n m, v = list(np.zeros(thetas.shape[0])), list(\n np.zeros(thetas.shape[0]))\n thetas = qtm.base.adam(thetas, m, v, i, grad_loss)\n thetass.append(thetas.copy())\n qc_copy = qtm.nqubit.create_Wchain_layerd_state(\n qc.copy(), thetas, num_layers)\n loss = qtm.base.loss_basis(qtm.base.measure(\n qc_copy, list(range(qc_copy.num_qubits))))\n loss_values.append(loss)\nvariances = []\nfor thetas in thetass:\n qc = qiskit.QuantumCircuit(num_qubits, num_qubits)\n qc = qtm.nqubit.create_Wchain_layerd_state(\n qc, thetas, num_layers=num_layers).inverse()\n psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc).data\n variances.append((np.conjugate(np.transpose(psi_hat)) @ self_tensor(tqix.sigmaz(), num_qubits) @ psi_hat)\n ** 2 - (np.conjugate(np.transpose(psi)) @ self_tensor(tqix.sigmaz(), num_qubits) @ psi)**2)\n\n\nnp.savetxt(\"./thetass\" + str(num_qubits) + \".csv\",\n thetass,\n delimiter=\",\")\nnp.savetxt(\"./variances\" + str(num_qubits) + \".csv\",\n variances,\n delimiter=\",\")\n\nprint(min((abs(x), x) for x in variances)[1])\nprint(variances[-1])\n" ]
[ [ "numpy.ones", "numpy.kron", "numpy.transpose", "numpy.zeros" ] ]
Hakan-er/utma
[ "f7cd6253ec894047b460d4df9b43eeb9b109bae2" ]
[ "src/ml/Univariate_Time_Series/Exponenial_Univariate_Forecasting.py" ]
[ "from Load_And_Visualize_Time_Data import Load_and_Visualize_Time_Data\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sktime.forecasting.model_selection import temporal_train_test_split\nfrom sktime.forecasting.exp_smoothing import ExponentialSmoothing\nfrom sktime.utils.plotting import plot_series\nfrom sktime.performance_metrics.forecasting import smape_loss\n\n\nclass Exponential_Univariate_Forecasting:\n def __init__(self, path, time_freq, trend=None, seasonal=None, time_column=0, day_to_month=False,\n month_to_year=False, sp=12, test_size=0.2, model=\"additive\"):\n preload = Load_and_Visualize_Time_Data(path, time_column, model)\n self.data, self.columns = preload.return_data()\n preload.visualize_data()\n preload.decompose_time_series()\n preload.isstationary()\n if day_to_month and time_freq == 'M':\n self.day_to_month()\n elif month_to_year and time_freq == 'Y':\n self.day_to_month()\n self.month_to_year()\n else:\n sys.exit(\"time frequency and converted frequency does not match\")\n self.time_freq = time_freq\n self.trend = trend\n self.seasonal = seasonal\n self.sp = sp\n self.test_size = test_size\n\n def train_test_split(self):\n self.data.index = pd.PeriodIndex(self.data.index, freq=self.time_freq)\n self.y_train, self.y_test = temporal_train_test_split(self.data, test_size=self.test_size)\n\n def day_to_month(self):\n self.data = self.data.resample('M').sum()\n\n def month_to_year(self):\n self.data = self.data.resample('Y').sum()\n\n def forecast_and_visualize(self):\n forecaster = ExponentialSmoothing(trend=self.trend, seasonal=self.seasonal, sp=self.sp)\n forecaster.fit(self.y_train)\n fh = np.arange(1, len(self.y_test) + 1)\n y_pred = forecaster.predict(fh)\n plot_series(self.y_train, self.y_test, y_pred, labels=[\"y_train\", \"y_test\", \"y_pred\"]);\n print(\"Loss is :\", smape_loss(self.y_test, y_pred))\n" ]
[ [ "pandas.PeriodIndex" ] ]
lokinou/mne-python
[ "f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b" ]
[ "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-cluster-spatiotemporal-sensor:\n\n=====================================================\nSpatiotemporal permutation F-test on full sensor data\n=====================================================\n\nTests for differential evoked responses in at least\none condition using a permutation clustering test.\nThe FieldTrip neighbor templates will be used to determine\nthe adjacency between sensors. This serves as a spatial prior\nto the clustering. Spatiotemporal clusters will then\nbe visualized using custom matplotlib code.\n\nSee the `FieldTrip website`_ for a caveat regarding\nthe possible interpretation of \"significant\" clusters.\n\"\"\"\n# Authors: Denis Engemann <[email protected]>\n# Jona Sassenhagen <[email protected]>\n# Alex Rockhill <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport mne\nfrom mne.stats import spatio_temporal_cluster_test, combine_adjacency\nfrom mne.datasets import sample\nfrom mne.channels import find_ch_adjacency\nfrom mne.viz import plot_compare_evokeds\nfrom mne.time_frequency import tfr_morlet\n\nprint(__doc__)\n\n# %%\n# Set parameters\n# --------------\ndata_path = sample.data_path()\nmeg_path = data_path / 'MEG' / 'sample'\nraw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'\nevent_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'\nevent_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}\ntmin = -0.2\ntmax = 0.5\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\nraw.filter(1, 30, fir_design='firwin')\nevents = mne.read_events(event_fname)\n\n# %%\n# Read epochs for the channel of interest\n# ---------------------------------------\n\npicks = mne.pick_types(raw.info, meg='mag', eog=True)\n\nreject = dict(mag=4e-12, eog=150e-6)\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=None, reject=reject, preload=True)\n\nepochs.drop_channels(['EOG 061'])\nepochs.equalize_event_counts(event_id)\n\nX = [epochs[k].get_data() for k in event_id] # as 3D matrix\nX = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering\n\n\n# %%\n# Find the FieldTrip neighbor definition to setup sensor adjacency\n# ----------------------------------------------------------------\nadjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag')\n\nprint(type(adjacency)) # it's a sparse matrix!\n\nfig, ax = plt.subplots(figsize=(5, 4))\nax.imshow(adjacency.toarray(), cmap='gray', origin='lower',\n interpolation='nearest')\nax.set_xlabel('{} Magnetometers'.format(len(ch_names)))\nax.set_ylabel('{} Magnetometers'.format(len(ch_names)))\nax.set_title('Between-sensor adjacency')\nfig.tight_layout()\n\n# %%\n# Compute permutation statistic\n# -----------------------------\n#\n# How does it work? We use clustering to \"bind\" together features which are\n# similar. Our features are the magnetic fields measured over our sensor\n# array at different times. This reduces the multiple comparison problem.\n# To compute the actual test-statistic, we first sum all F-values in all\n# clusters. We end up with one statistic for each cluster.\n# Then we generate a distribution from the data by shuffling our conditions\n# between our samples and recomputing our clusters and the test statistics.\n# We test for the significance of a given cluster by computing the probability\n# of observing a cluster of that size. 
For more background read:\n# Maris/Oostenveld (2007), \"Nonparametric statistical testing of EEG- and\n# MEG-data\" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.\n# doi:10.1016/j.jneumeth.2007.03.024\n\n\n# set cluster threshold\nthreshold = 50.0 # very high, but the test is quite sensitive on this data\n# set family-wise p-value\np_accept = 0.01\n\ncluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,\n threshold=threshold, tail=1,\n n_jobs=1, buffer_size=None,\n adjacency=adjacency)\n\nF_obs, clusters, p_values, _ = cluster_stats\ngood_cluster_inds = np.where(p_values < p_accept)[0]\n\n# %%\n# Note. The same functions work with source estimate. The only differences\n# are the origin of the data, the size, and the adjacency definition.\n# It can be used for single trials or for groups of subjects.\n#\n# Visualize clusters\n# ------------------\n\n# configure variables for visualization\ncolors = {\"Aud\": \"crimson\", \"Vis\": 'steelblue'}\nlinestyles = {\"L\": '-', \"R\": '--'}\n\n# organize data for plotting\nevokeds = {cond: epochs[cond].average() for cond in event_id}\n\n# loop over clusters\nfor i_clu, clu_idx in enumerate(good_cluster_inds):\n # unpack cluster information, get unique indices\n time_inds, space_inds = np.squeeze(clusters[clu_idx])\n ch_inds = np.unique(space_inds)\n time_inds = np.unique(time_inds)\n\n # get topography for F stat\n f_map = F_obs[time_inds, ...].mean(axis=0)\n\n # get signals at the sensors contributing to the cluster\n sig_times = epochs.times[time_inds]\n\n # create spatial mask\n mask = np.zeros((f_map.shape[0], 1), dtype=bool)\n mask[ch_inds, :] = True\n\n # initialize figure\n fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))\n\n # plot average test statistic and mark significant sensors\n f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)\n f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',\n vmin=np.min, vmax=np.max, show=False,\n colorbar=False, mask_params=dict(markersize=10))\n image = ax_topo.images[0]\n\n # create additional axes (for ERF and colorbar)\n divider = make_axes_locatable(ax_topo)\n\n # add axes for colorbar\n ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(image, cax=ax_colorbar)\n ax_topo.set_xlabel(\n 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))\n\n # add new axis for time courses and plot time courses\n ax_signals = divider.append_axes('right', size='300%', pad=1.2)\n title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))\n if len(ch_inds) > 1:\n title += \"s (mean)\"\n plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,\n colors=colors, linestyles=linestyles, show=False,\n split_legend=True, truncate_yaxis='auto')\n\n # plot temporal cluster extent\n ymin, ymax = ax_signals.get_ylim()\n ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],\n color='orange', alpha=0.3)\n\n # clean up viz\n mne.viz.tight_layout(fig=fig)\n fig.subplots_adjust(bottom=.05)\n plt.show()\n\n# %%\n# Permutation statistic for time-frequencies\n# ------------------------------------------\n#\n# Let's do the same thing with the time-frequency decomposition of the data\n# (see :ref:`tut-sensors-time-freq` for a tutorial and\n# :ref:`ex-tfr-comparison` for a comparison of time-frequency methods) to\n# show how cluster permutations can be done on higher-dimensional data.\n\ndecim = 4\nfreqs = np.arange(7, 30, 3) # define frequencies of interest\nn_cycles = freqs / 
freqs[0]\n\nepochs_power = list()\nfor condition in [epochs[k] for k in ('Aud/L', 'Vis/L')]:\n this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,\n decim=decim, average=False, return_itc=False)\n this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))\n epochs_power.append(this_tfr.data)\n\n# transpose again to (epochs, frequencies, times, vertices)\nX = [np.transpose(x, (0, 2, 3, 1)) for x in epochs_power]\n\n# %%\n# Let's extend our adjacency definition to include the time-frequency\n# dimensions. Here, the integer inputs are converted into a lattice and\n# combined with the sensor adjacency matrix so that data at similar\n# times and with similar frequencies and at close sensor locations are\n# clustered together.\ntfr_adjacency = combine_adjacency(\n len(freqs), len(this_tfr.times), adjacency)\n\n# %%\n# Now we can run the cluster permutation test, but first we have to set a\n# threshold. This example decimates in time and uses few frequencies so we need\n# to increase the threshold from the default value in order to have\n# differentiated clusters (i.e. so that our algorithm doesn't just find one\n# large cluster). For a more principled method of setting this parameter,\n# threshold-free cluster enhancement may be used or the p-value may be set with\n#\n# .. code-block:: python\n#\n# from scipy import stats\n# n_comparisons = len(X) # L auditory vs L visual stimulus\n# n_conditions = X[0].shape[0] # 55 epochs per comparison\n# threshold = stats.distributions.f.ppf(\n# 1 - p_accept, n_comparisons - 1, n_conditions - 1)\n#\n# See :ref:`disc-stats` for a discussion.\n\ntfr_threshold = 15.0\n\n# run statistic\ncluster_stats = spatio_temporal_cluster_test(\n X, n_permutations=1000, threshold=tfr_threshold, tail=1, n_jobs=1,\n buffer_size=None, adjacency=tfr_adjacency)\n\n# %%\n# Finally, we can plot our results. It is difficult to visualize clusters in\n# time-frequency-sensor space; plotting time-frequency spectrograms and\n# plotting topomaps display time-frequency and sensor space respectively\n# but they are difficult to combine. We will plot topomaps with the clustered\n# sensors colored in white adjacent to spectrograms in order to provide a\n# visualization of the results. This is a dimensionally limited view, however.\n# Each sensor has its own significant time-frequencies, but, in order to\n# display a single spectrogram, all the time-frequencies that are significant\n# for any sensor in the cluster are plotted as significant. 
This is a\n# difficulty inherent to visualizing high-dimensional data and should be taken\n# into consideration when interpreting results.\nF_obs, clusters, p_values, _ = cluster_stats\ngood_cluster_inds = np.where(p_values < p_accept)[0]\n\nfor i_clu, clu_idx in enumerate(good_cluster_inds):\n # unpack cluster information, get unique indices\n freq_inds, time_inds, space_inds = clusters[clu_idx]\n ch_inds = np.unique(space_inds)\n time_inds = np.unique(time_inds)\n freq_inds = np.unique(freq_inds)\n\n # get topography for F stat\n f_map = F_obs[freq_inds].mean(axis=0)\n f_map = f_map[time_inds].mean(axis=0)\n\n # get signals at the sensors contributing to the cluster\n sig_times = epochs.times[time_inds]\n\n # initialize figure\n fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))\n\n # create spatial mask\n mask = np.zeros((f_map.shape[0], 1), dtype=bool)\n mask[ch_inds, :] = True\n\n # plot average test statistic and mark significant sensors\n f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)\n f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',\n vmin=np.min, vmax=np.max, show=False,\n colorbar=False, mask_params=dict(markersize=10))\n image = ax_topo.images[0]\n\n # create additional axes (for ERF and colorbar)\n divider = make_axes_locatable(ax_topo)\n\n # add axes for colorbar\n ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(image, cax=ax_colorbar)\n ax_topo.set_xlabel(\n 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))\n\n # add new axis for spectrogram\n ax_spec = divider.append_axes('right', size='300%', pad=1.2)\n title = 'Cluster #{0}, {1} spectrogram'.format(i_clu + 1, len(ch_inds))\n if len(ch_inds) > 1:\n title += \" (max over channels)\"\n F_obs_plot = F_obs[..., ch_inds].max(axis=-1)\n F_obs_plot_sig = np.zeros(F_obs_plot.shape) * np.nan\n F_obs_plot_sig[tuple(np.meshgrid(freq_inds, time_inds))] = \\\n F_obs_plot[tuple(np.meshgrid(freq_inds, time_inds))]\n\n for f_image, cmap in zip([F_obs_plot, F_obs_plot_sig], ['gray', 'autumn']):\n c = ax_spec.imshow(f_image, cmap=cmap, aspect='auto', origin='lower',\n extent=[epochs.times[0], epochs.times[-1],\n freqs[0], freqs[-1]])\n ax_spec.set_xlabel('Time (ms)')\n ax_spec.set_ylabel('Frequency (Hz)')\n ax_spec.set_title(title)\n\n # add another colorbar\n ax_colorbar2 = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(c, cax=ax_colorbar2)\n ax_colorbar2.set_ylabel('F-stat')\n\n # clean up viz\n mne.viz.tight_layout(fig=fig)\n fig.subplots_adjust(bottom=.05)\n\n\n# %%\n# Exercises\n# ----------\n#\n# - What is the smallest p-value you can obtain, given the finite number of\n# permutations?\n# - use an F distribution to compute the threshold by traditional significance\n# levels. Hint: take a look at :obj:`scipy.stats.f`\n#\n# .. _fieldtrip website:\n# http://www.fieldtriptoolbox.org/faq/\n# how_not_to_interpret_results_from_a_cluster-based_permutation_test\n" ]
[ [ "numpy.transpose", "numpy.squeeze", "numpy.zeros", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.colorbar", "numpy.where", "numpy.meshgrid", "numpy.unique" ] ]
issca/inferbeddings
[ "80492a7aebcdcac21e758514c8af403d77e8594a" ]
[ "tests/inferbeddings/adversarial/closedform/test_lifted_simple_transe_unit_sphere.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom inferbeddings.models import base as models\nfrom inferbeddings.models import similarities\nfrom inferbeddings.knowledgebase import Fact, KnowledgeBaseParser\nfrom inferbeddings.parse import parse_clause\nfrom inferbeddings.models.training import constraints\n\nfrom inferbeddings.adversarial import Adversarial\nfrom inferbeddings.adversarial.closedform import ClosedForm\n\nimport logging\n\nimport pytest\n\nlogger = logging.getLogger(__name__)\n\ntriples = [\n ('a', 'p', 'b'),\n ('c', 'p', 'd'),\n ('a', 'q', 'b')\n]\nfacts = [Fact(predicate_name=p, argument_names=[s, o]) for s, p, o in triples]\nparser = KnowledgeBaseParser(facts)\n\nnb_entities = len(parser.entity_to_index)\nnb_predicates = len(parser.predicate_to_index)\n\n# Clauses\nclause_str = 'q(X, Y) :- p(X, Y)'\nclauses = [parse_clause(clause_str)]\n\n# Instantiating the model parameters\nmodel_class = models.get_function('TransE')\nsimilarity_function = similarities.get_function('l2_sqr')\n\nmodel_parameters = dict(similarity_function=similarity_function)\n\n\[email protected]\ndef test_transe_unit_sphere():\n for seed in range(32):\n tf.reset_default_graph()\n\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n entity_embedding_size = np.random.randint(low=1, high=5)\n predicate_embedding_size = entity_embedding_size\n\n # Instantiating entity and predicate embedding layers\n entity_embedding_layer = tf.get_variable('entities',\n shape=[nb_entities + 1, entity_embedding_size],\n initializer=tf.contrib.layers.xavier_initializer())\n\n predicate_embedding_layer = tf.get_variable('predicates',\n shape=[nb_predicates + 1, predicate_embedding_size],\n initializer=tf.contrib.layers.xavier_initializer())\n\n # Adversary - used for computing the adversarial loss\n adversarial = Adversarial(clauses=clauses, parser=parser,\n entity_embedding_layer=entity_embedding_layer,\n predicate_embedding_layer=predicate_embedding_layer,\n model_class=model_class,\n model_parameters=model_parameters,\n batch_size=1)\n\n adv_projection_steps = [constraints.unit_sphere(adv_emb_layer) for adv_emb_layer in adversarial.parameters]\n\n adversarial_loss = adversarial.loss\n\n v_optimizer = tf.train.AdagradOptimizer(learning_rate=1e-1)\n v_training_step = v_optimizer.minimize(- adversarial_loss, var_list=adversarial.parameters)\n\n init_op = tf.global_variables_initializer()\n\n closed_form_lifted = ClosedForm(parser=parser,\n predicate_embedding_layer=predicate_embedding_layer,\n model_class=model_class, model_parameters=model_parameters,\n is_unit_cube=False)\n opt_adversarial_loss = closed_form_lifted(clauses[0])\n\n with tf.Session() as session:\n session.run(init_op)\n\n for finding_epoch in range(1, 100 + 1):\n _ = session.run([v_training_step])\n\n for projection_step in adv_projection_steps:\n session.run([projection_step])\n\n violation_loss_val, opt_adversarial_loss_val = session.run([adversarial_loss, opt_adversarial_loss])\n\n if violation_loss_val + 1e-1 > opt_adversarial_loss_val:\n print('{} <= {}'.format(violation_loss_val, opt_adversarial_loss_val))\n\n assert violation_loss_val <= (opt_adversarial_loss_val + 1e-4)\n\n tf.reset_default_graph()\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n pytest.main([__file__])\n" ]
[ [ "tensorflow.global_variables_initializer", "numpy.random.seed", "tensorflow.train.AdagradOptimizer", "tensorflow.set_random_seed", "tensorflow.Session", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.reset_default_graph", "numpy.random.randint" ] ]
nicolas-lair/DiscourseOverrule
[ "48c64db5d84b3fbc72f59a1a7ebe416fdc7f9ac1" ]
[ "Exp3C-metusalem2012_Reservoir50.py" ]
[ "from wikipedia2vec import Wikipedia2Vec\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport nltk\r\nnltk.download('stopwords')\r\nfrom nltk.corpus import stopwords\r\nimport csv\r\n\r\nimport scipy\r\nfrom scipy import stats\r\n\r\nfrom easyesn.optimizers import GradientOptimizer\r\nfrom easyesn import PredictionESN\r\nfrom easyesn.optimizers import GridSearchOptimizer\r\nfrom easyesn import helper as hlp\r\n\r\n\r\nvectorDim = 100\r\n\r\nnumNode = 100\r\n\r\ninputDataTraining = np.load('C:/Users/PeterDell/Google Drive/GoogleWIP/People/Uchida/ExpDuplication/reservoir/trainingData_averaging/inputDataTraining_4k_average.npy')\r\noutputDataTraining = np.load('C:/Users/PeterDell/Google Drive/GoogleWIP/People/Uchida/ExpDuplication/reservoir/trainingData_averaging/outputDataTraining_4k_average.npy')\r\n\r\n\r\n\r\ndef cos_sim(v1, v2):\r\n\treturn np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\r\n\r\nwiki2vec = Wikipedia2Vec.load('../enwiki_20180420_100d.pkl')\r\n\r\n\r\nN = 72\r\n\r\nreps = 50\r\n\r\nA = np.empty(N)\r\nB = np.empty(N)\r\nC = np.empty(N)\r\nD = np.empty(N)\r\nE = np.empty(N)\r\nF = np.empty(N)\r\n\r\nAreps = np.empty(reps)\r\nBreps = np.empty(reps)\r\nCreps = np.empty(reps)\r\nDreps = np.empty(reps)\r\nEreps = np.empty(reps)\r\nFreps = np.empty(reps)\r\n\r\nf = open('metusalem2012_experiment.csv', 'w')\r\nwriter = csv.writer(f, lineterminator='\\n')\r\n\r\n# here is the loop on reservoir instances\r\nfor instances in range(reps):\r\n\r\n # training the reservoir\r\n\r\n np.random.seed(instances)\r\n\r\n print('Start reservoir training', instances)\r\n esn = PredictionESN(n_input=vectorDim, n_output=vectorDim, n_reservoir=numNode, leakingRate=0.2, regressionParameters=[1e-2], solver=\"lsqr\", feedback=False)\r\n esn.fit(inputDataTraining, outputDataTraining, transientTime=\"Auto\", verbose=1)\r\n\r\n print('Reservoir trainging done')\r\n \r\n\r\n # here is the loop on scenarios\r\n for i in range(N):\r\n print('\\n############### ' + str(i + 1) + ' ###############')\r\n\r\n # read txt file for data\r\n f = open('./data_metusalem2012/'+str(i + 1)+'.txt', 'r')\r\n list = f.readlines()\r\n discourse_words_1 = list[1].split()\r\n discourse_words_2 = list[0].split()\r\n discourse_words_1and2 = discourse_words_2 + discourse_words_1\r\n target_word_1 = list[2].lower()\r\n target_word_2 = list[3].lower()\r\n target_word_3 = list[4].lower()\r\n f.close()\r\n\r\n # large capital -> small capital\r\n discourse_words_1 = [s.replace(s, s.lower()) for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(s, s.lower()) for s in discourse_words_1and2]\r\n\r\n # remove '.' 
and ',' from word list\r\n discourse_words_1 = [s.replace('.', '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace('.', '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(',', '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(',', '') for s in discourse_words_1and2]\r\n\r\n # remove stop words from word list\r\n stop_words = stopwords.words('english')\r\n #print(stop_words)\r\n for stop_word in stop_words:\r\n while stop_word in discourse_words_1 :\r\n discourse_words_1.remove(stop_word)\r\n \r\n while stop_word in discourse_words_1and2 :\r\n discourse_words_1and2.remove(stop_word)\r\n \r\n # remove \"'s\" and \"'\" and \"-\" and \"'d\" and \"'ll\" and \"'ve\" and \"re\" from word list\r\n discourse_words_1 = [s.replace(\"'s\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'s\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"'\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"-\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"-\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"'d\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'d\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"'ll\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'ll\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"'ve\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'ve\", '') for s in discourse_words_1and2]\r\n discourse_words_1 = [s.replace(\"'re\", '') for s in discourse_words_1]\r\n discourse_words_1and2 = [s.replace(\"'re\", '') for s in discourse_words_1and2]\r\n\r\n # replace '\\n' from target words\r\n target_word_1 = target_word_1.replace('\\n', '')\r\n target_word_2 = target_word_2.replace('\\n', '')\r\n target_word_3 = target_word_3.replace('\\n', '')\r\n\r\n\r\n print('Data:')\r\n print('target_word_1: %s' % target_word_1)\r\n print('target_word_2: %s' % target_word_2)\r\n print('target_word_3: %s' % target_word_3)\r\n\r\n print('discourse_words_1:')\r\n print(discourse_words_1)\r\n print('discourse_words_1and2:')\r\n print(discourse_words_1and2)\r\n\r\n\r\n target_word_1_vector = wiki2vec.get_word_vector(target_word_1)\r\n target_word_2_vector = wiki2vec.get_word_vector(target_word_2)\r\n target_word_3_vector = wiki2vec.get_word_vector(target_word_3)\r\n\r\n '''\r\n fig, ax = plt.subplots()\r\n t = np.linspace(1, 2, 2)\r\n '''\r\n\r\n trajectory_word_1 = np.array([])\r\n trajectory_word_2 = np.array([])\r\n trajectory_word_3 = np.array([])\r\n\r\n print('\\nStep1: ')\r\n \r\n # here we create discourse_vector_1 by averaging word2vecs\r\n '''\r\n for num in range(len(discourse_words_1)):\r\n #print(discourse_words_1[num])\r\n if num == 0:\r\n discourse_vector_1 = wiki2vec.get_word_vector(discourse_words_1[num])\r\n else:\r\n discourse_vector_1 = (num * discourse_vector_1 + wiki2vec.get_word_vector(discourse_words_1[num])) / (num + 1)\r\n '''\r\n # end of creating discourse vector by averaging\r\n\r\n #now we want to create the dicourse_vector_1 using the reservoir\r\n\r\n inputDataTesting = np.empty((0,vectorDim))\r\n print(inputDataTesting.shape)\r\n\r\n for num in range(len(discourse_words_1)):\r\n inputDataTesting = np.append(inputDataTesting, np.array([wiki2vec.get_word_vector(discourse_words_1[num])]), 
axis=0)\r\n print(inputDataTesting.shape)\r\n\r\n prediction = esn.predict(inputDataTesting)\r\n #print(prediction)\r\n print(prediction.shape)\r\n print(len(prediction))\r\n discourse_vector_1 = prediction[len(prediction)-1]\r\n\r\n # end of creating discourse vector by reservoir\r\n\r\n print('cos(discourse_vector_1, %s)=%f' % (target_word_1, cos_sim(discourse_vector_1, target_word_1_vector)))\r\n print('cos(discourse_vector_1, %s)=%f' % (target_word_2, cos_sim(discourse_vector_1, target_word_2_vector)))\r\n print('cos(discourse_vector_1, %s)=%f' % (target_word_3, cos_sim(discourse_vector_1, target_word_3_vector)))\r\n\r\n trajectory_word_1 = np.append(trajectory_word_1, cos_sim(discourse_vector_1, target_word_1_vector))\r\n trajectory_word_2 = np.append(trajectory_word_2, cos_sim(discourse_vector_1, target_word_2_vector))\r\n trajectory_word_3 = np.append(trajectory_word_3, cos_sim(discourse_vector_1, target_word_3_vector))\r\n\r\n A[i] = cos_sim(discourse_vector_1, target_word_1_vector)\r\n B[i] = cos_sim(discourse_vector_1, target_word_2_vector)\r\n C[i] = cos_sim(discourse_vector_1, target_word_3_vector)\r\n\r\n\r\n print('\\nStep2: ')\r\n\r\n '''\r\n for num in range(len(discourse_words_1and2)):\r\n #print(discourse_words_1and2[num])\r\n if num == 0:\r\n discourse_vector_1and2 = wiki2vec.get_word_vector(discourse_words_1and2[num])\r\n else:\r\n discourse_vector_1and2 = (num * discourse_vector_1and2 + wiki2vec.get_word_vector(discourse_words_1and2[num])) / (num + 1)\r\n '''\r\n\r\n #now we want to create the dicourse_vector_1 using the reservoir\r\n\r\n inputDataTesting = np.empty((0,vectorDim))\r\n print(inputDataTesting.shape)\r\n\r\n for num in range(len(discourse_words_1and2)):\r\n inputDataTesting = np.append(inputDataTesting, np.array([wiki2vec.get_word_vector(discourse_words_1and2[num])]), axis=0)\r\n print(inputDataTesting.shape)\r\n\r\n prediction = esn.predict(inputDataTesting)\r\n #print(prediction)\r\n print(prediction.shape)\r\n print(len(prediction))\r\n discourse_vector_1and2 = prediction[len(prediction)-1]\r\n\r\n # end of creating discourse vector by reservoir\r\n print('cos(discourse_vector_1and2, %s)=%f' % (target_word_1, cos_sim(discourse_vector_1and2, target_word_1_vector)))\r\n print('cos(discourse_vector_1and2, %s)=%f' % (target_word_2, cos_sim(discourse_vector_1and2, target_word_2_vector)))\r\n print('cos(discourse_vector_1and2, %s)=%f' % (target_word_3, cos_sim(discourse_vector_1and2, target_word_3_vector)))\r\n \r\n writer.writerow([cos_sim(discourse_vector_1, target_word_1_vector), cos_sim(discourse_vector_1, target_word_2_vector), cos_sim(discourse_vector_1, target_word_3_vector), cos_sim(discourse_vector_1and2, target_word_1_vector), cos_sim(discourse_vector_1and2, target_word_2_vector), cos_sim(discourse_vector_1and2, target_word_3_vector)])\r\n\r\n D[i] = cos_sim(discourse_vector_1and2, target_word_1_vector)\r\n E[i] = cos_sim(discourse_vector_1and2, target_word_2_vector)\r\n F[i] = cos_sim(discourse_vector_1and2, target_word_3_vector)\r\n\r\n trajectory_word_1 = np.append(trajectory_word_1, cos_sim(discourse_vector_1and2, target_word_1_vector))\r\n trajectory_word_2 = np.append(trajectory_word_2, cos_sim(discourse_vector_1and2, target_word_2_vector))\r\n trajectory_word_3 = np.append(trajectory_word_3, cos_sim(discourse_vector_1and2, target_word_3_vector))\r\n\r\n '''\r\n ax.set_xlabel('1=vector1, 2=vector1&2')\r\n ax.set_ylabel('cosine similarity')\r\n ax.set_title(r'cosine similarity reproduction of metusalem2012')\r\n ax.set_xlim([1, 2])\r\n 
ax.set_ylim([0, 1])\r\n\r\n ax.plot(t, trajectory_word_1, color=\"blue\", label=target_word_1)\r\n ax.plot(t, trajectory_word_2, color=\"red\", label=target_word_2)\r\n ax.plot(t, trajectory_word_3, color=\"green\", label=target_word_3)\r\n\r\n ax.legend(loc=0)\r\n fig.tight_layout()\r\n #plt.savefig(fig_name)\r\n #plt.show()\r\n '''\r\n\r\n f.close()\r\n Areps[instances] = np.mean(A)\r\n Breps[instances] = np.mean(B)\r\n Creps[instances] = np.mean(C)\r\n Dreps[instances] = np.mean(D)\r\n Ereps[instances] = np.mean(E)\r\n Freps[instances] = np.mean(F)\r\n\r\n\r\ndata_to_plot = [Areps, Breps, Creps, Dreps, Ereps, Freps]\r\n\r\nprint(data_to_plot)\r\n\r\n# Create a figure instance\r\n#fig = plt.figure(1, figsize=(9, 6))\r\nfig = plt.figure(1)\r\n# Create an axes instance\r\nax = fig.add_subplot(111)\r\n\r\n# Create the boxplot\r\nbp = ax.boxplot(data_to_plot)\r\n\r\nax.set_xticklabels(['Sent-Expect', 'Sent-Relat', 'Sent-Unrel', 'Disc-Expect', 'Disc-Relat', 'Disc-Unrel'])\r\nfig.tight_layout()\r\nplt.savefig('Metusalem-72-res-instances.png')\r\n#plt.show()\r\n\r\n\r\nprint('t-test: for A vs B: ', stats.ttest_ind(Areps,Breps))\r\nprint('t-test: for B vs C: ', stats.ttest_ind(Breps,Creps))\r\nprint('t-test: for D vs E: ', stats.ttest_ind(Dreps,Ereps))\r\nprint('t-test: for E vs F: ', stats.ttest_ind(Ereps,Freps))\r\n\r\n\r\nfig, axes = plt.subplots(ncols=3)\r\naxes[0].set_title('Expected')\r\nn, bins, patches = axes[0].hist(Dreps, 10, normed=1, facecolor='c', alpha=0.5)\r\n\r\naxes[1].set_title('Related')\r\nn2, bins2, patches2 = axes[1].hist(Ereps, 10, normed=1, facecolor='blue', alpha=0.5)\r\n\r\naxes[2].set_title('Unrelated')\r\nn3, bins3, patches3 = axes[2].hist(Freps, 10, normed=1, facecolor='red', alpha=0.5)\r\n\r\nplt.savefig('Metusalem-72-res-distributions-instances.png')\r\n#plt.show()\r\n\r\n\r\n# PCA on prediction\r\n" ]
[ [ "numpy.load", "numpy.empty", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "scipy.stats.ttest_ind", "numpy.random.seed", "matplotlib.pyplot.subplots", "numpy.array", "numpy.dot", "numpy.linalg.norm", "numpy.mean" ] ]
owen633/caw-quant-training
[ "e333d38cf05483434300f55740b541f57e707d4e" ]
[ "section1/task1/task1.py" ]
[ "# Task1 Get hourly candle data from CryptoCompare\n## 1. Explore CryptoCompare Data API\n### Required\n#### 1. **Write a function** to download histohour data, parameters:\n# fsym: BTC, tsym: USDT, start_time=\"2017-04-01\", end_time=\"2020-04-01\", e='binance'\n\n# import libraries\nimport requests\nimport pandas as pd\nimport time\nimport dateparser\nimport pytz\n\nfrom datetime import datetime\n\n\n# write a function to convert time\ndef date_to_seconds(date_str):\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)\n d = dateparser.parse(date_str)\n if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:\n d = d.replace(tzinfo=pytz.utc)\n return int((d - epoch).total_seconds())\n\n# write a function to extract API data\ndef get_hour(fsym, tsym, e, start_time, end_time):\n output_data = pd.DataFrame()\n limit = 2000\n start_ts = date_to_seconds(start_time)\n end_ts = date_to_seconds(end_time)\n df_rows = (end_ts - start_ts)/3600 + 1\n\n while len(output_data) < df_rows:\n dat_param = {'fsym': fsym , 'tsym': tsym, 'limit':limit, 'e': e, 'toTs': end_ts}\n resp = requests.get('https://min-api.cryptocompare.com/data/v2/histohour', params = dat_param)\n temp_data = pd.DataFrame.from_dict(resp.json()['Data']['Data'])\n output_data = output_data.append(temp_data, ignore_index=True)\n end_ts = temp_data['time'].iloc[0] - 3600\n hour_remain = (end_ts - start_ts)/3600\n\n if hour_remain < limit:\n dat_param2 = {'fsym': fsym , 'tsym': tsym, 'limit':hour_remain, 'e': e, 'toTs': end_ts}\n resp2 = requests.get('https://min-api.cryptocompare.com/data/v2/histohour', params = dat_param2)\n final_data = pd.DataFrame.from_dict(resp2.json()['Data']['Data'])\n output_data = output_data.append(final_data, ignore_index=True)\n break\n\n return output_data\n \n# write a function to format data\ndef format_data(df):\n tidy_df = df.sort_values(by=['time'], inplace=False).rename(\n columns={\n \"volumefrom\": \"volume\",\n \"volumeto\": \"baseVolume\",\n \"time\": \"datetime\"\n }, inplace=False\n ).drop(['conversionType', 'conversionSymbol'], axis=1, inplace=False)\n \n tidy_df['datetime'] = pd.to_datetime(tidy_df['datetime'], unit='s')\n\n return tidy_df\n\n# execute module code and export to csv file\nif __name__ == '__main__':\n raw_data = get_hour('BTC', 'USDT', 'binance', '2017-04-01', '2020-04-01')\n formatted_data = format_data(raw_data)\n formatted_data.to_csv('.\\histohour.csv', index=False)\n\n\n\n\n\n\n### Optional\n\n#### 1. Modularize your code\n\n\n#### 2. Add one more data endpoint\n\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
pernambucano/myo_libras
[ "8909a43f8d455cc63215843ece7128704fab13b3" ]
[ "libras_myo/plotpandas.py" ]
[ "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier\n\n\ndef plotSensores():\n ### Letra A\n df_a = pd.read_csv(\"data/bernardo/bernardo-A-3-emg.csv\")\n\n df_a_avg = np.genfromtxt(\"data/bernardo/bernardo-A-3-avg.csv\", delimiter=',', dtype=float)\n df_a_avg = pd.DataFrame(df_a_avg)\n df_a = df_a.drop(df_a.columns[[0]], axis=1)\n df_a.columns = [\"sensor1\", \"sensor2\", \"sensor3\", \"sensor4\", \"sensor5\", \"sensor6\", \"sensor7\", \"sensor8\"]\n\n ### Letra B\n df_b = pd.read_csv(\"data/bernardo/bernardo-D-3-emg.csv\")\n\n df_b_avg = np.genfromtxt(\"data/bernardo/bernardo-D-3-avg.csv\", delimiter=',', dtype=float)\n df_b_avg = pd.DataFrame(df_b_avg)\n df_b = df_b.drop(df_b.columns[[0]], axis=1)\n df_b.columns = [\"sensor1\", \"sensor2\", \"sensor3\", \"sensor4\", \"sensor5\", \"sensor6\", \"sensor7\", \"sensor8\"]\n\n\n\n plt.figure()\n fig, axes = plt.subplots(figsize=(8,8),nrows=2, ncols=2)\n\n ## Letra A\n df_a.plot(legend=False,ax=axes[0, 0])\n axes[0,0].set_ylabel(\"Letra A - Todos os sensores\")\n df_a_avg.plot(legend=False,ax=axes[1, 0])\n axes[1,0].set_ylabel(u\"Letra A - Valores Médios\");\n\n\n ## Letra A\n df_b.plot(legend=False,ax=axes[0, 1])\n axes[0,1].set_ylabel(\"Letra D - Todos os sensores\")\n df_b_avg.plot(legend=False,ax=axes[1, 1])\n axes[1,1].set_ylabel(u\"Letra D - Valores Médios\");\n\n\n for ax in axes:\n for bx in ax:\n bx.set_xticks([], [])\n bx.set_yticks([], [])\n\n plt.show()\n\ndef plot_crossvalidation():\n\n plt.figure()\n df = pd.Series([78,78,76,62], index=[\"Grupo 1\", \"Grupo 2\", \"Grupo 3\", \"Grupo 4\"])\n ax =df.plot(kind='bar', rot=0, title=\"10-Fold Cross-Validation\")\n ax.grid(True, which='major', axis='y')\n ax.set_ylim(0,100)\n plt.show()\n\ndef plotLeaveOneOut():\n featureMatrix = pd.read_csv(\"featureMatrix.csv\")\n n_estimators = 150\n i = 5\n\n ## 15 - 0.333333333333 0.4 0.466666666667 0.733333333333 0.733333333333\n ## 10 - 0.5 0.5 0.6 0.6 0.6\n ## 5 - 0.8 0.6 0.4 1.0 0.8\n ## 3 - 0.666666666667 0.666666666667 0.666666666667 1.0 1.0\n #\n # s1 = featureMatrix.iloc[0:i,:]\n # s2 = featureMatrix.iloc[i:i*2,:]\n # s3 = featureMatrix.iloc[i*2:i*3,:]\n # s4 = featureMatrix.iloc[i*3:i*4,:]\n # s5 = featureMatrix.iloc[i*4:i*5,:]\n #\n # ### W/o S1\n # trainingwos1 = s2.append(s3).append(s4).append(s5)\n # clf = RandomForestClassifier(n_estimators=n_estimators, random_state=30)\n # clf.fit(trainingwos1.iloc[:,:24], trainingwos1.iloc[:,24])\n # scorewos1 = clf.score(s1.iloc[:,:24], s1.iloc[:,24])\n # ### W/o S2\n # trainingwos2 = s1.append(s3).append(s4).append(s5)\n # clf = RandomForestClassifier(n_estimators=n_estimators, random_state=30)\n # clf.fit(trainingwos2.iloc[:,:24], trainingwos2.iloc[:,24])\n # scorewos2 = clf.score(s2.iloc[:,:24], s2.iloc[:,24])\n # ### W/o S3\n # trainingwos3 = s1.append(s2).append(s4).append(s5)\n # clf = RandomForestClassifier(n_estimators=n_estimators, random_state=30)\n # clf.fit(trainingwos3.iloc[:,:24], trainingwos3.iloc[:,24])\n # scorewos3 = clf.score(s3.iloc[:,:24], s3.iloc[:,24])\n # ### W/o S4\n # trainingwos4 = s1.append(s2).append(s3).append(s5)\n # clf = RandomForestClassifier(n_estimators=n_estimators, random_state=30)\n # clf.fit(trainingwos4.iloc[:,:24], trainingwos4.iloc[:,24])\n # scorewos4 = clf.score(s4.iloc[:,:24], s4.iloc[:,24])\n # ### W/o S5\n # trainingwos5 = s1.append(s2).append(s3).append(s4)\n # clf = 
RandomForestClassifier(n_estimators=n_estimators, random_state=30)\n # clf.fit(trainingwos5.iloc[:,:24], trainingwos5.iloc[:,24])\n # scorewos5 = clf.score(s5.iloc[:,:24], s5.iloc[:,24])\n # print scorewos1, scorewos2, scorewos3, scorewos4, scorewos5\n\n plt.figure()\n mdict = {'Grupo 1': [0.66, 0.66, 0.66, 1.0, 1.0], 'Grupo 2': [0.8, 0.6, 0.4, 1.0, 0.8],\n 'Grupo 3':[0.5, 0.5, 0.6, 0.6, 0.6], 'Grupo 4': [0.33, 0.4, 0.46, 0.73, 0.73]}\n df = pd.DataFrame(mdict)\n df.index = [\"P1\", \"P2\", \"P3\", \"P4\", \"P5\"]\n ax = df.plot(kind='bar', title=u\"Validação por 'Leave One Subject Out'\", rot=0)\n ax.set_ylim(0,1.2)\n ax.grid(True, which='major', axis='y')\n plt.show()" ]
[ [ "pandas.Series", "pandas.read_csv", "matplotlib.pyplot.figure", "pandas.DataFrame", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.genfromtxt" ] ]
fercook/alone_chat
[ "2e9fd992acabeb41163ed7c1917e0a380ebeaf2e" ]
[ "chatbot/botui.py" ]
[ "# Copyright 2017 Bo Shao. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom settings import PROJECT_ROOT\nfrom chatbot.botpredictor import BotPredictor\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef bot_ui():\n corp_dir = os.path.join(PROJECT_ROOT, 'Data', 'Corpus')\n knbs_dir = os.path.join(PROJECT_ROOT, 'Data', 'KnowledgeBase')\n res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')\n\n with tf.Session() as sess:\n predictor = BotPredictor(sess, corpus_dir=corp_dir, knbase_dir=knbs_dir,\n result_dir=res_dir, result_file='basic')\n # This command UI has a single chat session only\n session_id = predictor.session_data.add_session()\n\n print(\"Welcome to Chat with ChatLearner!\")\n print(\"Type exit and press enter to end the conversation.\")\n # Waiting from standard input.\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n question = sys.stdin.readline()\n while question:\n if question.strip() == 'exit':\n print(\"Thank you for using ChatLearner. Goodbye.\")\n break\n\n print(predictor.predict(session_id, question))\n print(\"> \", end=\"\")\n sys.stdout.flush()\n question = sys.stdin.readline()\n\nif __name__ == \"__main__\":\n bot_ui()\n" ]
[ [ "tensorflow.Session" ] ]
slyviacassell/Multi-taks-UNITE
[ "a010a92c94c0ee0f1ffed27df6d89da58d6d34c5" ]
[ "modules/Stage.py" ]
[ "# coding=utf-8\n\n'''\nCreated: 2021/3/12\n@author: [email protected]\n'''\n\nimport torch\nimport torch.nn as nn\n\nfrom memonger import SublinearSequential\n\n\nclass Stage(nn.Module):\n def __init__(self, out_channels, layers):\n super(Stage, self).__init__()\n if isinstance(layers, (nn.Sequential, SublinearSequential)):\n self.feature = layers\n else:\n self.feature = nn.Sequential(*layers)\n self.out_channels = out_channels\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n out = self.feature(x)\n # for n, m in self.named_modules():\n # if isinstance(m, nn.BatchNorm2d):\n # print(m.running_mean, m.running_var)\n return out\n" ]
[ [ "torch.nn.Sequential" ] ]
Suhwan-Dev/torch-kalman
[ "f310c42e264d1642819e4c49a8b0212209a18a85" ]
[ "torch_kalman/state_belief/utils.py" ]
[ "from typing import Tuple, Optional\n\nfrom torch import Tensor\nfrom torch.distributions import MultivariateNormal\n\nimport numpy as np\nfrom torch.distributions.multivariate_normal import _batch_mv\nfrom torch.distributions.utils import _standard_normal\n\n\ndef bmat_idx(*args) -> Tuple:\n \"\"\"\n Create indices for tensor assignment that act like slices. E.g., batch[:,[1,2,3],[1,2,3]] does not select the upper\n 3x3 sub-matrix over batches, but batch[bmat_idx(slice(None),[1,2,3],[1,2,3])] does.\n\n :param args: Each arg is a sequence of integers. The first N args can be slices, and the last N args can be slices.\n :return: A tuple that can be used for matrix/tensor-selection.\n \"\"\"\n\n if len(args) == 0:\n return ()\n elif isinstance(args[-1], slice):\n # trailing slices can't be passed to np._ix, but can be appended to its results\n return bmat_idx(*args[:-1]) + (args[-1],)\n elif isinstance(args[0], slice):\n # leading slices can't be passed to np._ix, but can be prepended to its results\n return (args[0],) + bmat_idx(*args[1:])\n else:\n if any(isinstance(arg, slice) for arg in args[1:]):\n raise ValueError(\"Only the first/last contiguous args can be slices, not middle args.\")\n return np.ix_(*args)\n\n\ndef deterministic_sample_mvnorm(distribution: MultivariateNormal, eps: Optional[Tensor] = None) -> Tensor:\n if isinstance(eps, Tensor):\n if eps.shape[-len(distribution.event_shape):] != distribution.event_shape:\n raise RuntimeError(f\"Expected shape ending in {distribution.event_shape}, got {eps.shape}.\")\n\n else:\n shape = distribution.batch_shape + distribution.event_shape\n if eps is None:\n eps = 1.0\n eps *= _standard_normal(shape, dtype=distribution.loc.dtype, device=distribution.loc.device)\n return distribution.loc + _batch_mv(distribution._unbroadcasted_scale_tril, eps)\n" ]
[ [ "numpy.ix_", "torch.distributions.utils._standard_normal", "torch.distributions.multivariate_normal._batch_mv" ] ]
iamjli/AnswerALS_QTL
[ "0adf7bb8747730dd23d8669b6cd5d27e20a4743f" ]
[ "dev/src/load/external_data.py" ]
[ "#!/usr/bin/env python3\n\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom src import base_dir, logger\n\n\n_external_data_paths = {\n\t# \"rsid\": base_dir / \"tensorqtl_runs/genomes_210409/snp_list.biallelic_known_snps.harmonized.VQSR_filtered_99.rsID.GT_only.pkl\",\n\t\"rsid\": base_dir / \"tensorqtl_runs/genomes_210409/snp_positions.biallelic_known_snps.harmonized.VQSR_filtered_99.rsID.GT_only.pickle\",\n\t\"ensg\": base_dir / \"data/external/ENSG_to_symbol.tsv\", \n\t\"snp2tf\": base_dir / \"data/external/SNP2TF/snp2tfbs_JASPAR_CORE_2014_vert.bed.gz\",\n\t\"open_targets\": base_dir / \"data/external/targets_associated_with_amyotrophic_lateral_sclerosis.csv\", \n\t\"pe_eqtl\": base_dir / \"data/external/PsychENCODE/DER-08a_hg38_eQTL.significant.txt\",\n\t\"pe_enhancers\": base_dir / \"data/external/PsychENCODE/DER-04a_hg38lft_PEC_enhancers.bed\", \n\t\"project_mine\": base_dir / \"data/external/Summary_Statistics_GWAS_2016/als.sumstats.lmm.parquet\",\n\t\"project_mine_hg38\": base_dir / \"data/external/Summary_Statistics_GWAS_2016/als.sumstats.lmm.hg38.parquet\", \n\t\"encode_tfs\": base_dir / \"data/external/encode_TFs_bed/combined_peaks.bed\",\n}\n\nclass ExternalData: \n\t\"\"\"Externally downloaded data that has been preprocessed.\"\"\"\n\n\tdef __init__(self, paths): \n\n\t\tself.paths = paths\n\n\t\tself._rsid = None\n\t\tself._ensg = None\n\t\tself._snp2tf = None\n\t\tself._open_targets = None\n\t\tself._PE_eqtl = None\n\t\tself._PE_enh = None\n\t\tself._PM_gwas = None\n\t\tself._encode_tfs = None\n\t\n\t@property\n\tdef rsid(self):\n\t\tif self._rsid is None: \n\t\t\tself._rsid = _load_rsid_pickle(self.paths[\"rsid\"])\n\t\t\t# self._rsid = _load_rsid_parquet(self.paths[\"rsid\"])\n\t\treturn self._rsid\n\n\t@property\n\tdef ensg(self):\n\t\tif self._ensg is None: \n\t\t\tself._ensg = _load_ensg(self.paths[\"ensg\"])\n\t\treturn self._ensg\n\n\t@property\n\tdef snp2tf(self):\n\t\tif self._snp2tf is None: \n\t\t\tself._snp2tf = _load_snp2tf(self.paths[\"snp2tf\"])\n\t\treturn self._snp2tf\n\n\t@property\n\tdef open_targets(self): \n\t\tif self._open_targets is None: \n\t\t\tself._open_targets = _load_opentargets(self.paths[\"open_targets\"])\n\t\treturn self._open_targets\n\n\t@property\n\tdef PE_eqtl(self):\n\t\tif self._PE_eqtl is None: \n\t\t\tself._PE_eqtl = _load_psychencode(self.paths[\"pe_eqtl\"])\n\t\treturn self._PE_eqtl\n\n\t@property\n\tdef PE_enh(self):\n\t\tif self._PE_enh is None: \n\t\t\tself._PE_enh = _load_psychencode_enhancers(self.paths[\"pe_enhancers\"])\n\t\treturn self._PE_enh\n\n\t@property\n\tdef PM_gwas(self):\n\t\tif self._PM_gwas is None: \n\t\t\tself._PM_gwas = _load_project_mine(self.paths[\"project_mine\"])\n\t\treturn self._PM_gwas\n\n\t@property\n\tdef encode_tfs(self):\n\t\tif self._encode_tfs is None: \n\t\t\tself._encode_tfs = _load_encode_tfs(self.paths[\"encode_tfs\"])\n\t\treturn self._encode_tfs\n\n\n#----------------------------------------------------------------------------------------------------#\n# Load data \n#----------------------------------------------------------------------------------------------------#\ndef _load_rsid_pickle(path): \n\t\"\"\"See README.md for how this was generated. Section `Generate variant list`.\"\"\"\n\tlogger.write(\"Loading rsID file...\")\n\trsid = pd.read_pickle(path)\n\trsid.loc[rsid.index[0]] # initialize loc - for some reason the first loc takes forever\n\treturn rsid\n\ndef _load_rsid_parquet(path): \n\t\"\"\"See README.md for how this was generated. Section `Generate variant list`.\"\"\"\n\tlogger.write(\"Loading rsID file...\")\n\trsid = pd.read_parquet(path)\n\trsid.loc[rsid.index[0]] # initialize loc - for some reason the first loc takes forever\n\treturn rsid\n\ndef _load_ensg(path):\n\t\"\"\"Loads series of gene symbols indexed by ENSG.\"\"\"\n\treturn pd.read_csv(path, sep=\"\\t\", names=[\"gene_id\", \"symbol\"], skiprows=1, index_col=0)[\"symbol\"]\n\ndef _load_snp2tf(path, collapse=True): \n\t\"\"\"Load TF annotations for SNPs.\"\"\"\n\timport gzip\n\n\tlogger.write(\"Loading snp2tf annotations...\")\n\n\twith gzip.open(path, \"r\") as f: \n\t\tif collapse: \n\t\t\tresults = {}\n\t\t\tfor line in f: \n\t\t\t\trow = line.decode().rstrip(\"\\n\").split(\"\\t\")\n\t\t\t\tvariant_id, tfs, scores = row[5], row[7], row[8]\n\t\t\t\tscores = \",\".join([str(int(s)) for s in scores.split(\",\")])\n\n\t\t\t\tfor var in variant_id.split(\";\"): # some rows contain two SNPs together\n\t\t\t\t\tif var not in results: \n\t\t\t\t\t\tresults[var] = dict(tfs=tfs, scores=scores)\n\t\t\t\t\telse: \n\t\t\t\t\t\tresults[var][\"tfs\"]\t+= \",\" + tfs\n\t\t\t\t\t\tresults[var][\"scores\"] += \",\" + scores\n\n\tresults_df = pd.DataFrame.from_dict(results, orient=\"index\")\n\tresults_df.index.name = \"variant_id\"\n\treturn results_df\n\ndef _load_opentargets(path):\n\tals = pd.read_csv(path)\n\tals.columns = als.columns.str.split(\".\").str[-1]\n\tals.set_index(\"symbol\", inplace=True)\n\treturn als\n\ndef _load_psychencode(path): \n\tlogger.write(\"Loading psychENCODE eQTLs...\")\n\tPE_eqtls = pd.read_csv(path, sep='\\t', usecols=[\"gene_id\", \"SNP_id\", \"nominal_pval\", \"regression_slope\", \"top_SNP\"])\n\tPE_eqtls[\"SNP_id\"] = \"chr\" + PE_eqtls[\"SNP_id\"]\n\tPE_eqtls[\"gene_id\"] = PE_eqtls[\"gene_id\"].str.split(\".\").str[0]\n\treturn PE_eqtls\n\ndef _load_psychencode_enhancers(path): \n\tlogger.write(\"Loading psychENCODE enhancers...\")\n\timport pyranges as pr\n\treturn pr.read_bed(str(path))\n\ndef _load_project_mine(path): \n\tlogger.write(\"Loading Project MinE GWAS...\")\n\treturn pd.read_parquet(path)\n\ndef _load_encode_tfs(path): \n\tlogger.write(\"Loading ENCODE merged TFs...\")\n\timport pyranges as pr\n\treturn pr.read_bed(str(path))\n\n#----------------------------------------------------------------------------------------------------#\ndata = ExternalData(_external_data_paths)" ]
[ [ "pandas.read_csv", "pandas.read_pickle", "pandas.DataFrame.from_dict", "pandas.read_parquet" ] ]
sdss/lvmspec
[ "befd6991537c4947fdf63ca262937f2bb845148f" ]
[ "py/desispec/scripts/mergebundles.py" ]
[ "\n\"\"\"\nexspec extracts individual bundles of spectra with one bundle per output file.\nThis script merges them back together into a single file combining all\nbundles.\n\nThis workflow is hacky. Release early, release often, but also refactor often.\n\nStephen Bailey, LBL\nMarch 2014\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nimport os\nimport numpy as np\nfrom astropy.io import fits\n\nfrom desispec.frame import Frame\nimport desispec.io\nfrom desiutil.log import get_logger\n\nimport argparse\n\n\ndef parse(options=None):\n parser = argparse.ArgumentParser(description=\"Merge extracted spectra bundles into one file.\")\n parser.add_argument(\"-o\", \"--output\", type=str, required=True,\n help=\"output file name\")\n parser.add_argument(\"-d\", \"--delete\", action=\"store_true\",\n help=\"delete input files when done\")\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\",\n help=\"merge files even if some fibers are missing\")\n parser.add_argument(\"files\", nargs='*')\n\n args = None\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n return args\n\n\ndef main(args):\n\n log = get_logger()\n\n nspec = 500 #- Hardcode! Number of DESI fibers per spectrograph\n\n #- Sanity check that all spectra are represented\n fibers = set()\n for filename in args.files:\n x = fits.getdata(filename, 'FIBERMAP')\n fibers.update( set(x['FIBER']) )\n\n if len(fibers) != nspec:\n msg = \"Input files only have {} instead of {} spectra\".format(len(fibers), nspec)\n if args.force:\n log.warning(msg)\n else:\n log.fatal(msg)\n sys.exit(1)\n\n #- Read a file to get basic dimensions\n w = fits.getdata(args.files[0], 'WAVELENGTH')\n nwave = len(w)\n R1 = fits.getdata(args.files[0], 'RESOLUTION')\n ndiag = R1.shape[1]\n hdr = fits.getheader(args.files[0])\n\n camera = hdr['CAMERA'].lower() #- b0, r1, .. z9\n spectrograph = int(camera[1])\n fibermin = spectrograph*nspec\n\n #- Output arrays to fill\n flux = np.zeros( (nspec, nwave) )\n ivar = np.zeros( (nspec, nwave) )\n R = np.zeros( (nspec, ndiag, nwave) )\n fibermap = None\n mask = np.zeros( (nspec, nwave), dtype=np.uint32)\n chi2pix = np.zeros( (nspec, nwave) )\n\n #- Fill them!\n for filename in args.files :\n fx = fits.open(filename)\n xhdr = fx[0].header\n xflux = fx['FLUX'].data\n xivar = fx['IVAR'].data\n xR = fx['RESOLUTION'].data\n xfibermap = fx['FIBERMAP'].data\n xmask = fx['MASK'].data\n xchi2pix = fx['CHI2PIX'].data\n fx.close()\n\n ii = xfibermap['FIBER'] % nspec\n\n flux[ii] = xflux\n ivar[ii] = xivar\n R[ii] = xR\n mask[ii] = xmask\n chi2pix[ii] = xchi2pix\n\n if fibermap is None:\n fibermap = np.zeros(nspec, dtype=xfibermap.dtype)\n fibermap['FIBER'] = np.arange(fibermin, fibermin+nspec)\n\n fibermap[ii] = xfibermap\n\n #- Write it out\n print(\"Writing\", args.output)\n frame = Frame(w, flux, ivar, mask=mask, resolution_data=R,\n spectrograph=spectrograph,\n meta=hdr, fibermap=fibermap, chi2pix=chi2pix)\n desispec.io.write_frame(args.output, frame)\n\n #- Scary! Delete input files\n if args.delete:\n for filename in args.files:\n os.remove(filename)\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
thisisgopalmandal/opencv
[ "4e2ef8c8f57644ccb8e762a37f70a61007c6be1c" ]
[ "samples/python/tutorial_code/video/meanshift/meanshift.py" ]
[ "import numpy as np\nimport cv2 as cv\nimport argparse\n\nparser = argparse.ArgumentParser(description='This sample demonstrates the meanshift algorithm. \\\n The example file can be downloaded from: \\\n https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')\nparser.add_argument('image', type=str, help='path to image file')\nargs = parser.parse_args()\n\ncap = cv.VideoCapture(args.image)\n\n# take first frame of the video\nret,frame = cap.read()\n\n# setup initial location of window\nx, y, w, h = 300, 200, 100, 50 # simply hardcoded the values\ntrack_window = (x, y, w, h)\n\n# set up the ROI for tracking\nroi = frame[y:y+h, x:x+w]\nhsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)\nmask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))\nroi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])\ncv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)\n\n# Setup the termination criteria, either 10 iteration or move by atleast 1 pt\nterm_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )\n\nwhile(1):\n ret, frame = cap.read()\n\n if ret == True:\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)\n\n # apply meanshift to get the new location\n ret, track_window = cv.meanShift(dst, track_window, term_crit)\n\n # Draw it on image\n x,y,w,h = track_window\n img2 = cv.rectangle(frame, (x,y), (x+w,y+h), 255,2)\n cv.imshow('img2',img2)\n\n k = cv.waitKey(30) & 0xff\n if k == 27:\n break\n else:\n break\n" ]
[ [ "numpy.array" ] ]
cabuliwallah/analytics-zoo
[ "5e662bd01c5fc7eed412973119594cf2ecea8b11" ]
[ "pyzoo/zoo/examples/orca/learn/tf2/yolov3/yoloV3.py" ]
[ "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ===========================================================================\n#\n# This file is adapted from\n# https://github.com/zzh8829/yolov3-tf2/blob/master/train.py,\n# https://github.com/zzh8829/yolov3-tf2/blob/master/yolov3_tf2/models.py and\n# https://github.com/zzh8829/yolov3-tf2/blob/master/yolov3_tf2/dataset.py\n#\n# MIT License\n#\n# Copyright (c) 2019 Zihao Zhang\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, \\\n    ModelCheckpoint, TensorBoard\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Add, Concatenate, Conv2D, Input, Lambda, \\\n    LeakyReLU, MaxPool2D, UpSampling2D, ZeroPadding2D, BatchNormalization\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.losses import binary_crossentropy, sparse_categorical_crossentropy\nfrom zoo.orca.data.image.parquet_dataset import read_parquet, write_parquet\nfrom zoo.orca.learn.tf2 import Estimator\nfrom zoo.orca import init_orca_context, stop_orca_context\nimport numpy as np\nimport ray\nimport tempfile\nimport os\nimport argparse\nimport sys\n\nDEFAULT_IMAGE_SIZE = 416\n\n\n@tf.function\ndef transform_targets_for_output(y_true, grid_size, anchor_idxs):\n    N = tf.shape(y_true)[0]\n    y_true_out = tf.zeros((N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))\n    anchor_idxs = tf.cast(anchor_idxs, tf.int32)\n\n    def outer_comp(i):\n        def inner_comp(j):\n            anchor_eq = tf.equal(\n                anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))\n\n            def reduce(y_true, anchor_eq, grid_size):\n                box = y_true[i][j][0:4]\n                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2\n                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)\n                grid_xy = tf.cast(box_xy // (1 / grid_size), tf.int32)\n\n                # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)\n                indexes = tf.stack([i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])\n                updates = tf.stack(\n                    [box[0], box[1], box[2], box[3], tf.constant(1, dtype=tf.float32),\n                     y_true[i][j][4]])\n                print(\"updates\", updates)\n                # idx += 1\n                return (True, indexes, updates)\n\n            (mask, indexes, updates) = tf.cond(tf.reduce_any(anchor_eq) and\n                                               not tf.equal(y_true[i][j][2], 0),\n                                               lambda: reduce(y_true, anchor_eq, grid_size),\n                                               lambda: (False, tf.zeros(4, tf.int32),\n                                                        tf.zeros(6, tf.float32)))\n            return (mask, indexes, updates)\n\n        return tf.map_fn(inner_comp, tf.range(tf.shape(y_true)[1]),\n                         dtype=(tf.bool, tf.int32, tf.float32))\n\n    (mask, indexes, updates) = tf.map_fn(outer_comp, tf.range(N),\n                                         dtype=(tf.bool, tf.int32, tf.float32))\n\n    indexes = tf.boolean_mask(indexes, mask)\n    updates = tf.boolean_mask(updates, mask)\n\n    return tf.tensor_scatter_nd_update(y_true_out, indexes, updates)\n\n\ndef transform_targets(y_train, anchors, anchor_masks, size):\n    y_outs = []\n    grid_size = size // 32\n\n    # calculate anchor index for true boxes\n    anchors = tf.cast(anchors, tf.float32)\n    anchor_area = anchors[..., 0] * anchors[..., 1]\n    box_wh = y_train[..., 2:4] - y_train[..., 0:2]\n    box_wh = tf.tile(tf.expand_dims(box_wh, -2),\n                     (1, 1, tf.shape(anchors)[0], 1))\n    box_area = box_wh[..., 0] * box_wh[..., 1]\n    intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * tf.minimum(box_wh[..., 1],\n                                                                            anchors[..., 1])\n    iou = intersection / (box_area + anchor_area - intersection)\n    anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)\n    anchor_idx = tf.expand_dims(anchor_idx, axis=-1)\n\n    y_train = tf.concat([y_train, anchor_idx], axis=-1)\n\n    for anchor_idxs in anchor_masks:\n        y_outs.append(transform_targets_for_output(\n            y_train, grid_size, anchor_idxs))\n        grid_size *= 2\n\n    return tuple(y_outs)\n\n\ndef transform_images(x_train, size):\n    x_train = tf.image.resize(x_train, (size, size))\n    x_train = x_train / 255\n    return x_train\n\n\ndef parse_data_train(image, label):\n    x_train = tf.io.decode_jpeg(image, 3)\n    x_train = tf.image.resize(x_train, (DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE))\n    paddings = [[0, 100 - tf.shape(label)[0]], [0, 0]]\n    y_train = tf.pad(label, paddings)\n    y_train = tf.convert_to_tensor(y_train, tf.float32)\n    return x_train, y_train\n\n\nIMAGE_FEATURE_MAP = {\n    'image/encoded': tf.io.FixedLenFeature([], tf.string),\n    'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),\n    'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),\n    'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),\n    'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),\n    'image/object/class/text': tf.io.VarLenFeature(tf.string),\n}\n\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\n                         (59, 119), (116, 90), (156, 198), (373, 326)],\n                        np.float32) / 416\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\n\nyolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),\n                              (81, 82), (135, 169), (344, 319)],\n                             np.float32) / 416\nyolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])\n\nYOLOV3_LAYER_LIST = [\n    'yolo_darknet',\n    'yolo_conv_0',\n    'yolo_output_0',\n    'yolo_conv_1',\n    'yolo_output_1',\n    'yolo_conv_2',\n    'yolo_output_2',\n]\n\nYOLOV3_TINY_LAYER_LIST = [\n    'yolo_darknet',\n    'yolo_conv_0',\n    'yolo_output_0',\n    'yolo_conv_1',\n    'yolo_output_1',\n]\n\n\ndef load_darknet_weights(model, weights_file, tiny=False):\n    wf = open(weights_file, 'rb')\n    major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)\n\n    if tiny:\n        layers = YOLOV3_TINY_LAYER_LIST\n    else:\n        layers = YOLOV3_LAYER_LIST\n\n    for layer_name in layers:\n        sub_model = model.get_layer(layer_name)\n        for i, layer in enumerate(sub_model.layers):\n            if not layer.name.startswith('conv2d'):\n                continue\n            batch_norm = None\n            if i + 1 < len(sub_model.layers) and \\\n                    sub_model.layers[i + 1].name.startswith('batch_norm'):\n                batch_norm = sub_model.layers[i + 1]\n\n            filters = layer.filters\n            size = layer.kernel_size[0]\n            in_dim = layer.get_input_shape_at(0)[-1]\n\n            if batch_norm is None:\n                conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)\n            else:\n                # darknet [beta, gamma, mean, variance]\n                bn_weights = np.fromfile(\n                    wf, dtype=np.float32, count=4 * filters)\n                # tf [gamma, beta, mean, variance]\n                bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]\n\n            # darknet shape (out_dim, in_dim, height, width)\n            conv_shape = (filters, in_dim, size, size)\n            conv_weights = np.fromfile(\n                wf, dtype=np.float32, count=np.product(conv_shape))\n            # tf shape (height, width, in_dim, out_dim)\n            conv_weights = conv_weights.reshape(\n                conv_shape).transpose([2, 3, 1, 0])\n\n            if batch_norm is None:\n                layer.set_weights([conv_weights, conv_bias])\n            else:\n                layer.set_weights([conv_weights])\n                batch_norm.set_weights(bn_weights)\n\n    assert len(wf.read()) == 0, 'failed to read all data'\n    wf.close()\n\n\ndef freeze_all(model, frozen=True):\n    model.trainable = not frozen\n    if isinstance(model, tf.keras.Model):\n        for l in model.layers:\n            freeze_all(l, frozen)\n\n\ndef DarknetConv(x, filters, size, strides=1, batch_norm=True):\n    if strides == 1:\n        padding = 'same'\n    else:\n        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding\n        padding = 'valid'\n    x = Conv2D(filters=filters, kernel_size=size,\n               strides=strides, padding=padding,\n               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)\n    if batch_norm:\n        x = BatchNormalization()(x)\n        x = LeakyReLU(alpha=0.1)(x)\n    return x\n\n\ndef DarknetResidual(x, filters):\n    prev = x\n    x = DarknetConv(x, filters // 2, 1)\n    x = DarknetConv(x, filters, 3)\n    x = Add()([prev, x])\n    return x\n\n\ndef DarknetBlock(x, filters, blocks):\n    x = DarknetConv(x, filters, 3, strides=2)\n    for _ in range(blocks):\n        x = DarknetResidual(x, filters)\n    return x\n\n\ndef Darknet(name=None):\n    x = inputs = Input([None, None, 3])\n    x = DarknetConv(x, 32, 3)\n    x = DarknetBlock(x, 64, 1)\n    x = DarknetBlock(x, 128, 2)  # skip connection\n    x = x_36 = DarknetBlock(x, 256, 8)  # skip connection\n    x = x_61 = DarknetBlock(x, 512, 8)\n    x = DarknetBlock(x, 1024, 4)\n    return tf.keras.Model(inputs, (x_36, x_61, x), name=name)\n\n\ndef YoloConv(filters, name=None):\n    def yolo_conv(x_in):\n        if isinstance(x_in, tuple):\n            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\n            x, x_skip = inputs\n\n            # concat with skip connection\n            x = DarknetConv(x, filters, 1)\n            x = UpSampling2D(2)(x)\n            x = Concatenate()([x, x_skip])\n        else:\n            x = inputs = Input(x_in.shape[1:])\n\n        x = DarknetConv(x, filters, 1)\n        x = DarknetConv(x, filters * 2, 3)\n        x = DarknetConv(x, filters, 1)\n        x = DarknetConv(x, filters * 2, 3)\n        x = DarknetConv(x, filters, 1)\n        return Model(inputs, x, name=name)(x_in)\n\n    return yolo_conv\n\n\ndef YoloOutput(filters, anchors, classes, name=None):\n    def yolo_output(x_in):\n        x = inputs = Input(x_in.shape[1:])\n        x = DarknetConv(x, filters * 2, 3)\n        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)\n        x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],\n                                            anchors, classes + 5)))(x)\n        return tf.keras.Model(inputs, x, name=name)(x_in)\n\n    return yolo_output\n\n\n# As tensorflow lite doesn't support tf.size used in tf.meshgrid,\n# we reimplemented a simple meshgrid function that use basic tf function.\ndef _meshgrid(n_a, n_b):\n    return [\n        tf.reshape(tf.tile(tf.range(n_a), [n_b]), (n_b, n_a)),\n        tf.reshape(tf.repeat(tf.range(n_b), n_a), (n_b, n_a))\n    ]\n\n\ndef yolo_boxes(pred, anchors, classes):\n    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))\n    grid_size = tf.shape(pred)[1:3]\n    box_xy, box_wh, objectness, class_probs = tf.split(\n        pred, (2, 2, 1, classes), axis=-1)\n\n    box_xy = tf.sigmoid(box_xy)\n    objectness = tf.sigmoid(objectness)\n    class_probs = tf.sigmoid(class_probs)\n    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss\n\n    # !!! grid[x][y] == (y, x)\n    grid = _meshgrid(grid_size[1], grid_size[0])\n    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]\n\n    box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)\n    box_wh = tf.exp(box_wh) * anchors\n\n    box_x1y1 = box_xy - box_wh / 2\n    box_x2y2 = box_xy + box_wh / 2\n    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)\n\n    return bbox, objectness, class_probs, pred_box\n\n\ndef yolo_nms(outputs, anchors, masks, classes):\n    # boxes, conf, type\n    b, c, t = [], [], []\n\n    for o in outputs:\n        b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))\n        c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))\n        t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))\n\n    bbox = tf.concat(b, axis=1)\n    confidence = tf.concat(c, axis=1)\n    class_probs = tf.concat(t, axis=1)\n\n    scores = confidence * class_probs\n\n    dscores = tf.squeeze(scores, axis=0)\n    scores = tf.reduce_max(dscores, [1])\n    bbox = tf.reshape(bbox, (-1, 4))\n    classes = tf.argmax(dscores, 1)\n    selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(\n        boxes=bbox,\n        scores=scores,\n        max_output_size=100,\n        iou_threshold=0.5,\n        score_threshold=0.5,\n        soft_nms_sigma=0.5\n    )\n\n    num_valid_nms_boxes = tf.shape(selected_indices)[0]\n\n    selected_indices = tf.concat([selected_indices, tf.zeros(100 - num_valid_nms_boxes, tf.int32)],\n                                 0)\n    selected_scores = tf.concat([selected_scores, tf.zeros(100 - num_valid_nms_boxes, tf.float32)],\n                                -1)\n\n    boxes = tf.gather(bbox, selected_indices)\n    boxes = tf.expand_dims(boxes, axis=0)\n    scores = selected_scores\n    scores = tf.expand_dims(scores, axis=0)\n    classes = tf.gather(classes, selected_indices)\n    classes = tf.expand_dims(classes, axis=0)\n    valid_detections = num_valid_nms_boxes\n    valid_detections = tf.expand_dims(valid_detections, axis=0)\n\n    return boxes, scores, classes, valid_detections\n\n\ndef YoloV3(size=None, channels=3, anchors=yolo_anchors,\n           masks=yolo_anchor_masks, classes=80, training=False):\n    x = inputs = Input([size, size, channels], name='input')\n\n    x_36, x_61, x = Darknet(name='yolo_darknet')(x)\n\n    x = YoloConv(512, name='yolo_conv_0')(x)\n    output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)\n\n    x = YoloConv(256, name='yolo_conv_1')((x, x_61))\n    output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)\n\n    x = YoloConv(128, name='yolo_conv_2')((x, x_36))\n    output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)\n\n    if training:\n        return Model(inputs, (output_0, output_1, output_2), name='yolov3')\n\n    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\n                     name='yolo_boxes_0')(output_0)\n    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\n                     name='yolo_boxes_1')(output_1)\n    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),\n                     name='yolo_boxes_2')(output_2)\n\n    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\n                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))\n\n    return Model(inputs, outputs, name='yolov3')\n\n\ndef YoloLoss(anchors, classes, ignore_thresh=0.5):\n    def yolo_loss(y_true, y_pred):\n        # 1. transform all pred outputs\n        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))\n        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(\n            y_pred, anchors, classes)\n        pred_xy = pred_xywh[..., 0:2]\n        pred_wh = pred_xywh[..., 2:4]\n\n        # 2. transform all true outputs\n        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))\n        true_box, true_obj, true_class_idx = tf.split(\n            y_true, (4, 1, 1), axis=-1)\n        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2\n        true_wh = true_box[..., 2:4] - true_box[..., 0:2]\n\n        # give higher weights to small boxes\n        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]\n\n        # 3. inverting the pred box equations\n        grid_size = tf.shape(y_true)[1]\n        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\n        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)\n        true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(grid, tf.float32)\n        true_wh = tf.math.log(true_wh / anchors)\n        true_wh = tf.where(tf.math.is_inf(true_wh),\n                           tf.zeros_like(true_wh), true_wh)\n\n        # 4. calculate all masks\n        obj_mask = tf.squeeze(true_obj, -1)\n        # ignore false positive when iou is over threshold\n        best_iou = tf.map_fn(\n            lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(\n                x[1], tf.cast(x[2], tf.bool))), axis=-1),\n            (pred_box, true_box, obj_mask),\n            tf.float32)\n        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)\n\n        # 5. calculate all losses\n        xy_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)\n        wh_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)\n        obj_loss = binary_crossentropy(true_obj, pred_obj)\n        obj_loss = obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss\n        # TODO: use binary_crossentropy instead\n        class_loss = obj_mask * sparse_categorical_crossentropy(\n            true_class_idx, pred_class)\n\n        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)\n        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))\n        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))\n        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))\n        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))\n\n        return xy_loss + wh_loss + obj_loss + class_loss\n\n    return yolo_loss\n\n\ndef broadcast_iou(box_1, box_2):\n    # box_1: (..., (x1, y1, x2, y2))\n    # box_2: (N, (x1, y1, x2, y2))\n\n    # broadcast boxes\n    box_1 = tf.expand_dims(box_1, -2)\n    box_2 = tf.expand_dims(box_2, 0)\n    # new_shape: (..., N, (x1, y1, x2, y2))\n    new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))\n    box_1 = tf.broadcast_to(box_1, new_shape)\n    box_2 = tf.broadcast_to(box_2, new_shape)\n\n    int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -\n                       tf.maximum(box_1[..., 0], box_2[..., 0]), 0)\n    int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -\n                       tf.maximum(box_1[..., 1], box_2[..., 1]), 0)\n    int_area = int_w * int_h\n    box_1_area = (box_1[..., 2] - box_1[..., 0]) * \\\n        (box_1[..., 3] - box_1[..., 1])\n    box_2_area = (box_2[..., 2] - box_2[..., 0]) * \\\n        (box_2[..., 3] - box_2[..., 1])\n    return int_area / (box_1_area + box_2_area - int_area)\n\n\ndef main():\n    anchors = yolo_anchors\n    anchor_masks = yolo_anchor_masks\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data_dir\", dest=\"data_dir\",\n                        help=\"Required. The path where data locates.\")\n    parser.add_argument(\"--output_data\", dest=\"output_data\", default=tempfile.mkdtemp(),\n                        help=\"Required. The path where voc parquet data locates.\")\n    parser.add_argument(\"--data_year\", dest=\"data_year\", default=\"2009\",\n                        help=\"Required. The voc data date.\")\n    parser.add_argument(\"--split_name_train\", dest=\"split_name_train\", default=\"train\",\n                        help=\"Required. Split name.\")\n    parser.add_argument(\"--split_name_test\", dest=\"split_name_test\", default=\"val\",\n                        help=\"Required. Split name.\")\n    parser.add_argument(\"--names\", dest=\"names\",\n                        help=\"Required. The path where class names locates.\")\n    parser.add_argument(\"--weights\", dest=\"weights\", default=\"./checkpoints/yolov3.weights\",\n                        help=\"Required. The path where weights locates.\")\n    parser.add_argument(\"--checkpoint\", dest=\"checkpoint\", default=\"./checkpoints/yolov3.tf\",\n                        help=\"Required. The path where checkpoint locates.\")\n    parser.add_argument(\"--checkpoint_folder\", dest=\"checkpoint_folder\", default=\"./checkpoints\",\n                        help=\"Required. The path where saved checkpoint locates.\")\n    parser.add_argument(\"--epochs\", dest=\"epochs\", type=int, default=2,\n                        help=\"Required. epochs.\")\n    parser.add_argument(\"--batch_size\", dest=\"batch_size\", type=int, default=16,\n                        help=\"Required. batch size.\")\n    parser.add_argument(\"--cluster_mode\", dest=\"cluster_mode\", default=\"local\",\n                        help=\"Required. Run on local/yarn/k8s mode.\")\n    parser.add_argument(\"--class_num\", dest=\"class_num\", type=int, default=20,\n                        help=\"Required. class num.\")\n    parser.add_argument(\"--worker_num\", type=int, default=1,\n                        help=\"The number of slave nodes to be used in the cluster.\"\n                             \"You can change it depending on your own cluster setting.\")\n    parser.add_argument(\"--cores\", type=int, default=4,\n                        help=\"The number of cpu cores you want to use on each node. \"\n                             \"You can change it depending on your own cluster setting.\")\n    parser.add_argument(\"--memory\", type=str, default=\"20g\",\n                        help=\"The memory you want to use on each node. \"\n                             \"You can change it depending on your own cluster setting.\")\n    parser.add_argument(\"--object_store_memory\", type=str, default=\"10g\",\n                        help=\"The memory you want to use on each node. \"\n                             \"You can change it depending on your own cluster setting.\")\n    parser.add_argument('--k8s_master', type=str, default=\"\",\n                        help=\"The k8s master. \"\n                             \"It should be k8s://https://<k8s-apiserver-host>: \"\n                             \"<k8s-apiserver-port>.\")\n    parser.add_argument(\"--container_image\", type=str, default=\"\",\n                        help=\"The runtime k8s image. \")\n    parser.add_argument('--k8s_driver_host', type=str, default=\"\",\n                        help=\"The k8s driver localhost.\")\n    parser.add_argument('--k8s_driver_port', type=str, default=\"\",\n                        help=\"The k8s driver port.\")\n\n    options = parser.parse_args()\n\n    # convert yolov3 weights\n    yolo = YoloV3(classes=80)\n    load_darknet_weights(yolo, options.weights)\n    yolo.save_weights(options.checkpoint)\n\n    def model_creator(config):\n        model = YoloV3(DEFAULT_IMAGE_SIZE, training=True, classes=options.class_num)\n        anchors = yolo_anchors\n        anchor_masks = yolo_anchor_masks\n\n        model_pretrained = YoloV3(\n            DEFAULT_IMAGE_SIZE, training=True, classes=80)\n        model_pretrained.load_weights(options.checkpoint)\n\n        model.get_layer('yolo_darknet').set_weights(\n            model_pretrained.get_layer('yolo_darknet').get_weights())\n        freeze_all(model.get_layer('yolo_darknet'))\n\n        optimizer = tf.keras.optimizers.Adam(lr=1e-3)\n        loss = [YoloLoss(anchors[mask], classes=options.class_num)\n                for mask in anchor_masks]\n        model.compile(optimizer=optimizer, loss=loss,\n                      run_eagerly=False)\n        return model\n\n    # prepare data\n    class_map = {name: idx for idx, name in enumerate(\n        open(options.names).read().splitlines())}\n    dataset_path = os.path.join(options.data_dir, \"VOCdevkit\")\n    voc_train_path = os.path.join(options.output_data, \"train_dataset\")\n    voc_val_path = os.path.join(options.output_data, \"val_dataset\")\n\n    write_parquet(format=\"voc\", voc_root_path=dataset_path, output_path=\"file://\" + voc_train_path,\n                  splits_names=[(options.data_year, options.split_name_train)], classes=class_map)\n    write_parquet(format=\"voc\", voc_root_path=dataset_path, output_path=\"file://\" + voc_val_path,\n                  splits_names=[(options.data_year, options.split_name_test)], classes=class_map)\n\n    output_types = {\"image\": tf.string, \"label\": tf.float32, \"image_id\": tf.string}\n    output_shapes = {\"image\": (), \"label\": (None, 5), \"image_id\": ()}\n\n    def train_data_creator(config, batch_size):\n        train_dataset = read_parquet(format=\"tf_dataset\", path=voc_train_path,\n                                     output_types=output_types,\n                                     output_shapes=output_shapes)\n        train_dataset = train_dataset.map(\n            lambda data_dict: (data_dict[\"image\"], data_dict[\"label\"]))\n        train_dataset = train_dataset.map(parse_data_train)\n        train_dataset = train_dataset.shuffle(buffer_size=512)\n        train_dataset = train_dataset.batch(batch_size)\n        train_dataset = train_dataset.map(lambda x, y: (\n            transform_images(x, DEFAULT_IMAGE_SIZE),\n            transform_targets(y, anchors, anchor_masks, DEFAULT_IMAGE_SIZE)))\n        train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n        return train_dataset\n\n    def val_data_creator(config, batch_size):\n        val_dataset = read_parquet(format=\"tf_dataset\", path=voc_val_path,\n                                   output_types=output_types,\n                                   output_shapes=output_shapes)\n        val_dataset = val_dataset.map(\n            lambda data_dict: (data_dict[\"image\"], data_dict[\"label\"]))\n        val_dataset = val_dataset.map(parse_data_train)\n        val_dataset = val_dataset.batch(batch_size)\n        val_dataset = val_dataset.map(lambda x, y: (\n            transform_images(x, DEFAULT_IMAGE_SIZE),\n            transform_targets(y, anchors, anchor_masks, DEFAULT_IMAGE_SIZE)))\n        return val_dataset\n\n    callbacks = [\n        ReduceLROnPlateau(verbose=1),\n        EarlyStopping(patience=3, verbose=1),\n        ModelCheckpoint(options.checkpoint_folder + '/yolov3_train_{epoch}.tf',\n                        verbose=1, save_weights_only=True),\n        TensorBoard(log_dir='logs')\n    ]\n\n    if options.cluster_mode == \"local\":\n        init_orca_context(cluster_mode=\"local\", cores=options.cores, num_nodes=options.worker_num,\n                          memory=options.memory, init_ray_on_spark=True, enable_numa_binding=False,\n                          object_store_memory=options.object_store_memory)\n    elif options.cluster_mode == \"k8s\":\n        init_orca_context(cluster_mode=\"k8s\", master=options.k8s_master,\n                          container_image=options.container_image,\n                          init_ray_on_spark=True, enable_numa_binding=False,\n                          num_nodes=options.worker_num, cores=options.cores, memory=options.memory,\n                          object_store_memory=options.object_store_memory,\n                          conf={\"spark.driver.host\": options.k8s_driver_host,\n                                \"spark.driver.port\": options.k8s_driver_port})\n    elif options.cluster_mode == \"yarn\":\n        init_orca_context(cluster_mode=\"yarn-client\", cores=options.cores,\n                          num_nodes=options.worker_num,\n                          memory=options.memory, init_ray_on_spark=True, enable_numa_binding=False,\n                          object_store_memory=options.object_store_memory)\n\n    trainer = Estimator.from_keras(model_creator=model_creator)\n\n    trainer.fit(train_data_creator,\n                epochs=options.epochs,\n                batch_size=options.batch_size,\n                steps_per_epoch=3473 // options.batch_size,\n                callbacks=callbacks,\n                validation_data=val_data_creator,\n                validation_steps=3581 // options.batch_size)\n    stop_orca_context()\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except SystemExit:\n        pass\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.broadcast_to", "tensorflow.keras.layers.Concatenate", "tensorflow.sigmoid", "tensorflow.squeeze", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Add", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.math.is_inf", "tensorflow.keras.losses.sparse_categorical_crossentropy", "tensorflow.split", "tensorflow.reduce_sum", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.math.log", "tensorflow.minimum", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.maximum", "tensorflow.io.FixedLenFeature", "tensorflow.keras.layers.BatchNormalization", "tensorflow.io.VarLenFeature", "tensorflow.constant", "numpy.fromfile", "tensorflow.shape", "tensorflow.stack", "tensorflow.keras.losses.binary_crossentropy", "tensorflow.image.resize", "tensorflow.keras.regularizers.l2", "tensorflow.expand_dims", "tensorflow.zeros_like", "tensorflow.keras.layers.LeakyReLU", "tensorflow.cast", "tensorflow.boolean_mask", "tensorflow.reduce_any", "tensorflow.pad", "tensorflow.io.decode_jpeg", "tensorflow.zeros", "tensorflow.keras.layers.UpSampling2D", "tensorflow.equal", "tensorflow.tensor_scatter_nd_update", "tensorflow.range", "tensorflow.keras.Model", "tensorflow.exp", "tensorflow.square", "tensorflow.argmax", "tensorflow.where", "numpy.array", "numpy.product", "tensorflow.gather", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.image.non_max_suppression_with_scores", "tensorflow.keras.layers.Input" ] ]
ThatFrankGuy/qiskit-aqua
[ "daf58fa9aabecca82982bf612be0cc4f7d7bdc66" ]
[ "test/aqua/test_qgan.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# =============================================================================\n\nimport unittest\n\nimport numpy as np\nfrom qiskit import QuantumCircuit, QuantumRegister\n\nfrom qiskit.aqua.components.uncertainty_models import UniformDistribution, UnivariateVariationalDistribution\nfrom qiskit.aqua.components.variational_forms import RY\n\nfrom qiskit.aqua.algorithms.adaptive.qgan.qgan import QGAN\nfrom qiskit.aqua.input import QGANInput\nfrom qiskit.aqua import aqua_globals, QuantumInstance, run_algorithm\nfrom qiskit.aqua.components.initial_states import Custom\n\nfrom qiskit import BasicAer\n\nfrom test.aqua.common import QiskitAquaTestCase\n\n\nclass TestQGAN(QiskitAquaTestCase):\n\n    def setUp(self):\n        super().setUp()\n\n        # Number training data samples\n        N = 5000\n        # Load data samples from log-normal distribution with mean=1 and standard deviation=1\n        mu = 1\n        sigma = 1\n        self._real_data = np.random.lognormal(mean=mu, sigma=sigma, size=N)\n        # Set the data resolution\n        # Set upper and lower data values as list of k min/max data values [[min_0,max_0],...,[min_k-1,max_k-1]]\n        self._bounds = [0., 3.]\n        # Set number of qubits per data dimension as list of k qubit values[#q_0,...,#q_k-1]\n        num_qubits = [2]\n        # Batch size\n        batch_size = 100\n        # Set number of training epochs\n        num_epochs = 10\n        self._params_torch = {'algorithm': {'name': 'QGAN',\n                                            'num_qubits': num_qubits,\n                                            'batch_size': batch_size,\n                                            'num_epochs': num_epochs},\n                              'problem': {'name': 'distribution_learning_loading', 'random_seed': 7},\n                              'generative_network': {'name': 'QuantumGenerator',\n                                                     'bounds': self._bounds,\n                                                     'num_qubits': num_qubits,\n                                                     'init_params': None,\n                                                     'snapshot_dir': None\n                                                     },\n                              'discriminative_network': {'name': 'PytorchDiscriminator',\n                                                         'n_features': len(num_qubits)}\n                              }\n        self._params_numpy = {'algorithm': {'name': 'QGAN',\n                                            'num_qubits': num_qubits,\n                                            'batch_size': batch_size,\n                                            'num_epochs': num_epochs},\n                              'problem': {'name': 'distribution_learning_loading', 'random_seed': 7},\n                              'generative_network': {'name': 'QuantumGenerator',\n                                                     'bounds': self._bounds,\n                                                     'num_qubits': num_qubits,\n                                                     'init_params': None,\n                                                     'snapshot_dir': None\n                                                     },\n                              'discriminative_network': {'name': 'NumpyDiscriminator',\n                                                         'n_features': len(num_qubits)}\n                              }\n\n        # Initialize qGAN\n        self.qgan = QGAN(self._real_data, self._bounds, num_qubits, batch_size, num_epochs, snapshot_dir=None)\n        self.qgan.seed = 7\n        # Set quantum instance to run the quantum generator\n        self.quantum_instance_statevector = QuantumInstance(backend=BasicAer.get_backend('statevector_simulator'),\n                                                            circuit_caching=False, seed_simulator=2, seed_transpiler=2)\n        self.quantum_instance_qasm = QuantumInstance(backend=BasicAer.get_backend('qasm_simulator'), shots=1000,\n                                                     circuit_caching=False, seed_simulator=2, seed_transpiler=2)\n        # Set entangler map\n        entangler_map = [[0, 1]]\n\n        # Set an initial state for the generator circuit\n        init_dist = UniformDistribution(sum(num_qubits), low=self._bounds[0], high=self._bounds[1])\n        q = QuantumRegister(sum(num_qubits), name='q')\n        qc = QuantumCircuit(q)\n        init_dist.build(qc, q)\n        init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)\n        # Set variational form\n        var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution, entangler_map=entangler_map,\n                      entanglement_gate='cz')\n        # Set generator's initial parameters\n        init_params = aqua_globals.random.rand(var_form._num_parameters) * 2 * 1e-2\n        # Set generator circuit\n        g_circuit = UnivariateVariationalDistribution(sum(num_qubits), var_form, init_params,\n                                                      low=self._bounds[0],\n                                                      high=self._bounds[1])\n        # initial_distribution=init_distribution,\n        # Set quantum generator\n        self.qgan.set_generator(generator_circuit=g_circuit)\n\n    def test_sample_generation(self):\n        samples_statevector, weights_statevector = self.qgan._generator.get_output(self.quantum_instance_statevector,\n                                                                                   shots=100)\n        samples_qasm, weights_qasm = self.qgan._generator.get_output(self.quantum_instance_qasm, shots=100)\n        samples_qasm, weights_qasm = zip(*sorted(zip(samples_qasm, weights_qasm)))\n        for i, weight_q in enumerate(weights_qasm):\n            self.assertAlmostEqual(weight_q, weights_statevector[i], delta=0.1)\n\n    def test_qgan_training(self):\n        trained_statevector = self.qgan.run(self.quantum_instance_statevector)\n        trained_qasm = self.qgan.run(self.quantum_instance_qasm)\n        self.assertAlmostEqual(trained_qasm['rel_entr'], trained_statevector['rel_entr'], delta=0.1)\n\n    def test_qgan_training_run_algo_torch(self):\n        try:\n            algo_input = QGANInput(self._real_data, self._bounds)\n            trained_statevector = run_algorithm(params=self._params_torch, algo_input=algo_input,\n                                                backend=BasicAer.get_backend('statevector_simulator'))\n            trained_qasm = run_algorithm(self._params_torch, algo_input, backend=BasicAer.get_backend('qasm_simulator'))\n            self.assertAlmostEqual(trained_qasm['rel_entr'], trained_statevector['rel_entr'], delta=0.1)\n        except Exception as e:\n            self.skipTest(\"Torch may not be installed: '{}'\".format(str(e)))\n\n    def test_qgan_training_run_algo_numpy(self):\n        algo_input = QGANInput(self._real_data, self._bounds)\n        trained_statevector = run_algorithm(params=self._params_numpy, algo_input=algo_input,\n                                            backend=BasicAer.get_backend('statevector_simulator'))\n        trained_qasm = run_algorithm(self._params_numpy, algo_input, backend=BasicAer.get_backend('qasm_simulator'))\n        self.assertAlmostEqual(trained_qasm['rel_entr'], trained_statevector['rel_entr'], delta=0.1)\n\n\nif __name__ == '__main__':\n    unittest.main()\n" ]
[ [ "numpy.random.lognormal" ] ]
shiqitao/AutoGraph
[ "41f5956c859ff0fb6f87109d5f8731276bdcc2ef" ]
[ "ModelGCN4.py" ]
[ "import torch\nfrom sklearn.metrics import accuracy_score\nfrom torch.nn import Linear\nfrom torch.nn.functional import relu, dropout, log_softmax, nll_loss, leaky_relu\nfrom torch_geometric.nn import GCNConv, JumpingKnowledge\nfrom torch_geometric.utils.num_nodes import maybe_num_nodes\nfrom torch_sparse import coalesce\n\nfrom Result import Result\n\n\ndef filter_adj(row, col, edge_attr, mask):\n    return row[mask], col[mask], None if edge_attr is None else edge_attr[mask]\n\n\ndef dropout_adj(edge_index, edge_attr=None, p=0.5, force_undirected=False,\n                num_nodes=None, training=True):\n    if p < 0. or p > 1.:\n        raise ValueError('Dropout probability has to be between 0 and 1, '\n                         'but got {}'.format(p))\n\n    if not training:\n        return edge_index, edge_attr\n\n    N = maybe_num_nodes(edge_index, num_nodes)\n    row, col = edge_index\n\n    if force_undirected:\n        row, col, edge_attr = filter_adj(row, col, edge_attr, row < col)\n\n    mask = edge_index.new_full((row.size(0),), 1 - p, dtype=torch.float)\n    mask = torch.bernoulli(mask).to(torch.bool)\n\n    row, col, edge_attr = filter_adj(row, col, edge_attr, mask)\n\n    if force_undirected:\n        edge_index = torch.stack(\n            [torch.cat([row, col], dim=0),\n             torch.cat([col, row], dim=0)], dim=0)\n        if edge_attr is not None:\n            edge_attr = torch.cat([edge_attr, edge_attr], dim=0)\n        edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)\n    else:\n        edge_index = torch.stack([row, col], dim=0)\n\n    return edge_index, edge_attr\n\n\nclass ModelGCN(torch.nn.Module):\n\n    def __init__(self, num_layers, hidden_list, activation, data):\n        super(ModelGCN, self).__init__()\n        assert len(hidden_list) == num_layers + 1\n        self.linear_1 = Linear(data.num_features, hidden_list[0])\n        self.convs = torch.nn.ModuleList()\n        for i in range(num_layers):\n            self.convs.append(GCNConv(hidden_list[i], hidden_list[i + 1]))\n        self.JK = JumpingKnowledge(mode='max')\n        self.linear_2 = Linear(hidden_list[-1], data.num_class)\n        if activation == \"relu\":\n            self.activation = relu\n        elif activation == \"leaky_relu\":\n            self.activation = leaky_relu\n        self.reg_params = list(self.linear_1.parameters()) + list(self.convs.parameters()) + list(\n            self.JK.parameters()) + list(self.linear_2.parameters())\n\n    def reset_parameters(self):\n        self.linear_1.reset_parameters()\n        for conv in self.convs:\n            conv.reset_parameters()\n        self.linear_2.reset_parameters()\n\n    def forward(self, data):\n        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight\n        edge_index, edge_weight = dropout_adj(edge_index, edge_attr=edge_weight, p=0.8, training=self.training)\n        x_jk = []\n        x = self.linear_1(x)\n        x = self.activation(x)\n        x_jk.append(dropout(x, p=0.5, training=self.training))\n        for i in range(len(self.convs)):\n            x = self.convs[i](x_jk[-1], edge_index, edge_weight=edge_weight)\n            if i != len(self.convs) - 1:\n                x_jk.append(self.activation(x))\n            else:\n                x_jk.append(dropout(x, p=0.5, training=self.training))\n        x = self.JK(x_jk)\n        x = self.linear_2(x)\n        return log_softmax(x, dim=-1)\n\n\ndef main_model_gcn(data, num_layers, hidden_list, activation, if_all=False):\n    torch.backends.cudnn.deterministic = True\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    model = ModelGCN(\n        num_layers=num_layers,\n        hidden_list=hidden_list,\n        activation=activation,\n        data=data\n    )\n\n    data.split_train_valid()\n    model = model.to(device)\n    data = data.to(device)\n    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)\n\n    epoch = 1\n    loss_train = float(\"inf\")\n    loss_valid = float(\"inf\")\n    best_loss_train = float(\"inf\")\n    best_loss_valid = float(\"inf\")\n    best_epoch = 0\n    while best_epoch + 10 >= epoch:\n        model.train()\n        optimizer.zero_grad()\n        predict = model(data)\n        loss_train = nll_loss(predict[data.mask_train], data.y[data.mask_train])\n        loss_valid = nll_loss(predict[data.mask_valid], data.y[data.mask_valid])\n        l2_reg = sum((torch.sum(param ** 2) for param in model.reg_params))\n        loss = loss_train + 0.001 * l2_reg\n        loss.backward()\n        optimizer.step()\n        if loss_valid < best_loss_valid:\n            best_loss_train = loss_train\n            best_loss_valid = loss_valid\n            best_epoch = epoch\n        epoch += 1\n\n    model.eval()\n    with torch.no_grad():\n        result = model(data)\n    if if_all:\n        return Result(\n            result=result.cpu(),\n            loss_train=loss_train.cpu(),\n            loss_valid=loss_valid.cpu(),\n            acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),\n                                     result[data.mask_train].max(1)[1].cpu().numpy().flatten()),\n            acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),\n                                     result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),\n            epoch=epoch - 1,\n        )\n    else:\n        return Result(\n            result=result[data.mask_test].max(1)[1].cpu().numpy().flatten(),\n            loss_train=loss_train.cpu(),\n            loss_valid=loss_valid.cpu(),\n            acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),\n                                     result[data.mask_train].max(1)[1].cpu().numpy().flatten()),\n            acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),\n                                     result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),\n            epoch=epoch - 1,\n        )\n" ]
[ [ "torch.sum", "torch.nn.functional.log_softmax", "torch.stack", "torch.nn.Linear", "torch.nn.functional.dropout", "torch.nn.functional.nll_loss", "torch.no_grad", "torch.cuda.is_available", "torch.nn.ModuleList", "torch.cat", "torch.bernoulli" ] ]
jake3991/sonar-SLAM
[ "995bfa61e61d99667bec7a7f70bea4d6d486c312" ]
[ "bruce_slam/src/bruce_slam/sonar.py" ]
[ "import numpy as np\nfrom scipy.interpolate import interp1d\nimport cv2\nimport rospy\n\nfrom .utils.topics import *\nfrom .utils.conversions import r2n\n\n\nclass OculusFireMsg(object):\n    \"\"\"Oculus Fire Message\n\n    uint8_t masterMode;       // mode 0 is flexi mode, needs full fire message (not available for third party developers)\n                              // mode 1 - Low Frequency Mode (wide aperture, navigation)\n                              // mode 2 - High Frequency Mode (narrow aperture, target identification)\n    PingRateType pingRate;    // Sets the maximum ping rate.\n    uint8_t networkSpeed;     // Used to reduce the network comms speed (useful for high latency shared links)\n    uint8_t gammaCorrection;  // 0 and 0xff = gamma correction = 1.0\n                              // Set to 127 for gamma correction = 0.5\n    uint8_t flags;            // bit 0: 0 = interpret range as percent, 1 = interpret range as meters\n                              // bit 1: 0 = 8 bit data, 1 = 16 bit data\n                              // bit 2: 0 = wont send gain, 1 = send gain\n                              // bit 3: 0 = send full return message, 1 = send simple return message\n                              // bit 4: 0 = gain assistance off, 1 = gain assistance on\n                              // bit 5: 0 = low power mode off, 1 = low power mode on\n    double range;             // The range demand in percent or meters depending on flags\n    double gainPercent;       // The gain demand if gain assistance is off or intensity demand if gain assistance is on\n    double speedOfSound;      // meters/second, if set to zero then internal calc will apply using salinity\n    double salinity;          // ppt, set to zero if we are in fresh water and 35.0 if we are in salt water\n\n    \"\"\"\n\n    def __init__(self):\n        self.mode = None\n\n        self.gamma = None\n        self.flags = None\n        self.range = None\n        self.gain = None\n        self.speed_of_sound = None\n        self.salinity = None\n\n    def configure(self, ping):\n        self.mode = ping.fire_msg.mode\n        self.gamma = ping.fire_msg.gamma / 255.0\n        self.flags = ping.fire_msg.flags\n        self.range = ping.fire_msg.range\n        self.gain = ping.fire_msg.gain\n        self.speed_of_sound = ping.fire_msg.speed_of_sound\n        self.salinity = ping.fire_msg.salinity\n\n    def __str__(self):\n        return (\n            \"\\n=========================\\n\"\n            \"   Oculus Fire Message\\n\"\n            \"=========================\\n\"\n            \"Mode: {mode:>19d}\\n\"\n            \"Gamma: {gamma:>18.1f}\\n\"\n            \"Flags: {flags:>18b}\\n\"\n            \"Range: {range:17.1f}m\\n\"\n            \"Gain: {gain:>19.1f}\\n\"\n            \"Speed of sound: {speed_of_sound:5.1f}m/s\\n\"\n            \"Salinity: {salinity:>12.1f}ppt\\n\"\n            \"=========================\\n\".format(**self.__dict__)\n        )\n\n\nclass OculusProperty(object):\n    OCULUS_VERTICAL_APERTURE = {1: np.deg2rad(20), 2: np.deg2rad(12)}\n    OCULUS_PART_NUMBER = {1042: \"M1200d\", 1032: \"M750d\"}\n\n    noise = 0.01\n    # fmt: off\n    psf = np.array([[0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.0005, 0.0005, 0.0005, 0.0005, 0.    , 0.0005, 0.0005, 0.0005,\n                     0.0005, 0.    , 0.    , 0.0005, 0.0005, 0.    , 0.    , 0.    ,\n                     0.001 , 0.001 , 0.001 , 0.001 , 0.    , 0.    , 0.001 , 0.001 ,\n                     0.001 , 0.    , 0.    , 0.001 , 0.0015, 0.002 , 0.0015, 0.0005,\n                     0.    , 0.001 , 0.002 , 0.0025, 0.002 , 0.001 , 0.001 , 0.002 ,\n                     0.003 , 0.003 , 0.0015, 0.    , 0.0025, 0.005 , 0.005 , 0.0035,\n                     0.002 , 0.0105, 0.022 , 0.0355, 0.049 , 0.0615, 0.071 , 0.076 ,\n                     0.076 , 0.071 , 0.0615, 0.049 , 0.0355, 0.022 , 0.0105, 0.002 ,\n                     0.0035, 0.005 , 0.005 , 0.0025, 0.    , 0.0015, 0.003 , 0.003 ,\n                     0.002 , 0.001 , 0.001 , 0.002 , 0.0025, 0.002 , 0.001 , 0.    ,\n                     0.0005, 0.0015, 0.002 , 0.0015, 0.001 , 0.    , 0.    , 0.001 ,\n                     0.001 , 0.001 , 0.    , 0.    , 0.001 , 0.001 , 0.001 , 0.001 ,\n                     0.    , 0.    , 0.    , 0.0005, 0.0005, 0.    , 0.    , 0.0005,\n                     0.0005, 0.0005, 0.0005, 0.    , 0.0005, 0.0005, 0.0005, 0.0005,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ,\n                     0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    , 0.    ]])\n    # fmt: on\n\n    def __init__(self):\n        # model\n        self.model = \"\"\n        self.fire_msg = OculusFireMsg()\n\n        # range bins: [r1, ..., rn]\n        self.num_ranges = None\n        self.ranges = None\n        # r[i] - r[i - 1]\n        self.range_resolution = None\n        # n * resolution\n        self.max_range = 30.\n\n        # bearings: [b1, ..., bm]\n        self.num_bearings = None\n        # rad\n        self.bearings = None\n        # b[m] - b[1]\n        self.horizontal_aperture = np.radians(130.)\n        # mean: (b[m] - b[1]) / m\n        self.angular_resolution = None\n        # rad\n        self.vertical_aperture = None\n\n        ##################################################\n        # polar <-> Cartesian\n        ##################################################\n        # functions to transform between bearings and cols\n        # col -> bearing\n        self.c2b = None\n        # col <- bearing\n        self.b2c = None\n        # row -> range\n        self.ro2ra = None\n        # row <- range\n        self.ra2ro = None\n\n        # parameters for remapping from polar to Cartesian\n        self.remap_x = None\n        self.remap_y = None\n\n    def configure(self, ping):\n        self.fire_msg.configure(ping)\n\n        if \"part_number\" not in ping.__slots__:\n            # backward compatibility\n            self.model = \"M750d\"\n        else:\n            self.model = OculusProperty.OCULUS_PART_NUMBER[ping.part_number]\n\n        changed = False\n        if (\n            ping.num_ranges != self.num_ranges\n            or ping.range_resolution != self.range_resolution\n        ):\n            self.num_ranges = ping.num_ranges\n            self.range_resolution = ping.range_resolution\n            self.ranges = self.range_resolution * (1 + np.arange(self.num_ranges))\n            self.max_range = self.ranges[-1]\n\n            self.ro2ra = lambda ro: (ro + 1) * self.range_resolution\n            self.ra2ro = lambda ra: np.round(ra / self.range_resolution - 1)\n            changed = True\n\n        if len(ping.bearings) != self.num_bearings:\n            self.num_bearings = len(ping.bearings)\n            self.bearings = np.deg2rad(np.array(ping.bearings, np.float32) / 100)\n            self.horizontal_aperture = abs(self.bearings[-1] - self.bearings[0])\n            self.angular_resolution = self.horizontal_aperture / self.num_bearings\n            self.vertical_aperture = OculusProperty.OCULUS_VERTICAL_APERTURE[\n                self.fire_msg.mode\n            ]\n\n            self.b2c = interp1d(\n                self.bearings,\n                np.arange(self.num_bearings),\n                kind=\"cubic\",\n                bounds_error=False,\n                fill_value=-1,\n                assume_sorted=True,\n            )\n            self.c2b = interp1d(\n                np.arange(self.num_bearings),\n                self.bearings,\n                kind=\"cubic\",\n                bounds_error=False,\n                fill_value=-1,\n                assume_sorted=True,\n            )\n            changed = True\n\n        if changed:\n            height = self.max_range\n            rows = self.num_ranges\n            width = np.sin((self.bearings[-1] - self.bearings[0]) / 2) * height * 2\n            cols = int(np.ceil(width / self.range_resolution))\n\n            XX, YY = np.meshgrid(range(cols), range(rows))\n            x = self.range_resolution * (rows - YY)\n            y = self.range_resolution * (-cols / 2.0 + XX + 0.5)\n            b = np.arctan2(y, x)\n            r = np.sqrt(x ** 2 + y ** 2)\n            self.remap_y = np.asarray(self.ra2ro(r), dtype=np.float32)\n            self.remap_x = np.asarray(self.b2c(b), dtype=np.float32)\n\n        return changed\n\n    def remap(self, ping=None, img=None):\n        if img is None:\n            img = r2n(ping)\n        img = np.array(img, dtype=img.dtype, order=\"F\")\n\n        if self.remap_x.shape[1] > img.shape[1]:\n            img.resize(*self.remap_x.shape)\n        # Not too much difference between cubic and nearest\n        img = cv2.remap(img, self.remap_x, self.remap_y, cv2.INTER_NEAREST)\n        return img\n\n    @staticmethod\n    def adjust_gamma(img, gamma=1.0):\n        return cv2.pow(img / 255.0, gamma) * 255.0\n\n    def deconvolve(self, img):\n        \"\"\"Remove impulse response function from ping\n\n        Copy from https://github.com/pvazteixeira/multibeam\n        \"\"\"\n        img = np.float32(img)\n\n        img_f = cv2.dft(img, 
flags=cv2.DFT_COMPLEX_OUTPUT)\n psf_padded = np.zeros_like(img)\n kh, kw = self.psf.shape\n psf_padded[:kh, :kw] = self.psf\n\n # compute (padded) psf's DFT\n psf_f = cv2.dft(psf_padded, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)\n\n psf_f_2 = (psf_f ** 2).sum(-1)\n ipsf_f = psf_f / (psf_f_2 + self.noise)[..., np.newaxis]\n\n result_f = cv2.mulSpectrums(img_f, ipsf_f, 0)\n result = cv2.idft(result_f, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)\n\n result = np.roll(result, -kh // 2, 0)\n result = np.roll(result, -kw // 2, 1)\n\n # clip negative values, then rescale to the input's maximum\n result[result < 0] = 0\n result = (np.max(img) / np.max(result)) * result\n\n return result.astype(np.float32)\n\n def polygon(self, origin=(0, 0, 0), angular_resolution=0.2):\n from shapely.geometry import Polygon\n from shapely.affinity import affine_transform\n\n points = [(0, 0)]\n for bearing in np.arange(\n self.bearings[0], self.bearings[-1], angular_resolution\n ):\n c, s = np.cos(bearing), np.sin(bearing)\n points.append((self.max_range * c, self.max_range * s))\n poly = Polygon(points)\n\n c, s = np.cos(origin[2]), np.sin(origin[2])\n params = c, -s, s, c, origin[0], origin[1]\n poly = affine_transform(poly, params)\n return poly\n\n def plot(self, origin=(0, 0, 0), ax=None, zdown=True, **kwargs):\n import matplotlib.pyplot as plt\n from matplotlib.patches import Wedge\n if ax is None:\n ax = plt.gca()\n\n x, y, theta = origin\n min_bearing = self.bearings[0] + theta\n max_bearing = self.bearings[-1] + theta\n if zdown:\n x, y = y, x\n min_bearing, max_bearing = np.pi / 2 - max_bearing, np.pi / 2 - min_bearing\n fov = Wedge(\n (x, y),\n self.max_range,\n np.rad2deg(min_bearing),\n np.rad2deg(max_bearing),\n fill=False,\n **kwargs\n )\n ax.add_artist(fov)\n\n def __str__(self):\n fire_msg = str(self.fire_msg)\n d = dict(self.__dict__)\n d[\"angular_resolution\"] = np.degrees(d[\"angular_resolution\"])\n d[\"horizontal_aperture\"] = np.degrees(d[\"horizontal_aperture\"])\n d[\"vertical_aperture\"] = np.degrees(d[\"vertical_aperture\"])\n return (\n \"\\n===============================\\n\"\n \" Oculus Property\\n\"\n \"===============================\\n\"\n \"Model: {model:>24}\\n\"\n \"#Ranges: {num_ranges:>22.0f}\\n\"\n \"Range resolution: {range_resolution:>12.2f}m\\n\"\n \"#Beams: {num_bearings:>23}\\n\"\n \"Angular resolution: {angular_resolution:>8.1f}deg\\n\"\n \"Horizontal aperture: {horizontal_aperture:>7.1f}deg\\n\"\n \"Vertical aperture: {vertical_aperture:>9.1f}deg\\n\"\n \"===============================\\n\".format(**d) + fire_msg\n )\n" ]
[ [ "numpy.degrees", "numpy.zeros_like", "numpy.roll", "numpy.arctan2", "numpy.sqrt", "numpy.ceil", "numpy.rad2deg", "matplotlib.pyplot.gca", "numpy.float32", "numpy.cos", "numpy.arange", "numpy.max", "numpy.array", "numpy.sin", "numpy.round", "numpy.radians", "numpy.deg2rad" ] ]
sbrass/madminer
[ "df664344d1a43551ee9ecd91fe2dc0bccb4d529f" ]
[ "madminer/ml/base.py" ]
[ "import json\nimport logging\nimport numpy as np\nimport os\nimport torch\n\nfrom ..utils.various import create_missing_folders, load_and_check\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Estimator:\n \"\"\"\n Abstract class for any ML estimator. Subclassed by ParameterizedRatioEstimator, DoubleParameterizedRatioEstimator,\n ScoreEstimator, and LikelihoodEstimator.\n\n Each instance of this class represents one neural estimator. The most important functions are:\n\n * `Estimator.train()` to train an estimator. The keyword `method` determines the inference technique\n and whether a class instance represents a single-parameterized likelihood ratio estimator, a doubly-parameterized\n likelihood ratio estimator, or a local score estimator.\n * `Estimator.evaluate()` to evaluate the estimator.\n * `Estimator.save()` to save the trained model to files.\n * `Estimator.load()` to load the trained model from files.\n\n Please see the tutorial for a detailed walk-through.\n \"\"\"\n\n def __init__(self, features=None, n_hidden=(100,), activation=\"tanh\", dropout_prob=0.0):\n self.features = features\n self.n_hidden = n_hidden\n self.activation = activation\n self.dropout_prob = dropout_prob\n\n self.model = None\n self.n_observables = None\n self.n_parameters = None\n self.x_scaling_means = None\n self.x_scaling_stds = None\n\n def train(self, *args, **kwargs):\n raise NotImplementedError()\n\n def evaluate_log_likelihood(self, *args, **kwargs):\n \"\"\"\n Log likelihood estimation. Signature depends on the type of estimator. The first returned value is the log\n likelihood with shape `(n_thetas, n_x)`.\n \"\"\"\n raise NotImplementedError()\n\n def evaluate_log_likelihood_ratio(self, *args, **kwargs):\n \"\"\"\n Log likelihood ratio estimation. Signature depends on the type of estimator. The first returned value is the log\n likelihood ratio with shape `(n_thetas, n_x)` or `(n_x)`.\n \"\"\"\n raise NotImplementedError()\n\n def evaluate_score(self, *args, **kwargs):\n \"\"\"\n Score estimation. Signature depends on the type of estimator. The only returned value is the score with shape\n `(n_x)`.\n \"\"\"\n raise NotImplementedError()\n\n def evaluate(self, *args, **kwargs):\n raise NotImplementedError()\n\n def save(self, filename, save_model=False):\n\n \"\"\"\n Saves the trained model to four files: a JSON file with the settings, a pickled pyTorch state dict\n file, and numpy files for the mean and variance of the inputs (used for input scaling).\n\n Parameters\n ----------\n filename : str\n Path to the files. '_settings.json' and '_state_dict.pl' will be added.\n\n save_model : bool, optional\n If True, the whole model is saved in addition to the state dict. 
This is not necessary for loading it\n again with Estimator.load(), but can be useful for debugging, for instance to plot the computational graph.\n\n Returns\n -------\n None\n\n \"\"\"\n\n logger.info(\"Saving model to %s\", filename)\n\n if self.model is None:\n raise ValueError(\"No model -- train or load model before saving!\")\n\n # Check paths\n create_missing_folders([os.path.dirname(filename)])\n\n # Save settings\n logger.debug(\"Saving settings to %s_settings.json\", filename)\n\n settings = self._wrap_settings()\n\n with open(f\"{filename}_settings.json\", \"w\") as f:\n json.dump(settings, f)\n\n # Save scaling\n if self.x_scaling_stds is not None and self.x_scaling_means is not None:\n logger.debug(\"Saving input scaling information to %s_x_means.npy and %s_x_stds.npy\", filename, filename)\n np.save(f\"{filename}_x_means.npy\", self.x_scaling_means)\n np.save(f\"{filename}_x_stds.npy\", self.x_scaling_stds)\n\n # Save state dict\n logger.debug(\"Saving state dictionary to %s_state_dict.pt\", filename)\n torch.save(self.model.state_dict(), f\"{filename}_state_dict.pt\")\n\n # Save model\n if save_model:\n logger.debug(\"Saving model to %s_model.pt\", filename)\n torch.save(self.model, f\"{filename}_model.pt\")\n\n def load(self, filename):\n\n \"\"\"\n Loads a trained model from files.\n\n Parameters\n ----------\n filename : str\n Path to the files. '_settings.json' and '_state_dict.pl' will be added.\n\n Returns\n -------\n None\n\n \"\"\"\n\n logger.info(\"Loading model from %s\", filename)\n\n # Load settings and create model\n logger.debug(\"Loading settings from %s_settings.json\", filename)\n with open(f\"{filename}_settings.json\", \"r\") as f:\n settings = json.load(f)\n self._unwrap_settings(settings)\n self._create_model()\n\n # Load scaling\n try:\n self.x_scaling_means = np.load(f\"{filename}_x_means.npy\")\n self.x_scaling_stds = np.load(f\"{filename}_x_stds.npy\")\n logger.debug(\n \" Found input scaling information: means %s, stds %s\", self.x_scaling_means, self.x_scaling_stds\n )\n except FileNotFoundError:\n logger.warning(\"Scaling information not found in %s\", filename)\n self.x_scaling_means = None\n self.x_scaling_stds = None\n\n # Load state dict\n logger.debug(\"Loading state dictionary from %s_state_dict.pt\", filename)\n self.model.load_state_dict(torch.load(f\"{filename}_state_dict.pt\", map_location=\"cpu\"))\n\n def initialize_input_transform(self, x, transform=True, overwrite=True):\n if self.x_scaling_stds is not None and self.x_scaling_means is not None and not overwrite:\n logger.info(\n \"Input rescaling already defined. 
To overwrite, call initialize_input_transform(x, overwrite=True).\"\n )\n elif transform:\n logger.info(\"Setting up input rescaling\")\n self.x_scaling_means = np.mean(x, axis=0)\n self.x_scaling_stds = np.maximum(np.std(x, axis=0), 1.0e-6)\n else:\n logger.info(\"Disabling input rescaling\")\n # scaling arrays must have one entry per observable (x has shape (n_samples, n_observables))\n n_observables = x.shape[1]\n\n self.x_scaling_means = np.zeros(n_observables)\n self.x_scaling_stds = np.ones(n_observables)\n\n def _transform_inputs(self, x):\n if self.x_scaling_means is not None and self.x_scaling_stds is not None:\n if isinstance(x, torch.Tensor):\n x_scaled = x - torch.tensor(self.x_scaling_means, dtype=x.dtype, device=x.device)\n x_scaled = x_scaled / torch.tensor(self.x_scaling_stds, dtype=x.dtype, device=x.device)\n else:\n x_scaled = x - self.x_scaling_means\n x_scaled /= self.x_scaling_stds\n else:\n x_scaled = x\n return x_scaled\n\n def _wrap_settings(self):\n settings = {\n \"n_observables\": self.n_observables,\n \"n_parameters\": self.n_parameters,\n \"features\": self.features,\n \"n_hidden\": list(self.n_hidden),\n \"activation\": self.activation,\n \"dropout_prob\": self.dropout_prob,\n }\n return settings\n\n def _unwrap_settings(self, settings):\n try:\n _ = str(settings[\"estimator_type\"])\n except KeyError:\n raise RuntimeError(\n \"Can't find estimator type information in file. Maybe this file was created with\"\n \" an incompatible MadMiner version < v0.3.0?\"\n )\n\n self.n_observables = int(settings[\"n_observables\"])\n self.n_parameters = int(settings[\"n_parameters\"])\n self.n_hidden = tuple([int(item) for item in settings[\"n_hidden\"]])\n self.activation = str(settings[\"activation\"])\n self.features = settings[\"features\"]\n if self.features == \"None\":\n self.features = None\n if self.features is not None:\n self.features = [int(item) for item in self.features]\n\n try:\n self.dropout_prob = float(settings[\"dropout_prob\"])\n except KeyError:\n self.dropout_prob = 0.0\n logger.info(\n \"Can't find dropout probability in model file. Probably this file was created with an older\"\n \" MadMiner version < 0.6.1. That's totally fine, we'll just stick to the default of 0 (no\"\n \" dropout).\"\n )\n\n def _create_model(self):\n raise NotImplementedError()\n\n def calculate_fisher_information(self, x, theta=None, weights=None, n_events=1, sum_events=True):\n \"\"\"\n Calculates the expected Fisher information matrix based on the kinematic information in a given number of\n events.\n\n Parameters\n ----------\n x : str or ndarray\n Sample of observations, or path to numpy file with observations. Note that this sample has to be sampled\n from the reference parameter where the score is estimated with the SALLY / SALLINO estimator.\n\n theta: None or ndarray\n Numerator parameter point, or filename of a pickled numpy array. Has no effect for ScoreEstimator.\n\n weights : None or ndarray, optional\n Weights for the observations. If None, all events are taken to have equal weight. Default value: None.\n\n n_events : float, optional\n Expected number of events for which the kinematic Fisher information should be calculated. Default value: 1.\n\n sum_events : bool, optional\n If True, the expected Fisher information summed over the events x is calculated. If False, the per-event\n Fisher information for each event is returned. 
Default value: True.\n\n Returns\n -------\n fisher_information : ndarray\n Expected kinematic Fisher information matrix with shape `(n_events, n_parameters, n_parameters)` if\n sum_events is False or `(n_parameters, n_parameters)` if sum_events is True.\n\n \"\"\"\n\n if self.model is None:\n raise ValueError(\"No model -- train or load model before evaluating it!\")\n\n # Load training data\n logger.info(\"Loading evaluation data\")\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Estimate scores\n t_hats = self.evaluate_score(x=x, theta=np.array([theta for _ in x]), nuisance_mode=\"keep\")\n\n # Weights\n if weights is None:\n weights = np.ones(n_samples)\n weights /= np.sum(weights)\n\n # Calculate Fisher information\n logger.info(\"Calculating Fisher information\")\n if sum_events:\n fisher_information = float(n_events) * np.einsum(\"n,ni,nj->ij\", weights, t_hats, t_hats)\n else:\n fisher_information = float(n_events) * np.einsum(\"n,ni,nj->nij\", weights, t_hats, t_hats)\n\n # Calculate expected score\n expected_score = np.mean(t_hats, axis=0)\n logger.debug(\"Expected per-event score (should be close to zero): %s\", expected_score)\n\n return fisher_information\n\n\nclass ConditionalEstimator(Estimator):\n\n \"\"\"\n Abstract class for estimator that is conditional on theta. Subclassed by ParameterizedRatioEstimator,\n DoubleParameterizedRatioEstimator, and LikelihoodEstimator (but not ScoreEstimator).\n\n Adds functionality to rescale parameters.\n \"\"\"\n\n def __init__(self, features=None, n_hidden=(100,), activation=\"tanh\", dropout_prob=0.0):\n super(ConditionalEstimator, self).__init__(features, n_hidden, activation, dropout_prob)\n\n self.theta_scaling_means = None\n self.theta_scaling_stds = None\n\n def save(self, filename, save_model=False):\n\n \"\"\"\n Saves the trained model to four files: a JSON file with the settings, a pickled pyTorch state dict\n file, and numpy files for the mean and variance of the inputs (used for input scaling).\n\n Parameters\n ----------\n filename : str\n Path to the files. '_settings.json' and '_state_dict.pl' will be added.\n\n save_model : bool, optional\n If True, the whole model is saved in addition to the state dict. This is not necessary for loading it\n again with Estimator.load(), but can be useful for debugging, for instance to plot the computational graph.\n\n Returns\n -------\n None\n\n \"\"\"\n\n super(ConditionalEstimator, self).save(filename, save_model)\n\n # Save param scaling\n if self.theta_scaling_stds is not None and self.theta_scaling_means is not None:\n logger.debug(\n \"Saving parameter scaling information to %s_theta_means.npy and %s_theta_stds.npy\", filename, filename\n )\n np.save(f\"{filename}_theta_means.npy\", self.theta_scaling_means)\n np.save(f\"{filename}_theta_stds.npy\", self.theta_scaling_stds)\n\n def load(self, filename):\n\n \"\"\"\n Loads a trained model from files.\n\n Parameters\n ----------\n filename : str\n Path to the files. 
'_settings.json' and '_state_dict.pl' will be added.\n\n Returns\n -------\n None\n\n \"\"\"\n\n super(ConditionalEstimator, self).load(filename)\n\n # Load param scaling\n try:\n self.theta_scaling_means = np.load(f\"{filename}_theta_means.npy\")\n self.theta_scaling_stds = np.load(f\"{filename}_theta_stds.npy\")\n logger.debug(\n \" Found parameter scaling information: means %s, stds %s\",\n self.theta_scaling_means,\n self.theta_scaling_stds,\n )\n except FileNotFoundError:\n logger.warning(\"Parameter scaling information not found in %s\", filename)\n self.theta_scaling_means = None\n self.theta_scaling_stds = None\n\n def initialize_parameter_transform(self, theta, transform=True, overwrite=True):\n if self.theta_scaling_stds is not None and self.theta_scaling_means is not None and not overwrite:\n logger.info(\n \"Parameter rescaling already defined. To overwrite, call initialize_parameter_transform(theta, overwrite=True).\"\n )\n elif transform:\n logger.info(\"Setting up parameter rescaling\")\n self.theta_scaling_means = np.mean(theta, axis=0)\n self.theta_scaling_stds = np.maximum(np.std(theta, axis=0), 1.0e-6)\n else:\n logger.info(\"Disabling parameter rescaling\")\n self.theta_scaling_means = None\n self.theta_scaling_stds = None\n\n def _transform_parameters(self, theta):\n if self.theta_scaling_means is not None and self.theta_scaling_stds is not None:\n if isinstance(theta, torch.Tensor):\n theta_scaled = theta - torch.tensor(self.theta_scaling_means, dtype=theta.dtype, device=theta.device)\n theta_scaled = theta_scaled / torch.tensor(\n self.theta_scaling_stds, dtype=theta.dtype, device=theta.device\n )\n else:\n theta_scaled = theta - self.theta_scaling_means[np.newaxis, :]\n theta_scaled /= self.theta_scaling_stds[np.newaxis, :]\n else:\n theta_scaled = theta\n return theta_scaled\n\n def _transform_score(self, t_xz, inverse=False):\n if self.theta_scaling_means is not None and self.theta_scaling_stds is not None and t_xz is not None:\n if inverse:\n t_xz_scaled = t_xz / self.theta_scaling_stds[np.newaxis, :]\n else:\n t_xz_scaled = t_xz * self.theta_scaling_stds[np.newaxis, :]\n else:\n t_xz_scaled = t_xz\n return t_xz_scaled\n\n\nclass TheresAGoodReasonThisDoesntWork(Exception):\n pass\n" ]
[ [ "numpy.sum", "numpy.save", "numpy.load", "numpy.ones", "torch.load", "numpy.zeros", "numpy.einsum", "torch.save", "torch.tensor", "numpy.array", "numpy.std", "numpy.mean" ] ]
ZongSingHuang/BPSOGWO-with-GA
[ "4b6968d8946ea5b4456c0fdec1f0c4dd062e4436" ]
[ "main_9010.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 21:29:10 2020\n\n@author: ZongSing_NB\n\"\"\"\n\nfrom BHPSOGWO import BHPSOGWO\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nimport functools\nimport time\nimport warnings\n#------------------------------------------------------------------------------\n\n\nwarnings.filterwarnings(\"ignore\")\nnp.random.seed(42)\n#------------------------------------------------------------------------------\n\n\n# 讀資料\nBreastcancer = pd.read_csv('Breastcancer.csv', header=None).values\nBreastEW = pd.read_csv('BreastEW.csv', header=None).values\nCongress = pd.read_csv('Congress.csv', header=None).values\nExactly = pd.read_csv('Exactly.csv', header=None).values\nExactly2 = pd.read_csv('Exactly2.csv', header=None).values\nHeartEW = pd.read_csv('HeartEW.csv', header=None).values\nIonosphere = pd.read_csv('Ionosphere.csv', header=None).values\nKrVsKpEW = pd.read_csv('KrVsKpEW.csv', header=None).values\nLymphography = pd.read_csv('Lymphography.csv', header=None).values\nM_of_n = pd.read_csv('M-of-n.csv', header=None).values\nPenglungEW = pd.read_csv('PenglungEW.csv', header=None).values\nSonar = pd.read_csv('Sonar.csv', header=None).values\nSpectEW = pd.read_csv('SpectEW.csv', header=None).values\nTic_tac_toe = pd.read_csv('Tic-tac-toe.csv', header=None).values\nVote = pd.read_csv('Vote.csv', header=None).values\nWaveformEW = pd.read_csv('WaveformEW.csv', header=None).values\nWine = pd.read_csv('Wine.csv', header=None).values\nZoo = pd.read_csv('Zoo.csv', header=None).values\n\nX1, y1 = Breastcancer[:, :-1], Breastcancer[:, -1]\nX2, y2 = BreastEW[:, :-1], BreastEW[:, -1]\nX3, y3 = Congress[:, :-1], Congress[:, -1]\nX4, y4 = Exactly[:, :-1], Exactly[:, -1]\nX5, y5 = Exactly2[:, :-1], Exactly2[:, -1]\nX6, y6 = HeartEW[:, :-1], HeartEW[:, -1]\nX7, y7 = Ionosphere[:, :-1], Ionosphere[:, -1]\nX8, y8 = KrVsKpEW[:, :-1], KrVsKpEW[:, -1]\nX9, y9 = Lymphography[:, :-1], Lymphography[:, -1]\nX10, y10 = M_of_n[:, :-1], M_of_n[:, -1]\nX11, y11 = PenglungEW[:, :-1], PenglungEW[:, -1]\nX12, y12 = Sonar[:, :-1], Sonar[:, -1]\nX13, y13 = SpectEW[:, :-1], SpectEW[:, -1]\nX14, y14 = Tic_tac_toe[:, :-1], Tic_tac_toe[:, -1]\nX15, y15 = Vote[:, :-1], Vote[:, -1]\nX16, y16 = WaveformEW[:, :-1], WaveformEW[:, -1]\nX17, y17 = Wine[:, :-1], Wine[:, -1]\nX18, y18 = Zoo[:, :-1], Zoo[:, -1]\n#------------------------------------------------------------------------------\n\n\ndef fitness(x, X, y):\n if x.ndim==1:\n x = x.reshape(1, -1)\n loss = np.zeros(x.shape[0])\n \n for i in range(x.shape[0]):\n if np.sum(x[i, :])>0:\n score = cross_val_score(KNeighborsClassifier(n_neighbors=5), X[:, x[i, :].astype(bool)], y, cv=skf)\n loss[i] = 0.99*(1-score.mean()) + 0.01*(np.sum(x[i, :])/X.shape[1])\n else:\n loss[i] = np.inf\n # print(666)\n return loss\n#------------------------------------------------------------------------------\n\n\nd = -1\ng = 70\np = 8\ntimes = 20\ntable = np.zeros((7, 18)) # ['avg acc', '% selected', 'avg time', 'avg loss', 'worst loss', 'best loss', 'std loss']\ntable[4, :] = -np.ones(18)*np.inf # worst\ntable[5, :] = np.ones(18)*np.inf # best\nall_for_std = np.zeros((times, 18))\nskf = StratifiedKFold(n_splits=10, shuffle=True)\n#------------------------------------------------------------------------------\n\n\nfor i in range(times):\n total_time = time.time()\n 
#------------------------------------------------------------------------------\n    # run BHPSOGWO feature selection on each dataset; column col of table and\n    # all_for_std holds that dataset's statistics\n    datasets = [(X1, y1), (X2, y2), (X3, y3), (X4, y4), (X5, y5), (X6, y6),\n                (X7, y7), (X8, y8), (X9, y9), (X10, y10), (X11, y11), (X12, y12),\n                (X13, y13), (X14, y14), (X15, y15), (X16, y16), (X17, y17), (X18, y18)]\n    for col, (X, y) in enumerate(datasets):\n        start = time.time()\n        loss = functools.partial(fitness, X=X, y=y)\n        optimizer = BHPSOGWO(fit_func=loss,\n                             num_dim=X.shape[1], num_particle=p, max_iter=g)\n        optimizer.opt()\n\n        if optimizer.gBest_score>table[4, col]: table[4, col] = optimizer.gBest_score\n        if optimizer.gBest_score<table[5, col]: table[5, col] = optimizer.gBest_score\n        table[3, col] += optimizer.gBest_score\n        table[2, col] += time.time()-start\n        all_for_std[i, col] = optimizer.gBest_score\n\n        table[0, col] += cross_val_score(KNeighborsClassifier(n_neighbors=5), X[:, optimizer.gBest_X.astype(bool)], y, cv=skf).mean()\n        table[1, col] += np.sum(optimizer.gBest_X)/len(optimizer.gBest_X)\n    #------------------------------------------------------------------------------\n\n\n    print(i+1, ' ', round(time.time()-total_time, 2), 'sec')\n    #------------------------------------------------------------------------------\n\n\ntable[:4, :] = table[:4, :] / times\ntable[6, :] = np.std(all_for_std, axis=0)\ntable = pd.DataFrame(table)\ntable.columns=['Breastcancer', 'BreastEW', 'Congress', 'Exactly', 'Exactly2', 'HeartEW',\n               'Ionosphere', 'KrVsKpEW', 'Lymphography', 'M-of-n', 'PenglungEW', 'Sonar', \n
'SpectEW', 'Tic-tac-toe', 'Vote', 'WaveformEW', 'Wine', 'Zoo']\ntable.index = ['avg acc', '% selected', 'avg time', 'avg loss', 'worst loss', 'best loss', 'std loss']\n\nall_for_std = pd.DataFrame(all_for_std)\nall_for_std.columns=['Breastcancer', 'BreastEW', 'Congress', 'Exactly', 'Exactly2', 'HeartEW',\n 'Ionosphere', 'KrVsKpEW', 'Lymphography', 'M-of-n', 'PenglungEW', 'Sonar', \n 'SpectEW', 'Tic-tac-toe', 'Vote', 'WaveformEW', 'Wine', 'Zoo']" ]
[ [ "numpy.ones", "numpy.sum", "sklearn.model_selection.StratifiedKFold", "numpy.zeros", "pandas.read_csv", "pandas.DataFrame", "numpy.random.seed", "sklearn.neighbors.KNeighborsClassifier", "numpy.std" ] ]
quanhua92/maskrcnn-pytorch
[ "d5dff68735ab175cba5604230141874aa6bd59b8" ]
[ "tests/data/test_processing.py" ]
[ "import os\nimport numpy as np\nfrom maskrcnn.lib.data.preprocessing import mold_inputs\nfrom maskrcnn.lib.config import cfg\n\nfrom maskrcnn.lib.utils import io_utils\n\n\ndef test_mold_inputs_ones():\n image = np.ones((cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM, 3), dtype=np.uint8) * 255\n \n molded_images, image_metas = mold_inputs([image], cfg)\n\n mean = molded_images[0, 0, :, :].mean()\n assert abs(mean - ((1 - 0.485) / 0.229)) < 1e-5\n\n mean = molded_images[0, 1, :, :].mean()\n assert abs(mean - ((1 - 0.456) / 0.224)) < 1e-5\n\n mean = molded_images[0, 2, :, :].mean()\n assert abs(mean - ((1 - 0.406) / 0.225)) < 1e-5\n\n assert molded_images.shape == (1, 3, cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM)\n\n assert image_metas[0][1] == 1\n\n\ndef test_mold_image():\n image_path = os.path.join(\"data\", \"COCO_val2014_000000018928.jpg\")\n image = io_utils.read_image(image_path)\n molded_images, image_metas = mold_inputs([image], cfg)\n\n print(\"image_metas\", image_metas)\n assert image_metas.shape[0] == 1\n assert molded_images.shape[1] == 3\n assert molded_images.shape == (1, 3, cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM)\n\n assert abs(image_metas[0][1] - 2.048) < 1e-10\n" ]
[ [ "numpy.ones" ] ]
Lhior/TXPipe
[ "58fd7612326779d4c1b0e499157dddc9e3b524c0" ]
[ "txpipe/theory.py" ]
[ "from .base_stage import PipelineStage\nfrom .data_types import FiducialCosmology, SACCFile\nimport numpy as np\n\n\nclass TXTwoPointTheoryReal(PipelineStage):\n \"\"\"\n Compute theory in CCL in real space and save to a sacc file.\n \"\"\"\n\n name = \"TXTwoPointTheoryReal\"\n inputs = [\n (\"twopoint_data_real\", SACCFile),\n (\"fiducial_cosmology\", FiducialCosmology), # For example lines\n ]\n outputs = [\n (\"twopoint_theory_real\", SACCFile),\n ]\n\n def run(self):\n import sacc\n\n filename = self.get_input(\"twopoint_data_real\")\n s = sacc.Sacc.load_fits(filename)\n\n # TODO: when there is a better Cosmology serialization method\n # switch to that\n print(\"Manually specifying matter_power_spectrum and Neff\")\n cosmo = self.open_input(\"fiducial_cosmology\", wrapper=True).to_ccl(\n matter_power_spectrum=\"halofit\", Neff=3.046\n )\n print(cosmo)\n\n s_theory = self.replace_with_theory_real(s, cosmo)\n\n # Remove covariance\n s_theory.covariance = None\n\n # save the output to Sacc file\n s_theory.save_fits(self.get_output(\"twopoint_theory_real\"), overwrite=True)\n\n def read_nbin(self, s):\n import sacc\n\n xip = sacc.standard_types.galaxy_shear_xi_plus\n wtheta = sacc.standard_types.galaxy_density_xi\n\n source_tracers = set()\n for b1, b2 in s.get_tracer_combinations(xip):\n source_tracers.add(b1)\n source_tracers.add(b2)\n\n lens_tracers = set()\n for b1, b2 in s.get_tracer_combinations(wtheta):\n lens_tracers.add(b1)\n lens_tracers.add(b2)\n\n return len(source_tracers), len(lens_tracers)\n\n def get_ccl_tracers(self, s, cosmo, smooth=False):\n\n # ccl tracers object\n import pyccl\n\n tracers = {}\n\n nbin_source, nbin_lens = self.read_nbin(s)\n\n # Make the lensing tracers\n for i in range(nbin_source):\n name = f\"source_{i}\"\n Ti = s.get_tracer(name)\n nz = smooth_nz(Ti.nz) if smooth else Ti.nz\n print(\"smooth:\", smooth)\n # Convert to CCL form\n tracers[name] = pyccl.WeakLensingTracer(cosmo, (Ti.z, nz))\n\n # And the clustering tracers\n for i in range(nbin_lens):\n name = f\"lens_{i}\"\n Ti = s.get_tracer(name)\n nz = smooth_nz(Ti.nz) if smooth else Ti.nz\n\n # Convert to CCL form\n tracers[name] = pyccl.NumberCountsTracer(\n cosmo, has_rsd=False, dndz=(Ti.z, nz), bias=(Ti.z, np.ones_like(Ti.z))\n )\n\n return tracers\n\n def replace_with_theory_real(self, s, cosmo):\n\n import pyccl\n\n nbin_source, nbin_lens = self.read_nbin(s)\n ell = np.unique(np.logspace(np.log10(2), 5, 400).astype(int))\n tracers = self.get_ccl_tracers(s, cosmo)\n\n for i in range(nbin_source):\n for j in range(i + 1):\n print(f\"Computing theory lensing-lensing ({i},{j})\")\n\n # compute theory\n print(tracers[f\"source_{i}\"], tracers[f\"source_{j}\"])\n cl = pyccl.angular_cl(\n cosmo, tracers[f\"source_{i}\"], tracers[f\"source_{j}\"], ell\n )\n theta, *_ = s.get_theta_xi(\n \"galaxy_shear_xi_plus\", f\"source_{i}\", f\"source_{j}\"\n )\n xip = pyccl.correlation(cosmo, ell, cl, theta / 60, corr_type=\"L+\")\n xim = pyccl.correlation(cosmo, ell, cl, theta / 60, corr_type=\"L-\")\n\n # replace data values in the sacc object for the theory ones\n ind_xip = s.indices(\n \"galaxy_shear_xi_plus\", (f\"source_{i}\", f\"source_{j}\")\n )\n ind_xim = s.indices(\n \"galaxy_shear_xi_minus\", (f\"source_{i}\", f\"source_{j}\")\n )\n for p, q in enumerate(ind_xip):\n s.data[q].value = xip[p]\n for p, q in enumerate(ind_xim):\n s.data[q].value = xim[p]\n\n for i in range(nbin_lens):\n print(f\"Computing theory density-density ({i},{i})\")\n\n # compute theory\n cl = pyccl.angular_cl(\n cosmo, 
tracers[f\"lens_{i}\"], tracers[f\"lens_{i}\"], ell\n )\n theta, *_ = s.get_theta_xi(\"galaxy_density_xi\", f\"lens_{i}\", f\"lens_{i}\")\n wtheta = pyccl.correlation(cosmo, ell, cl, theta / 60, corr_type=\"GG\")\n\n for j in range(i+1):\n print(f\"Computing theory density-density ({i},{j})\")\n\n # compute theory\n cl = pyccl.angular_cl(cosmo, tracers[f'lens_{i}'], tracers[f'lens_{j}'], ell)\n theta, *_ = s.get_theta_xi('galaxy_density_xi', f'lens_{i}' , f'lens_{j}')\n wtheta = pyccl.correlation(cosmo, ell, cl, theta/60, corr_type='GG')\n\n # replace data values in the sacc object for the theory ones\n ind = s.indices('galaxy_density_xi', (f'lens_{i}', f'lens_{j}'))\n for p, q in enumerate(ind):\n s.data[q].value = wtheta[p]\n\n for i in range(nbin_source):\n\n for j in range(nbin_lens):\n print(f\"Computing theory lensing-density (S{i},L{j})\")\n\n # compute theory\n cl = pyccl.angular_cl(\n cosmo, tracers[f\"source_{i}\"], tracers[f\"lens_{j}\"], ell\n )\n theta, *_ = s.get_theta_xi(\n \"galaxy_shearDensity_xi_t\", f\"source_{i}\", f\"lens_{j}\"\n )\n gt = pyccl.correlation(cosmo, ell, cl, theta / 60, corr_type=\"GL\")\n\n ind = s.indices(\n \"galaxy_shearDensity_xi_t\", (f\"source_{i}\", f\"lens_{j}\")\n )\n for p, q in enumerate(ind):\n s.data[q].value = gt[p]\n\n return s\n\n\nclass TXTwoPointTheoryFourier(TXTwoPointTheoryReal):\n \"\"\"\n Compute theory from CCL in Fourier space and save to a sacc file.\n \"\"\"\n\n name = \"TXTwoPointTheoryFourier\"\n inputs = [\n (\"twopoint_data_fourier\", SACCFile),\n (\"fiducial_cosmology\", FiducialCosmology), # For example lines\n ]\n outputs = [\n (\"twopoint_theory_fourier\", SACCFile),\n ]\n\n def run(self):\n import sacc\n\n filename = self.get_input(\"twopoint_data_fourier\")\n s = sacc.Sacc.load_fits(filename)\n\n # TODO: when there is a better Cosmology serialization method\n # switch to that\n print(\"Manually specifying matter_power_spectrum and Neff\")\n cosmo = self.open_input(\"fiducial_cosmology\", wrapper=True).to_ccl(\n matter_power_spectrum=\"halofit\", Neff=3.046\n )\n print(cosmo)\n\n s_theory = self.replace_with_theory_fourier(s, cosmo)\n\n # Remove covariance\n s_theory.covariance = None\n\n # save the output to Sacc file\n s_theory.save_fits(self.get_output(\"twopoint_theory_fourier\"), overwrite=True)\n\n def read_nbin(self, s):\n import sacc\n\n cl_ee = sacc.standard_types.galaxy_shear_cl_ee\n cl_density = sacc.standard_types.galaxy_density_cl\n\n source_tracers = set()\n for b1, b2 in s.get_tracer_combinations(cl_ee):\n source_tracers.add(b1)\n source_tracers.add(b2)\n\n lens_tracers = set()\n for b1, b2 in s.get_tracer_combinations(cl_density):\n lens_tracers.add(b1)\n lens_tracers.add(b2)\n\n return len(source_tracers), len(lens_tracers)\n\n def replace_with_theory_fourier(self, s, cosmo):\n\n import pyccl\n\n nbin_source, nbin_lens = self.read_nbin(s)\n tracers = self.get_ccl_tracers(s, cosmo)\n\n data_types = s.get_data_types()\n if \"galaxy_shearDensity_cl_b\" in data_types:\n # Remove galaxy_shearDensity_cl_b measurement values\n ind_b = s.indices(\"galaxy_shearDensity_cl_b\")\n s.remove_indices(ind_b)\n if \"galaxy_shear_cl_bb\" in data_types:\n # Remove galaxy_shear_cl_bb measurement values\n ind_bb = s.indices(\"galaxy_shear_cl_bb\")\n s.remove_indices(ind_bb)\n\n for i in range(nbin_source):\n for j in range(i + 1):\n print(f\"Computing theory lensing-lensing ({i},{j})\")\n\n # compute theory\n print(tracers[f\"source_{i}\"], tracers[f\"source_{j}\"])\n ell, *_ = s.get_ell_cl(\n \"galaxy_shear_cl_ee\", 
f\"source_{i}\", f\"source_{j}\"\n )\n cl = pyccl.angular_cl(\n cosmo, tracers[f\"source_{i}\"], tracers[f\"source_{j}\"], ell\n )\n\n # replace data values in the sacc object for the theory ones\n ind = s.indices(\"galaxy_shear_cl_ee\", (f\"source_{i}\", f\"source_{j}\"))\n for p, q in enumerate(ind):\n s.data[q].value = cl[p]\n\n for i in range(nbin_lens):\n print(f\"Computing theory density-density ({i},{i})\")\n\n # compute theory\n ell, *_ = s.get_ell_cl(\"galaxy_density_cl\", f\"lens_{i}\", f\"lens_{i}\")\n cl = pyccl.angular_cl(\n cosmo, tracers[f\"lens_{i}\"], tracers[f\"lens_{i}\"], ell\n )\n\n # replace data values in the sacc object for the theory ones\n ind = s.indices(\"galaxy_density_cl\", (f\"lens_{i}\", f\"lens_{i}\"))\n for p, q in enumerate(ind):\n s.data[q].value = cl[p]\n\n for i in range(nbin_source):\n\n for j in range(nbin_lens):\n print(f\"Computing theory lensing-density (S{i},L{j})\")\n\n # compute theory\n ell, *_ = s.get_ell_cl(\n \"galaxy_shearDensity_cl_e\", f\"source_{i}\", f\"lens_{j}\"\n )\n cl = pyccl.angular_cl(\n cosmo, tracers[f\"source_{i}\"], tracers[f\"lens_{j}\"], ell\n )\n\n # replace data values in the sacc object for the theory ones\n ind = s.indices(\n \"galaxy_shearDensity_cl_e\", (f\"source_{i}\", f\"lens_{j}\")\n )\n for p, q in enumerate(ind):\n s.data[q].value = cl[p]\n\n return s\n" ]
[ [ "numpy.log10", "numpy.ones_like" ] ]
MenuaB/deep_utils
[ "b1b936f4780ea7dc52224f53f5116288c5b0a804" ]
[ "deep_utils/callbacks/torch/model_checkpoint.py" ]
[ "import os\n\nfrom deep_utils.utils.os_utils.os_path import split_extension\n\n\nclass ModelCheckPoint:\n def __init__(self,\n model_path,\n model,\n monitor='min',\n save_best_only=True,\n overwrite=True,\n verbose=True,\n save_last=True,\n loss=None,\n optimizer=None,\n scheduler=None,\n static_dict=None):\n self.overwrite = overwrite\n self.model_path = model_path\n self.monitor_val = float('-inf') if monitor == 'max' else float('inf')\n self.save_best_only = save_best_only\n self.model = model\n self.loss = loss\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.static_dict = static_dict\n self.monitor = monitor\n self.epoch = 0\n self.verbose = verbose\n self.save_last = save_last\n os.makedirs(os.path.split(model_path)[0], exist_ok=True)\n\n def __call__(self, monitor_val):\n self.epoch += 1\n if self.save_best_only:\n trigger = False\n if self.monitor == 'min' and monitor_val < self.monitor_val:\n self.monitor_val = monitor_val\n trigger = True\n elif self.monitor == 'max' and monitor_val > self.monitor_val:\n self.monitor_val = monitor_val\n trigger = True\n\n if self.save_last:\n last_path = split_extension(self.model_path, suffix=\"_last\")\n self._save(last_path, print_=False)\n\n if self.overwrite:\n best_path = split_extension(self.model_path, suffix=\"_best\")\n else:\n best_path = split_extension(self.model_path, suffix=\"_\" + str(self.epoch))\n\n if trigger:\n self._save(best_path, print_=self.verbose)\n else:\n model_path = split_extension(self.model_path, suffix=\"_\" + str(self.epoch))\n self._save(model_path, print_=self.verbose)\n\n def _save(self, model_path, print_):\n import torch\n save_dict = self.static_dict if self.static_dict is not None else dict()\n save_dict['model_state_dict'] = self.model.state_dict()\n self._add_file(save_dict, 'optimizer', self.optimizer)\n self._add_file(save_dict, 'scheduler', self.scheduler)\n self._add_file(save_dict, 'loss', self.loss)\n torch.save(save_dict, model_path)\n if print_:\n print(f'model is saved in {model_path}')\n\n @staticmethod\n def _add_file(dict_, name, file):\n if file is not None:\n dict_[name] = file\n return dict_\n" ]
[ [ "torch.save" ] ]