repo_name: string (length 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
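Each record below describes one repository snapshot: a repo name, commit hashes, file paths, the raw file contents, the APIs detected in each file, and the candidate library versions those APIs are compatible with. A minimal sketch of walking one such record, assuming rows are available as plain Python dicts keyed by the schema fields above (the loading step itself is not shown in this dump):

```python
# Minimal sketch: walk one record of the dump, assuming each row is a
# plain dict keyed by the schema fields listed above.
def summarize_record(row: dict) -> None:
    """Print which APIs each file in a repository snapshot uses."""
    repo = row["repo_name"]
    for sha, path, apis in zip(row["hexsha"], row["file_path"], row["apis"]):
        print(f"{repo}@{sha[:8]} {path}: {len(apis)} APIs -> {sorted(apis)[:5]}")

# Example row taken from the data below.
example_row = {
    "repo_name": "gptix/DS--Data-Engineering-",
    "hexsha": ["2975e4b538aed3e477bea56f0b09601452ed4837"],
    "file_path": ["recommender_vanilla.py"],
    "apis": [["sklearn.feature_extraction.text.CountVectorizer",
              "pandas.read_csv",
              "sklearn.neighbors.NearestNeighbors"]],
}
summarize_record(example_row)
```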
ChristianLin0420/DeepRL
[ "143a9bfebd264229d9d26fcdc070065225774e04" ]
[ "Homework 4/107062240_hw4_train.py" ]
[ "from osim.env import L2M2019Env\n\n'''\n TD3\n'''\nimport copy\nimport numpy as np\nimport pandas as pd\n\nimport argparse\nimport os\nimport os.path\nfrom os import path\nimport utils\nfrom collections import Iterable\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)\n# Paper: https://arxiv.org/abs/1802.09477\n\ndef flatten_list(lis):\n for item in lis:\n if isinstance(item, Iterable) and not isinstance(item, str):\n for x in flatten(item):\n yield x\n else: \n yield item\n\ndef flatten(mydict):\n new_dict = {}\n for key, value in mydict.items():\n if type(value) == list:\n print(\"key: {}, value: {}\".format(key, value))\n new_dict[key] = flatten_list(value)\n else:\n new_dict[key] = value\n return new_dict\n\ndef FF(ss):\n state = flatten(ss)\n # print(state)\n\n new_state = []\n\n for v in state.values(): \n # print(type(v))\n if type(v) == dict:\n temp = pd.json_normalize(v, sep = '_')\n temp = list(temp.values)\n for item in temp:\n if type(item) == np.ndarray:\n for val in item:\n if type(val) == list:\n for t in val:\n new_state.append(float(t))\n else:\n new_state.append(float(val))\n else:\n new_state.append(item)\n else:\n # print(\"asdf\")\n for arr in v:\n for a in arr:\n for val in a:\n new_state.append(float(val))\n\n return new_state\n\nclass Actor(nn.Module):\n def __init__(self, state_dim, action_dim, max_action):\n super(Actor, self).__init__()\n\n self.l1 = nn.Linear(state_dim, 256)\n self.l2 = nn.Linear(256, 256)\n self.l3 = nn.Linear(256, action_dim)\n \n self.max_action = max_action\n \n\n def forward(self, state):\n a = F.relu(self.l1(state))\n a = F.relu(self.l2(a))\n return self.max_action * torch.tanh(self.l3(a))\n\n\nclass Critic(nn.Module):\n def __init__(self, state_dim, action_dim):\n super(Critic, self).__init__()\n\n # Q1 architecture\n self.l1 = nn.Linear(state_dim + action_dim, 256)\n self.l2 = nn.Linear(256, 256)\n self.l3 = nn.Linear(256, 1)\n\n # Q2 architecture\n self.l4 = nn.Linear(state_dim + action_dim, 256)\n self.l5 = nn.Linear(256, 256)\n self.l6 = nn.Linear(256, 1)\n\n\n def forward(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = F.relu(self.l1(sa))\n q1 = F.relu(self.l2(q1))\n q1 = self.l3(q1)\n\n q2 = F.relu(self.l4(sa))\n q2 = F.relu(self.l5(q2))\n q2 = self.l6(q2)\n return q1, q2\n\n\n def Q1(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = F.relu(self.l1(sa))\n q1 = F.relu(self.l2(q1))\n q1 = self.l3(q1)\n return q1\n\n\nclass TD3(object):\n def __init__(\n self,\n state_dim,\n action_dim,\n max_action,\n discount=0.99,\n tau=0.005,\n policy_noise=0.2,\n noise_clip=0.5,\n policy_freq=2\n ):\n\n self.actor = Actor(state_dim, action_dim, max_action).to(device)\n self.actor_target = copy.deepcopy(self.actor)\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)\n\n self.critic = Critic(state_dim, action_dim).to(device)\n self.critic_target = copy.deepcopy(self.critic)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)\n\n self.max_action = max_action\n self.discount = discount\n self.tau = tau\n self.policy_noise = policy_noise\n self.noise_clip = noise_clip\n self.policy_freq = policy_freq\n\n self.total_it = 0\n\n def select_action(self, state):\n\n new_state = FF(state)\n\n new_state = np.array(new_state)\n # print(new_state)\n\n new_state = 
torch.FloatTensor(new_state.reshape(1, -1)).to(device)\n return self.actor(new_state).cpu().data.numpy().flatten()\n\n def train(self, replay_buffer, batch_size=256):\n self.total_it += 1\n\n # Sample replay buffer \n state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)\n\n with torch.no_grad():\n # Select action according to policy and add clipped noise\n noise = (\n torch.randn_like(action) * self.policy_noise\n ).clamp(-self.noise_clip, self.noise_clip)\n \n next_action = (\n self.actor_target(next_state) + noise\n ).clamp(-self.max_action, self.max_action)\n\n # Compute the target Q value\n target_Q1, target_Q2 = self.critic_target(next_state, next_action)\n target_Q = torch.min(target_Q1, target_Q2)\n target_Q = reward + not_done * self.discount * target_Q\n\n # Get current Q estimates\n current_Q1, current_Q2 = self.critic(state, action)\n\n # Compute critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # Delayed policy updates\n if self.total_it % self.policy_freq == 0:\n\n # Compute actor losse\n actor_loss = -self.critic.Q1(state, self.actor(state)).mean()\n \n # Optimize the actor \n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # Update the frozen target models\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n\n def save(self, filename):\n torch.save(self.critic.state_dict(), filename + \"_critic\")\n torch.save(self.critic_optimizer.state_dict(), filename + \"_critic_optimizer\")\n \n torch.save(self.actor.state_dict(), filename + \"_actor\")\n torch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\n\n\n def load(self, filename):\n self.critic.load_state_dict(torch.load(filename + \"_critic\"))\n self.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n self.critic_target = copy.deepcopy(self.critic)\n\n self.actor.load_state_dict(torch.load(filename + \"_actor\"))\n self.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n self.actor_target = copy.deepcopy(self.actor)\n\n def check_existed_files(self, filename):\n print (\"File exists:\"+str(path.exists(filename + \"_critic\")))\n print (\"File exists:\" + str(path.exists(filename + \"_critic_optimizer\")))\n print (\"File exists:\"+str(path.exists(filename + \"_actor\")))\n print (\"File exists:\" + str(path.exists(filename + \"_actor_optimizer\")))\n\n'''\n REPLAYBUFFER\n'''\n\nclass ReplayBuffer(object):\n def __init__(self, state_dim, action_dim, max_size=int(1e6)):\n self.max_size = max_size\n self.ptr = 0\n self.size = 0\n\n self.state = np.zeros((max_size, state_dim))\n self.action = np.zeros((max_size, action_dim))\n self.next_state = np.zeros((max_size, state_dim))\n self.reward = np.zeros((max_size, 1))\n self.not_done = np.zeros((max_size, 1))\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n def add(self, state, action, next_state, reward, done): \n new_state = FF(state)\n new_next_state = FF(next_state)\n self.state[self.ptr] = new_state\n self.action[self.ptr] 
= action\n self.next_state[self.ptr] = new_next_state\n self.reward[self.ptr] = reward\n self.not_done[self.ptr] = 1. - done\n\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n\n def sample(self, batch_size):\n ind = np.random.randint(0, self.size, size=batch_size)\n\n return (\n torch.FloatTensor(self.state[ind]).to(self.device),\n torch.FloatTensor(self.action[ind]).to(self.device),\n torch.FloatTensor(self.next_state[ind]).to(self.device),\n torch.FloatTensor(self.reward[ind]).to(self.device),\n torch.FloatTensor(self.not_done[ind]).to(self.device)\n )\n\n\n'''\n MAIN\n'''\n\n# Runs policy for X episodes and returns average reward\n# A fixed seed is used for the eval environment\ndef eval_policy(policy, env_name, seed, eval_episodes=5):\n eval_env = L2M2019Env(visualize=True)\n eval_env.seed(seed + 100)\n\n avg_reward = 0.\n for _ in range(eval_episodes):\n state, done = eval_env.reset(), False\n while not done:\n action = policy.select_action(state)\n\n for i in range(len(action)):\n if action[i] > 1:\n action[i] = 1\n elif action[i] < 0: \n action[i] = 0\n\n # print(action)\n state, reward, done, _ = eval_env.step(action)\n avg_reward += reward\n\n avg_reward /= eval_episodes\n\n print(\"---------------------------------------\")\n print(f\"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}\")\n print(\"---------------------------------------\")\n return avg_reward\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--policy\", default=\"TD3\") # Policy name (TD3, DDPG or OurDDPG)\n parser.add_argument(\"--env\", default=\"HalfCheetah-v2\") # OpenAI gym environment name\n parser.add_argument(\"--seed\", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds\n parser.add_argument(\"--start_timesteps\", default=10e2, type=int)# Time steps initial random policy is used\n parser.add_argument(\"--eval_freq\", default=1e4, type=int) # How often (time steps) we evaluate\n parser.add_argument(\"--max_timesteps\", default=5e7, type=int) # Max time steps to run environment\n parser.add_argument(\"--expl_noise\", default=0.1) # Std of Gaussian exploration noise\n parser.add_argument(\"--batch_size\", default=256, type=int) # Batch size for both actor and critic\n parser.add_argument(\"--discount\", default=0.99) # Discount factor\n parser.add_argument(\"--tau\", default=0.005) # Target network update rate\n parser.add_argument(\"--policy_noise\", default=0.2) # Noise added to target policy during critic update\n parser.add_argument(\"--noise_clip\", default=0.5) # Range to clip target policy noise\n parser.add_argument(\"--policy_freq\", default=2, type=int) # Frequency of delayed policy updates\n parser.add_argument(\"--save_model\", action=\"store_true\") # Save model and optimizer parameters\n parser.add_argument(\"--load_model\", default=\"true\") # Model load file name, \"\" doesn't load, \"default\" uses file_name\n args = parser.parse_args()\n\n file_name = f\"{args.policy}_{args.env}_{args.seed}\"\n print(\"---------------------------------------\")\n print(f\"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}\")\n print(\"---------------------------------------\")\n\n if not os.path.exists(\"./results\"):\n os.makedirs(\"./results\")\n\n if args.save_model and not os.path.exists(\"./models\"):\n os.makedirs(\"./models\")\n\n env = L2M2019Env(visualize=True)\n\n # Set seeds\n env.seed(args.seed)\n # env.action_space.seed(seed)\n torch.manual_seed(args.seed)\n 
np.random.seed(args.seed)\n \n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0] \n max_action = float(env.action_space.high[0])\n\n kwargs = {\n \"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"max_action\": max_action,\n \"discount\": args.discount,\n \"tau\": args.tau,\n }\n\n # Initialize policy\n if args.policy == \"TD3\":\n # Target policy smoothing is scaled wrt the action scale\n kwargs[\"policy_noise\"] = args.policy_noise * max_action\n kwargs[\"noise_clip\"] = args.noise_clip * max_action\n kwargs[\"policy_freq\"] = args.policy_freq\n policy = TD3(**kwargs)\n\n if args.load_model != \"\":\n policy.check_existed_files(\"models/107062240_HW4_data\")\n policy_file = file_name if args.load_model == \"default\" else args.load_model\n policy.load(\"models/107062240_HW4_data\")\n\n replay_buffer = ReplayBuffer(state_dim, action_dim)\n \n # Evaluate untrained policy\n evaluations = [eval_policy(policy, args.env, args.seed)]\n\n state, done = env.reset(), False\n episode_reward = 0\n episode_timesteps = 0\n episode_num = 0\n\n for t in range(int(args.max_timesteps)):\n episode_timesteps += 1\n\n # Select action randomly or according to policy\n if t < args.start_timesteps:\n action = env.action_space.sample()\n else:\n action = (\n policy.select_action(state)\n + np.random.normal(0, max_action * args.expl_noise, size=action_dim)\n ).clip(-max_action, max_action)\n\n for i in range(len(action)):\n if action[i] > 1:\n action[i] = 1\n elif action[i] < 0: \n action[i] = 0\n\n # print(action)\n\n # Perform action\n next_state, reward, done, _ = env.step(action) \n done_bool = float(done) if episode_timesteps < 500 else 0\n\n # Store data in replay buffer\n replay_buffer.add(state, action, next_state, reward, done_bool)\n\n state = next_state\n episode_reward += reward \n\n # Train agent after collecting sufficient data\n if t >= args.start_timesteps:\n # print(\"Start training ........ \")\n policy.train(replay_buffer, args.batch_size)\n\n if done: \n # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True\n print(f\"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}\")\n # Reset environment\n state, done = env.reset(), False\n episode_reward = 0\n episode_timesteps = 0\n episode_num += 1 \n\n # Evaluate episode\n if (t + 1) % args.eval_freq == 0:\n print(\"save new model!\")\n evaluations.append(eval_policy(policy, args.env, args.seed))\n np.save(f\"./results/{file_name}\", evaluations)\n policy.save(\"models/107062240_HW4_data\")\n" ]
[ [ "torch.randn_like", "numpy.random.seed", "torch.cat", "torch.load", "torch.manual_seed", "torch.min", "pandas.json_normalize", "numpy.save", "torch.nn.Linear", "torch.nn.functional.mse_loss", "numpy.random.normal", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0" ], "scipy": [], "tensorflow": [] } ]
gptix/DS--Data-Engineering-
[ "2975e4b538aed3e477bea56f0b09601452ed4837" ]
[ "recommender_vanilla.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\n### Import and shape data\npath_base = 'gitstuff/buildweek2/DS--Data-Engineering-/'\ndf = pd.read_csv(path_base + 'data/cannabis.csv')\n\n## select subset with high rating\ngood_enough = df[df['Rating'] >= 4.0]\n\n## replace blank flavor with \"\"\ngood_enough = df.replace(np.nan, '', regex=True)\n\ndef clean_string(strng):\n \"\"\"Remove commas and parentheses.\"\"\"\n s = strng.replace(\",\",\" \") # comma-> space\n s = s.replace(\"(\",\" \") # (-> space\n s = s.replace(\")\",\" \") # (-> space\n s = s.lower()\n return s\n\n\n## Clean and concatenate som fields to build strings for creating an embedding.\ncols = ['Type', 'Effects', 'Flavor', 'Description']\nfor col in cols:\n good_enough[col] = good_enough[col].apply(clean_string)\ngood_enough['text'] = good_enough['Type'] + \" \" + good_enough['Effects'] + \" \" + good_enough['Flavor']\n\n## Define a function to create a list of docs to be used to create a sparse matrix.\ndef gather_docs(df):\n \"\"\" Produces List of Documents from a dataframe.\n df: a Pandas dataframe that has the column 'text'.\n Returns a list of strings. \n \"\"\"\n docs = list(df['text'])\n return docs\n\ndocs = gather_docs(good_enough)\n\n\n## Instantiate vectorizer\nvect = CountVectorizer(stop_words='english', max_features=1000)\n\n## Fit Vectorizer\nvect.fit(docs)\n\n## Create a sparse document-term matrix\ndtm = vect.transform(docs)\n\n## Make a dataframe of a condensed version of the DTM, using feature names\ndtm = pd.DataFrame(dtm.todense(), columns=vect.get_feature_names())\n\n## Instantiate Nearestneighbors\nnn = NearestNeighbors(n_neighbors=5, algorithm='kd_tree')\n\n## Fit on Document-Term Matrix\nnn.fit(dtm)\n\n\ndef recommend(txt):\n \"\"\" Receives a string containing strain, effects, and flavors, and \n returns a 2-tuple of (array of scores, array of indexes) describing\n the best matches among strains modeled.\"\"\"\n \n clean_text = clean_string(txt)\n transformed_text = vect.transform([clean_text])\n return nn.kneighbors(transformed_text.todense())" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "pandas.read_csv", "sklearn.neighbors.NearestNeighbors" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
simra/msrflute
[ "c28e2e6bcfa9464b8640ccd625393bbed28491c3", "c28e2e6bcfa9464b8640ccd625393bbed28491c3", "c28e2e6bcfa9464b8640ccd625393bbed28491c3" ]
[ "utils/preprocessing/create-json.py", "extensions/privacy/__init__.py", "core/server.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport json\nimport time\nfrom tqdm import tqdm\nimport pandas as pd\n\npath = r'C:\\Users\\train.tsv'\n\ndef local_time():\n return str(time.strftime(\"%H:%M:%S\",time.localtime()))\n\n\nprint(local_time() + \" Starting script \" ) \ncolumns = ['author','num1','content','str1','str2','num2','subreddit']\ndf = pd.read_csv(path, sep='\\t', names=columns, header=None)\nprint(local_time() + \" File has been read \" )\n\ndf_authors = pd.DataFrame(df['author'])\ndf_content = pd.DataFrame(df['content'])\ndf_file = pd.concat([df_authors,df_content], axis=1)\nprint(local_time() + \" Data needed has been concatenated \")\n\n\nusers_group = df_file.groupby('author')\ngroup0 = df_file.groupby(['author','content'])\ngroup1 = pd.Series(users_group.size())\nusers = (group1.index).to_numpy() \nprint(local_time() + \" users been formatted \")\nnum_samples = group1.values \nprint(local_time() + \" num_samples has been formatted \")\nuser_data_dict= {}\n\nuser_data_dict= {i: {'x':list()} for i in tqdm(users)}\n\nfor i in tqdm(range(len(df_file))):\n if df_file['content'][i] not in user_data_dict[df_file['author'][i]]['x']:\n user_data_dict[df_file['author'][i]]['x'].append(df_file['content'][i])\n \n\nf = open(r'C:\\Users\\train.json', \"w\")\nnew_data = {'users': users.tolist(), 'num_samples': num_samples.tolist(), 'user_data': user_data_dict}\njson.dump(new_data,f)\nprint(local_time() + \" end of script \")", "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport numpy as np\nimport torch as T\nimport logging\nimport math\nimport json\nfrom utils import print_rank\nfrom azureml.core import Run\nfrom scipy.special import betainc, betaln\n\nrun = Run.get_context()\n\ndef compute_LDP_noise_std(eps, max_sensitivity, delta):\n return np.sqrt(2 * np.log(1.25 / delta)) * max_sensitivity / eps\n\n \ndef _beta2betainc_ratio(a, x):\n return 1 / betainc(a, a, x)\n\n\ndef _log_m1(d, alpha, gamma):\n return alpha * np.log(1 - gamma**2) - (d - 2) * np.log(2) - np.log(d - 1)\n\n\ndef _log_m2(p, tau, alpha):\n return np.log(p / (_beta2betainc_ratio(alpha, tau) - 1) - (1 - p)) + np.log(_beta2betainc_ratio(alpha, tau)) - betaln(alpha, alpha)\n\n\ndef _efficient_m(d, gamma, p):\n alpha = (d - 1) / 2\n tau = (1 + gamma) / 2 \n return np.exp(_log_m1(d, alpha, gamma) + _log_m2(p, tau, alpha))\n\n\ndef privacy_parameters(eps0, eps, d):\n exp_eps0 = np.exp(eps0)\n exp_eps = np.exp(eps)\n if exp_eps0 == np.inf:\n p0 = 1\n else:\n p0 = exp_eps0 / (1 + exp_eps0)\n if exp_eps == np.inf:\n gamma = np.sqrt(np.pi / (2 * (d - 1)))\n else:\n gamma = ((exp_eps - 1) / (exp_eps + 1)) * np.sqrt(np.pi / (2 * (d - 1)))\n return p0, gamma\n\n\ndef private_unit2(grad, gamma, prob):\n np.testing.assert_almost_equal(grad.norm().cpu().item(), 1, decimal=5)\n assert prob >= 0.5\n assert (0 <= gamma <= 1)\n p = T.rand(())\n while True:\n # create a uniform distriubtion over d-sphere\n V = T.normal(0, 1, grad.shape, device=grad.device)\n V = V / V.norm()\n dot_prod = T.dot(V, grad)\n if (dot_prod >= gamma and p < prob) or (dot_prod < gamma and p >= prob):\n break\n d = grad.shape[0]\n m = _efficient_m(d, gamma, prob)\n return V / m\n\n\ndef add_gaussian_noise(grad, eps, max_grad, delta):\n sigma = compute_LDP_noise_std(eps, max_grad, delta)\n #sigma = np.sqrt(2 * np.log(1.25 / delta)) * max_grad / eps\n noisy_grad = sigma * T.randn(grad.shape, device=grad.device) + grad\n return noisy_grad, sigma\n\n\ndef add_private_unit2_noise(eps, grad):\n 
eps0 = 0.01 * eps\n eps1 = 0.99 * eps\n samp_prob, gamma = privacy_parameters(eps0, eps1, grad.shape[0])\n return private_unit2(grad, gamma, samp_prob)\n\n\ndef scalar_DP(r, eps, k, r_max):\n r = np.minimum(r, r_max)\n val = k * r / r_max\n f_val = math.floor(val)\n c_val = math.ceil(val)\n J = f_val if T.rand(()) < (c_val - val) else c_val\n exp_eps = np.exp(eps)\n rand_prob = exp_eps / (exp_eps + k)\n if T.rand(()) >= rand_prob:\n while True:\n J_ = T.randint(0, k + 1, ()).item()\n if J != J_:\n J = J_\n break\n a = ((exp_eps + k) / (exp_eps - 1)) * (r_max / k)\n b = (k * (k + 1)) / (2 * (exp_eps + k))\n return a * (J - b)\n\n\ndef laplace_noise(max_sens, eps, vocab_size):\n return np.random.laplace(0.0, max_sens/eps, vocab_size)\n\n\ndef unroll_network(named_params, select_grad=False):\n # Unroll the network as 1D vector and save original values indices\n params_ids, flat_params = {}, []\n cur_idx = 0\n for n, p in named_params:\n dat = p.grad if select_grad else p.data\n flat_params.append(dat.view(-1))\n next_idx = cur_idx + flat_params[-1].shape[0]\n params_ids[n] = (cur_idx, next_idx)\n cur_idx = next_idx\n return T.cat(flat_params), params_ids\n\n\ndef update_network(named_params, params_ids, flat_params, apply_to_grad=False):\n # Roll back the network parameters to layers\n for n, p in named_params:\n s_id, e_id = params_ids[n]\n if apply_to_grad:\n p.grad.copy_(flat_params[s_id : e_id].view(*p.grad.shape))\n else:\n p.data.copy_(flat_params[s_id : e_id].view(*p.data.shape))\n\n\ndef apply_global_dp(config, model, num_clients_curr_iter, select_grad=True, metric_logger=None):\n # Add global DP noise here\n dp_config = config.get('dp_config', None)\n if dp_config is not None and dp_config.get('enable_global_dp', False):\n # enable_local_dp must be enabled - client-side gradient clipping must be enabled.\n assert (dp_config['enable_local_dp'])\n # Unroll the network grads as 1D vectors\n flat_grad, params_ids = unroll_network(model.named_parameters(), select_grad=select_grad)\n\n sigma = dp_config['global_sigma']\n max_grad = dp_config['max_grad']\n noise_scale = sigma * max_grad / num_clients_curr_iter\n noise = T.normal(0, 1, flat_grad.shape, device=flat_grad.device) * noise_scale\n flat_noisy_grad = flat_grad + noise\n print_rank('Error from noise {} is {}. 
grad norm: {} noisy_grad norm: {}'.format(noise_scale, (\n flat_grad - flat_noisy_grad).norm(), flat_grad.norm(), flat_noisy_grad.norm()))\n\n # Return back to the network gradients\n update_network(model.named_parameters(), params_ids, flat_noisy_grad,\n apply_to_grad=select_grad)\n\n if metric_logger is None:\n metric_logger = Run.get_context().log\n metric_logger('Gradient Norm', flat_grad.norm().cpu().item())\n\n\ndef apply_local_dp(trainer, weight, dp_config, add_weight_noise):\n '''Apply client-side DP, possibly given a data-dependent aggregation weight\n\n Args:\n trainer (core.Trainer object): trainer on client.\n dp_config (dict): DP config on original config file.\n add_weight_noise (bool): whether noise should be added to aggregation weight.\n '''\n\n # Unroll the network grads as 1D vectors\n flat_grad, params_ids = unroll_network(trainer.model.named_parameters(), select_grad=True)\n grad_norm = flat_grad.norm().cpu().item()\n\n if dp_config['eps'] < 0:\n # clip, but don't add noise\n if grad_norm > dp_config['max_grad']:\n flat_grad = flat_grad * (dp_config['max_grad'] / grad_norm)\n update_network(trainer.model.named_parameters(), params_ids, flat_grad, apply_to_grad=True)\n\n else:\n # Get Gaussian LDP noise\n dp_eps = dp_config['eps']\n delta = dp_config.get('delta', 1e-7) # TODO pre-compute in config\n weight_ = weight\n\n # Scaling the weight down so we don't impact the noise too much\n weight = dp_config.get('weight_scaler', 1) * weight\n weight = min(dp_config['max_weight'], weight)\n flat_noisy_grad = dp_config['max_grad'] * (flat_grad / flat_grad.norm())\n max_sensitivity = np.sqrt(dp_config['max_grad']**2 + (dp_config['max_weight']**2 if add_weight_noise else 0.0))\n flat_noisy_grad = T.cat([flat_noisy_grad, T.tensor([weight], device=flat_noisy_grad.device)], dim=0)\n flat_noisy_grad, _ = add_gaussian_noise(flat_noisy_grad, dp_eps, max_sensitivity, delta)\n weight = min(max(flat_noisy_grad[-1].item(), dp_config['min_weight']), dp_config['max_weight'])\n\n # Scaling the weight back up after noise addition (This is a DP-protect transformation)\n weight = weight / dp_config.get('weight_scaler', 1)\n if not add_weight_noise:\n weight = weight_\n flat_noisy_grad = flat_noisy_grad[:-1]\n\n print_rank('Cosine error from noise {}'.format(T.nn.functional.cosine_similarity(flat_grad, flat_noisy_grad, dim=0)), loglevel=logging.DEBUG)\n print_rank('Error from noise is {}'.format((flat_grad-flat_noisy_grad).norm()), loglevel=logging.DEBUG)\n print_rank('weight is {} and noisy weight is {}'.format(weight_, weight), loglevel=logging.DEBUG)\n\n # Return back to the network\n update_network(trainer.model.named_parameters(), params_ids, flat_noisy_grad, apply_to_grad=True)\n\n return weight\n\n\ndef update_privacy_accountant(config, num_clients, curr_iter, num_clients_curr_iter):\n # Privacy accounting starts here\n # We will dump all the needed parameters to the log so as not to slow down training.\n dp_config = config.get('dp_config', None)\n if dp_config is not None and dp_config.get('enable_global_dp', False) or dp_config.get('enable_local_dp',\n False):\n from math import sqrt, exp, log\n import extensions.privacy.analysis as privacy_analysis\n\n K = 1 # from DP perspective each user is contributing one gradient\n B = num_clients_curr_iter # batch size\n n = num_clients\n T = curr_iter + 1\n _delta = dp_config.get('delta', min(1e-7, 1. 
/ (n * log(n)))) # TODO should be precomputed in config\n if dp_config.get('global_sigma', None) is None:\n max_sensitivity = np.sqrt(dp_config['max_grad'] ** 2 + dp_config['max_weight'] ** 2)\n noise_scale = compute_LDP_noise_std(dp_config['eps'], max_sensitivity, _delta)\n global_sigma = noise_scale * np.sqrt(B) / max_sensitivity\n else: \n global_sigma = dp_config['global_sigma']\n noise_scale = global_sigma * dp_config['max_grad'] / B\n\n try:\n mu = K * B / n * sqrt(T * exp((1. / global_sigma) ** 2 - 1))\n except OverflowError:\n print_rank(f\"Error computing mu {global_sigma} {K} {B} {n} {T}\")\n mu = -1\n\n orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] + list(range(5, 64)) + [128, 256, 512])\n q = B / n\n _sigma = global_sigma # was: noise_scale but we should apply the noise multiplier.\n rdp = privacy_analysis.compute_rdp(q, _sigma, T, orders)\n\n rdp_epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, _delta)\n\n props = {\n 'dp_global_K': K, # gradients per user\n 'dp_global_B': B, # users per batch\n 'dp_global_n': n, # total users\n 'dp_global_T': T, # how many iterations\n 'dp_sigma': _sigma, # noise_multiplier. Should be combined global+local sigma.\n 'dp_global_mu': mu,\n # 'dp_epsilon_fdp': fdp_epsilon,\n 'dp_epsilon_rdp': rdp_epsilon,\n # 'dp_epsilon_exact': exact_eps,\n 'dp_opt_order': opt_order,\n 'dp_delta': _delta,\n 'dp_noise_scale': noise_scale # Note: not needed for accounting.\n }\n\n print_rank(f'DP accounting: {json.dumps(props)}')\n for k in props:\n run.log(k, props[k])\n\n return rdp_epsilon\n else:\n return None\n", "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n'''\nIn this file, we define the classes that live inside 'worker 0', the worker\nresponsible for orchestration and aggregation. 
The main class is the\nOptimizationServer, which sends clients to the other workers to process and\ncombines the resulting models.\n'''\n\nimport json\nimport logging\nimport os\nimport random\nimport shutil\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\n\n# Internal imports\nfrom core.globals import TRAINING_FRAMEWORK_TYPE\nif TRAINING_FRAMEWORK_TYPE == 'mpi':\n import core.federated as federated\nelse:\n raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))\nfrom core.evaluation import Evaluation\nfrom core.client import Client\nfrom .strategies import select_strategy\nfrom .trainer import (\n ModelUpdater,\n Trainer,\n set_component_wise_lr,\n)\nfrom utils import (\n get_lr,\n print_rank,\n update_json_log,\n)\n\n# For profiling\nimport cProfile\nimport pstats\n\n# AzureML-related libs\nfrom azureml.core import Run\nrun = Run.get_context()\n\n\nclass OptimizationServer(federated.Server):\n def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,\n val_dataloader, test_dataloader, config, config_server):\n '''Implement Server's orchestration and aggregation.\n\n This is the main Server class, that actually implements orchestration\n and aggregation, inheriting from `federated.Server`, which deals with\n communication only.\n\n The `train` method is central in FLUTE, as it defines good part of what\n happens during training.\n\n Args:\n num_clients (int): total available clients.\n model (torch.nn.Module): neural network model.\n optimizer (torch.optim.Optimizer): optimizer.\n ss_scheduler: scheduled sampling scheduler.\n data_path (str): points to where data is.\n model_path (str): points to where pretrained model is.\n train_dataloader (torch.utils.data.DataLoader): dataloader for training\n val_dataloader (torch.utils.data.DataLoader): dataloader for validation\n test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None\n config (dict): JSON style configuration parameters\n config_server: deprecated, kept for API compatibility only.\n '''\n\n super().__init__()\n\n # Initialize all attributes from arguments\n self.client_idx_list = list(range(num_clients))\n self.config = config\n server_config = config['server_config']\n decoder_config = config.get('decoder_config', None)\n\n self.max_iteration = server_config['max_iteration']\n self.do_clustering = server_config.get('clustering', False)\n\n self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \\\n if isinstance(server_config['num_clients_per_iteration'], str) \\\n else [server_config['num_clients_per_iteration']]\n\n self.val_freq = server_config['val_freq']\n self.req_freq = server_config['rec_freq']\n\n self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)\n\n # TODO: does this need to be adjusted for custom metrics?\n self.metrics = {\n 'best_val_loss': float('inf'),\n 'best_val_acc': 0.0,\n 'best_test_loss': float('inf'),\n 'best_test_acc': 0.0\n }\n\n self.model_backup_freq = server_config.get('model_backup_freq', 100)\n self.worker_trainer_config = server_config.get('trainer_config', {})\n\n self.aggregate_median = server_config['aggregate_median']\n self.initial_lr_client = server_config.get('initial_lr_client', -1.0)\n self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)\n\n self.model_type = config['model_config']['model_type']\n self.quant_thresh = 
config['client_config'].get('quant_thresh', None)\n self.quant_bits = config['client_config'].get('quant_bits', 10)\n\n self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']\n self.data_path = data_path\n\n # Get max grad norm from data config\n if 'train' in server_config['data_config']:\n max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)\n else:\n max_grad_norm = None\n\n # Creating an instance to update the model with stats aggregated from workers\n self.worker_trainer = ModelUpdater(\n model=model,\n optimizer=optimizer,\n ss_scheduler=ss_scheduler,\n train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,\n val_dataloader=val_dataloader,\n max_grad_norm=max_grad_norm,\n anneal_config=server_config['annealing_config'],\n model_type=self.model_type,\n decoder_config=decoder_config\n )\n self.metrics['worker_trainer'] = self.worker_trainer\n # Creating an instance for the server-side trainer (runs mini-batch SGD)\n self.server_replay_iterations = None\n self.server_trainer = None\n if train_dataloader is not None:\n assert 'server_replay_config' in server_config, 'server_replay_config is not set'\n assert 'optimizer_config' in server_config[\n 'server_replay_config'], 'server-side replay training optimizer is not set'\n self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']\n self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})\n self.server_replay_iterations = server_config['server_replay_config']['server_iterations']\n self.server_trainer = Trainer(\n model=model,\n optimizer=None,\n ss_scheduler=ss_scheduler,\n train_dataloader=train_dataloader,\n server_replay_config=server_config['server_replay_config'],\n val_dataloader=None,\n max_grad_norm=server_config['server_replay_config']\\\n .get('max_grad_norm',server_config['data_config']['train']\\\n .get('max_grad_norm',None)),\n anneal_config=server_config['server_replay_config'].get('annealing_config', None)\n )\n\n self.skip_model_update = False # will not update the model if True\n\n self.train_loss = 0.0\n self.model_path = model_path\n self.best_model_criterion = server_config['best_model_criterion']\n self.fall_back_to_best_model = server_config['fall_back_to_best_model']\n self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')\n self.best_model_path = os.path.join(self.model_path,\n 'best_val_{}_model.tar'.format(self.best_model_criterion))\n self.log_path = os.path.join(self.model_path, 'status_log.json')\n self.cur_iter_no = 0 # keep the iteration number for Tensor board plotting\n self.lr_weight = 1.0\n\n self.losses = []\n self.no_label_updates = 0 # no. 
label updates\n\n # Update the parameters above if the log file\n if server_config.get('resume_from_checkpoint', False):\n self.load_saved_status()\n\n # Decoding config\n self.decoder_config = decoder_config\n self.spm_model = server_config['data_config']['test'].get('spm_model', None)\n\n self.do_profiling = server_config.get('do_profiling', False)\n\n # Parallel processing\n self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)\n\n StrategyClass = select_strategy(config['strategy'])\n self.strategy = StrategyClass('server', self.config, self.model_path)\n print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)\n\n def load_saved_status(self):\n '''Load checkpoint from disk'''\n\n # Check if model is on disk, if so loads it onto trainer\n if os.path.exists(self.last_model_path):\n print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))\n self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)\n if self.server_trainer is not None:\n self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync\n\n # Check if log is on disk, if so loads it onto current stats\n if os.path.exists(self.log_path):\n with open(self.log_path, 'r') as logfp: # loading the iteration no., best loss and CER\n elems = json.load(logfp)\n self.cur_iter_no = elems.get('i', 0)\n self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))\n self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)\n self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))\n self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)\n self.lr_weight = elems.get('weight', 1.0)\n self.no_label_updates = elems.get('num_label_updates', 0)\n print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')\n\n def run(self):\n '''Trigger training.\n\n This is a simple wrapper to the `train` method.\n '''\n print_rank('server started')\n self.train()\n print_rank('server terminated')\n\n def train(self):\n '''Main method for training.'''\n\n self.run_stats = {\n 'secsPerClientRound': [],\n 'secsPerClient': [],\n 'secsPerClientTraining': [],\n 'secsPerClientSetup': [],\n 'secsPerClientFull': [],\n 'secsPerRoundHousekeeping': [],\n 'secsPerRoundTotal': [],\n 'mpiCosts': []\n }\n\n run.log('Max iterations', self.max_iteration)\n try:\n self.worker_trainer.model.cuda() if torch.cuda.is_available() else None\n\n # Do an initial validation round to understand the pretrained model's validation accuracy\n # Skip if we resumed from a checkpoint (cur_iter_no > 0)\n eval_list = []\n if self.cur_iter_no == 0:\n\n if self.config['server_config']['initial_rec']:\n eval_list.append('test')\n if self.config['server_config']['initial_val']:\n eval_list.append('val')\n run.log('LR for agg. 
opt.', get_lr(self.worker_trainer.optimizer))\n\n print_rank(\"Running {} at itr={}\".format(eval_list, self.cur_iter_no))\n self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)\n eval_list = [] # some cleanup\n\n # Dump all the information in aggregate_metric\n print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)\n for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:\n self.worker_trainer.save(\n model_path=self.model_path,\n token=token,\n config=self.config['server_config']\n )\n\n # Training loop\n self.worker_trainer.model.train()\n for i in range(self.cur_iter_no, self.max_iteration):\n begin = time.time()\n metrics_payload = {}\n\n def log_metric(k, v):\n metrics_payload[k] = v\n\n print_rank('==== iteration {}'.format(i))\n log_metric('Current iteration', i)\n\n # Initial value for the learning rate of the worker\n initial_lr = self.initial_lr_client * self.lr_weight\n print_rank('Client learning rate {}'.format(initial_lr))\n\n # Run training on clients\n self.worker_trainer.model.zero_grad()\n self.train_loss = []\n server_data = (\n initial_lr,\n [p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]\n )\n\n # Random number of clients per iteration\n if len(self.num_clients_per_iteration) > 1:\n num_clients_curr_iter = random.randint(\n self.num_clients_per_iteration[0],\n self.num_clients_per_iteration[1]\n )\n else:\n num_clients_curr_iter = self.num_clients_per_iteration[0]\n log_metric('Clients for round', num_clients_curr_iter)\n\n # Perform annealing in quantization threshold\n if self.quant_thresh is not None:\n self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)\n self.quant_thresh = self.config['client_config']['quant_thresh']\n log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])\n\n # Create the pool of clients -- sample from this pool to assign to workers\n sampled_idx_clients = random.sample(self.client_idx_list,\n num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list\n sampled_clients = [\n Client(\n client_id,\n self.config,\n self.config['client_config']['type'] == 'optimization',\n None\n ) for client_id in sampled_idx_clients\n ]\n\n # Initialize stats\n clients_begin = time.time()\n\n client_losses = []\n client_mag_grads = []\n client_mean_grads = []\n client_var_grads = []\n client_norm_grads = []\n\n self.run_stats['secsPerClient'].append([])\n self.run_stats['secsPerClientFull'].append([])\n self.run_stats['secsPerClientTraining'].append([])\n self.run_stats['secsPerClientSetup'].append([])\n self.run_stats['mpiCosts'].append([])\n\n # Check if we want privacy metrics\n apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \\\n self.config['privacy_metrics_config']['apply_metrics']\n adaptive_leakage = apply_privacy_metrics and \\\n self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)\n if apply_privacy_metrics:\n privacy_metrics_stats = defaultdict(list)\n\n # Initialize profiler\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n profiler.enable()\n\n # Reset gradient for the model before assigning the new gradients\n self.worker_trainer.model.zero_grad()\n\n for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):\n # Process client output\n client_timestamp = client_output['ts']\n client_stats = client_output['cs']\n client_loss = 
client_output['tl']\n client_mag_grad = client_output['mg']\n client_mean_grad = client_output['ng']\n client_var_grad = client_output['vg']\n client_norm_grad = client_output['rg']\n client_payload = client_output['pl']\n\n if apply_privacy_metrics:\n privacy_stats = client_output['ps']\n for metric, value in privacy_stats.items():\n privacy_metrics_stats[metric].append(value)\n\n self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)\n\n # Get actual pseudo-gradients for aggregation\n payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)\n if not payload_processed:\n print_rank('Dropping client', loglevel=logging.DEBUG)\n num_clients_curr_iter -= 1\n continue\n\n # Aggregate stats\n self.train_loss.append(client_loss)\n client_losses.append(client_loss)\n client_mag_grads.append(client_mag_grad.item())\n client_mean_grads.append(client_mean_grad.item())\n client_var_grads.append(client_var_grad.item())\n client_norm_grads.append(client_norm_grad.item())\n\n # Mark the end of client processing\n client_end = time.time()\n\n self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])\n self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])\n self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])\n self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)\n\n # Tear down profiler\n if self.do_profiling:\n profiler.disable()\n stats = pstats.Stats(profiler)\n stats.sort_stats('cumulative').print_stats()\n\n # Prepare output\n client_mag_grads = np.array(client_mag_grads)\n client_mean_grads = np.array(client_mean_grads)\n client_var_grads = np.array(client_var_grads)\n client_norm_grads = np.array(client_norm_grads)\n\n client_stats = (client_mag_grads, client_mean_grads, client_var_grads)\n\n dump_norm_stats = self.config.get('dump_norm_stats', False)\n if dump_norm_stats:\n with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:\n outF.write('{}\\n'.format(json.dumps(list(client_norm_grads))))\n\n # Print the privacy metrics\n if apply_privacy_metrics:\n for metric, values in privacy_metrics_stats.items():\n if metric == 'Dropped clients':\n log_metric(metric, sum(values))\n else:\n log_metric(metric, max(values))\n\n if type(adaptive_leakage) is float:\n values = privacy_metrics_stats['Practical epsilon (Max leakage)']\n new_threshold = list(sorted(values))[int(adaptive_leakage*len(values))]\n print_rank('Updating leakage threshold to {}'.format(new_threshold))\n self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold\n\n # Mark that all clients have been processed\n end = time.time()\n self.run_stats['secsPerClientRound'].append(end - begin)\n begin = end\n\n # Log the training loss to tensorboard/AML\n log_metric('Training loss', sum(self.train_loss))\n\n # Combine payloads\n self.losses = self.strategy.combine_payloads(\n worker_trainer=self.worker_trainer,\n curr_iter=i,\n num_clients_curr_iter=num_clients_curr_iter,\n client_stats=client_stats,\n logger=log_metric,\n )\n\n # Run a couple of iterations of training data on the server\n if self.server_trainer is not None:\n print_rank('Running replay iterations on server')\n\n if 'updatable_names' in self.server_trainer_config:\n set_component_wise_lr(\n self.worker_trainer.model,\n self.server_optimizer_config,\n self.server_trainer_config['updatable_names']\n )\n self.server_trainer.prepare_iteration(self.worker_trainer.model)\n 
self.server_trainer.train_desired_samples(self.server_replay_iterations)\n self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())\n torch.cuda.empty_cache()\n\n # Update a sampling scheduler\n print_rank('Run ss scheduler')\n self.worker_trainer.run_ss_scheduler()\n\n # Run inference and score on val/test depending on the iter. number\n if ((i+1) % self.val_freq) == 0:\n eval_list.append(\"val\")\n if ((i+1) % self.req_freq) == 0 :\n eval_list.append(\"test\")\n\n if len(eval_list)> 0:\n print_rank('Running {} at itr={}'.format(eval_list,i+1))\n self.metrics['worker_trainer'] = self.worker_trainer\n self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)\n self.losses = self.evaluation.losses\n eval_list = []\n\n # Create a schedule for the initial_lr (for the worker)\n if 'val' in eval_list:\n run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))\n if not (self.losses[0] < self.metrics['best_val_loss']):\n self.lr_weight *= self.lr_decay_factor\n print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight))\n\n # Backup the current best models\n self.backup_models(i)\n\n # Fall back to the best model if the option is enabled\n self.fall_back_to_prev_best_status()\n\n # Logging the latest best values\n update_json_log(\n self.log_path,\n {\n 'i': i + 1,\n 'best_val_loss': float(self.metrics['best_val_loss']),\n 'best_val_acc': float(self.metrics['best_val_acc']),\n 'best_test_loss': float(self.metrics['best_test_loss']),\n 'best_test_acc': float(self.metrics['best_test_acc']),\n 'weight': float(self.lr_weight),\n 'num_label_updates': int(self.no_label_updates)\n },\n )\n\n end = time.time()\n\n # Aggregate stats\n self.run_stats['secsPerRoundHousekeeping'].append(end - begin)\n self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \\\n self.run_stats['secsPerRoundHousekeeping'][-1])\n\n log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])\n if self.do_profiling:\n log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])\n log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])\n\n metrics_for_stats = [\n 'secsPerClient',\n 'secsPerClientTraining',\n 'secsPerClientFull',\n 'secsPerClientSetup',\n 'mpiCosts',\n ]\n\n for metric in metrics_for_stats:\n log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))\n log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))\n log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))\n\n for k in self.run_stats:\n if k in metrics_for_stats:\n print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)\n else:\n print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)\n\n # Log all the metrics\n for k in metrics_payload:\n run.log(k, metrics_payload[k])\n\n finally: # perform cleanup even if error was raised above\n self.terminate_workers(terminate=(not self.do_clustering))\n\n def backup_models(self, i):\n '''Save the current best models.\n\n Save CER model, the best loss model and the best WER model. This occurs\n at a specified period.\n\n Args:\n i: no. 
of iterations.\n '''\n\n # Always save the latest model\n self.worker_trainer.save(\n model_path=self.model_path,\n token='latest',\n config=self.config['server_config'],\n )\n\n if (i % self.model_backup_freq) == 0: # save the current best models\n self.worker_trainer.save(\n model_path=self.model_path,\n token='epoch{}'.format(i),\n config=self.config['server_config']\n )\n\n for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:\n src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))\n if os.path.exists(src_model_path):\n dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))\n shutil.copyfile(src_model_path, dst_model_path)\n print_rank('Saved {}'.format(dst_model_path))\n\n def fall_back_to_prev_best_status(self):\n '''Go back to the past best status and switch to the recent best model.'''\n\n if self.fall_back_to_best_model:\n print_rank('falling back to model {}'.format(self.best_model_path))\n\n # Save current learning rate\n tmp_lr = get_lr(self.worker_trainer.optimizer)\n\n # Load previous best model\n self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)\n\n # Update previous learning rate on optimizer\n for g in self.worker_trainer.optimizer.param_groups:\n g['lr'] = tmp_lr\n\n if self.server_trainer is not None:\n self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync\n\n\ndef select_server(server_type, config):\n '''Select a server type using different possible strings.\n\n Right now this just returns `OptimizationServer`, but this\n function could be useful when there are multiple choices of\n server.\n\n Args:\n server_type (str): indicates server choice.\n config (dict): config parsed from YAML, passed so that\n parameters can be used to select a given server.\n '''\n return OptimizationServer\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ], [ "torch.normal", "numpy.log", "numpy.minimum", "numpy.sqrt", "torch.randint", "torch.cat", "torch.randn", "torch.tensor", "torch.dot", "scipy.special.betaln", "torch.rand", "torch.nn.functional.cosine_similarity", "numpy.random.laplace", "numpy.exp", "scipy.special.betainc" ], [ "numpy.median", "torch.cuda.empty_cache", "numpy.mean", "torch.cuda.is_available", "torch.device", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hnguye12333/Python_
[ "6aebb00d18ff5f276e6d1cc006c5e664bf9252eb" ]
[ "discount_cashflows/discount_cashflows.py" ]
[ "path_cashflow = r\"./Cashflows.csv\"\npath_rate = r\"./Yield curve.csv\"\n\n# Implementation with Pandas\nimport pandas as pd\n\n# Read in the cash flows data and rate data as csv\ncashflow_df = pd.read_csv(path_cashflow)\nrate_df = pd.read_csv(path_rate)\n\n# Calculate discount factor from the rates\nrate_df[\"Discount factor\"] = 1 / (1 + rate_df[\"Interest rate\"])**rate_df[\"Year\"]\n\n# Join cash flows with rates\ncf_with_rate_df = cashflow_df.merge(rate_df, on=[\"Currency\", \"Year\"], how=\"left\")\n\n# Calculate present values\ncf_with_rate_df[\"Present value\"] = cf_with_rate_df[\"Cash flows\"] * cf_with_rate_df[\"Discount factor\"]\n\n# Groupby product and check the profitability\ncf_with_rate_df = cf_with_rate_df.groupby(\"Product\")[[\"Present value\"]].sum().reset_index()\n\n# -----\n\n# Implementation with Koalas\nimport databricks.koalas as ks\n\n# Read in the cash flows data and rate data as csv\ncashflow_df = ks.read_csv(path_cashflow)\nrate_df = ks.read_csv(path_rate)\n\n# Calculate discount factor from the rates\nrate_df[\"Discount factor\"] = 1 / (1 + rate_df[\"Interest rate\"])**rate_df[\"Year\"]\n\n# Join cash flows with rates\ncf_with_rate_df = cashflow_df.merge(rate_df, on=[\"Currency\", \"Year\"], how=\"left\")\n\n# Calculate present values\ncf_with_rate_df[\"Present value\"] = cf_with_rate_df[\"Cash flows\"] * cf_with_rate_df[\"Discount factor\"]\n\n# Groupby product and check the profitability\ncf_with_rate_df = cf_with_rate_df.groupby(\"Product\")[[\"Present value\"]].sum().reset_index()\n\n# -----\n\n# Implementation with PySpark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import Window\nfrom pyspark.sql import functions as f\n\n# Define Spark settings\nbuilder = SparkSession.builder.appName(\"Discount_Cashflows\")\nspark = builder.getOrCreate()\n\n# Read in the cash flows data and rate data as csv\ncashflow_df = spark.read.csv(path_cashflow, header=True, inferSchema=True)\nrate_df = spark.read.csv(path_rate, header=True, inferSchema=True)\n\n# Calculate discount factor from the rates\nrate_df = rate_df.withColumn(\"Discount factor\", 1 / (1 + rate_df[\"Interest rate\"])**rate_df[\"Year\"])\n\n# Join cash flows with rates\ncf_with_rate_df = cashflow_df.join(f.broadcast(rate_df), on=[\"Currency\", \"Year\"], how=\"left\")\n\n# Calculate present values\ncf_with_rate_df = cf_with_rate_df.withColumn(\"Present value\", f.col(\"Cash flows\") * f.col(\"Discount factor\"))\n\n# Groupby product and check the profitability\ncf_with_rate_df = cf_with_rate_df.groupBy(\"Product\").agg(f.sum(\"Present value\").alias(\"Present value\"))\n\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
caspervdw/circletracking
[ "2d981a1bd3f2982d5d36932d7d5a38e912fcdba3", "2d981a1bd3f2982d5d36932d7d5a38e912fcdba3" ]
[ "circletracking/algebraic.py", "circletracking/find.py" ]
[ "\"\"\" Functions for algebraic fitting \"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport six\nimport numpy as np\n\nMODE_DICT_ELLIPSE = {'circle': 'xy', 'ellipse_aligned': '0', 'ellipse': ''}\nMODE_DICT_ELLIPSOID = {'sphere': 'xyz', 'prolate': 'xy', 'oblate': 'xy',\n 'ellipsoid': '', 'ellipsoid_aligned': '0',\n 'prolate_aligned': '0xy', 'oblate_aligned': '0xy'}\n\n\ndef fit_ellipse(coords, mode=''):\n \"\"\" Fits an ellipse to datapoints using an algebraic method.\n\n This method is different from least squares minimization of the distnace\n between each datapoint and the fitted ellipse (so-called geometrical\n approach). For circles, this makes no difference. The higher the aspect\n ratio of the ellipse, the worse the approximation gets.\n\n Parameters\n ----------\n coords : numpy array of floats\n array of shape (N, 2) containing datapoints\n mode : {'circle', 'ellipse', 'ellipse_aligned'}\n 'ellipse' or None fits an arbitrary ellipse (default)\n 'circle' fits a circle\n 'ellipse_aligned' fits an ellipse with its axes aligned along [x y] axes\n\n Returns\n -------\n center, radii, angle\n\n References\n ----------\n .. [1] Bertoni B (2010) Multi-dimensional ellipsoidal fitting.\n \"\"\"\n if coords.shape[0] != 2:\n raise ValueError('Input data must have two columns!')\n if mode in MODE_DICT_ELLIPSE:\n mode = MODE_DICT_ELLIPSE[mode]\n\n x = coords[0, :, np.newaxis]\n y = coords[1, :, np.newaxis]\n\n if mode == '':\n D = np.hstack((x**2 - y**2, 2*x*y, 2*x, 2*y, np.ones_like(x)))\n elif mode == '0':\n D = np.hstack((x**2 - y**2, 2*x, 2*y, np.ones_like(x)))\n elif mode == 'xy':\n D = np.hstack((2*x, 2*y, np.ones_like(x)))\n\n d2 = x**2 + y**2 # the RHS of the llsq problem (y's)\n u = np.linalg.solve(np.dot(D.T, D), (np.dot(D.T, d2)))[:, 0]\n v = np.empty((6), dtype=u.dtype)\n\n if mode == '':\n v[0] = u[0] - 1\n v[1] = -u[0] - 1\n v[2:] = u[1:]\n elif mode == '0':\n v[0] = u[0] - 1\n v[1] = -u[0] - 1\n v[2] = 0\n v[3:] = u[1:]\n elif mode == 'xy':\n v[:2] = -1\n v[2] = 0\n v[3:] = u\n\n A = np.array([[v[0], v[2], v[3]],\n [v[2], v[1], v[4]],\n [v[3], v[4], v[5]]])\n # find the center of the ellipse\n center = -np.linalg.solve(A[:2, :2], v[3:5])\n\n # translate to the center\n T = np.identity(3, dtype=A.dtype)\n T[2, :2] = center\n R = np.dot(np.dot(T, A), T.T)\n\n # solve the eigenproblem\n evals, evecs = np.linalg.eig(R[:2, :2] / -R[2, 2])\n radius = (np.sqrt(1 / np.abs(evals)) * np.sign(evals))\n\n if mode == '':\n new_order = np.argmax(np.abs(evecs), 1)\n radius = radius[new_order]\n evecs = evecs[:, new_order]\n r11, r12, r21, r22 = evecs.T.flat\n angle = np.arctan(-r12/r11)\n else:\n angle = 0\n\n return radius, center, angle\n\n\ndef fit_ellipsoid(coords, mode='', return_mode=''):\n \"\"\"\n Fit an ellipsoid/sphere/paraboloid/hyperboloid to a set of xyz data points:\n\n Parameters\n ----------\n coords : ndarray\n Cartesian coordinates, 3 x n array\n mode : {'', 'xy', 'xz', 'xyz', '0', '0xy', '0xz'} t\n '' or None fits an arbitrary ellipsoid (default)\n 'xy' fits a spheroid with x- and y- radii equal\n 'xz' fits a spheroid with x- and z- radii equal\n 'xyz' fits a sphere\n '0' fits an ellipsoid with its axes aligned along [x y z] axes\n '0xy' the same with x- and y- radii equal\n '0xz' the same with x- and z- radii equal\n return_mode : {'', 'euler', 'skew'}\n '' returns the directions of the radii as 3x3 array\n 'euler' returns euler angles\n 'skew' returns skew in xy\n\n Returns\n -------\n radius : ndarray\n ellipsoid radii 
[zr, yr, xr]\n center : ndarray\n ellipsoid center coordinates [zc, yc, xc]\n value :\n return_mode == '': the radii directions as columns of the 3x3 matrix\n return_mode == 'euler':\n euler angles, applied in x, y, z order [z, y, x]\n the y value is the angle with the z axis (tilt)\n the z value is the angle around the z axis (rotation)\n the x value is the 3rd rotation, should be around 0\n return_mode == 'skew':\n skew in y, x order\n\n Notes\n -----\n Author: Yury Petrov, Oculus VR Date: September, 2015\n ported to python by Casper van der Wel, December 2015\n added euler angles and skew by Casper van der Wel\n \"\"\"\n if coords.shape[0] != 3:\n raise ValueError('Input data must have three columns!')\n if mode in MODE_DICT_ELLIPSOID:\n mode = MODE_DICT_ELLIPSOID[mode]\n if return_mode == 'skew' and 'xy' not in mode:\n raise ValueError('Cannot return skew when x, y radii are not equal')\n if return_mode == 'euler':\n raise ValueError('Euler mode is not implemented fully')\n z = coords[0, :, np.newaxis]\n y = coords[1, :, np.newaxis]\n x = coords[2, :, np.newaxis]\n\n # fit ellipsoid in the form Ax^2 + By^2 + Cz^2 + 2Dxy + 2Exz + 2Fyz + 2Gx +\n # 2Hy + 2Iz + J = 0 and A + B + C = 3 constraint removing one extra param\n if mode == '':\n D = np.hstack((x**2 + y**2 - 2 * z**2, x**2 + z**2 - 2 * y**2,\n 2 * x * y, 2 * x * z, 2 * y * z, 2 * x, 2 * y, 2 * z,\n np.ones_like(x)))\n elif mode == 'xy':\n D = np.hstack((x**2 + y**2 - 2 * z**2, 2 * x * y, 2 * x * z, 2 * y * z,\n 2 * x, 2 * y, 2 * z, np.ones_like(x)))\n elif mode == 'xz':\n D = np.hstack((x**2 + z**2 - 2 * y**2, 2 * x * y, 2 * x * z, 2 * y * z,\n 2 * x, 2 * y, 2 * z, np.ones_like(x)))\n\n # fit ellipsoid in the form Ax^2 + By^2 + Cz^2 + 2Gx + 2Hy + 2Iz = 1\n elif mode == '0':\n D = np.hstack((x**2 + y**2 - 2 * z**2, x**2 + z**2 - 2 * y**2,\n 2 * x, 2 * y, 2 * z, np.ones_like(x)))\n\n # fit ellipsoid in the form Ax^2 + By^2 + Cz^2 + 2Gx + 2Hy + 2Iz = 1,\n # where A = B or B = C or A = C\n elif mode == '0xy':\n D = np.hstack((x**2 + y**2 - 2 * z**2, 2 * x, 2 * y, 2 * z,\n np.ones_like(x)))\n elif mode == '0xz':\n D = np.hstack((x**2 + z**2 - 2 * y**2, 2 * x, 2 * y, 2 * z,\n np.ones_like(x)))\n\n # fit sphere in the form A(x^2 + y^2 + z^2) + 2Gx + 2Hy + 2Iz = 1\n elif mode == 'xyz':\n D = np.hstack((2 * x, 2 * y, 2 * z, np.ones_like(x)))\n else:\n raise ValueError('Unknown mode \"{}\"'.format(mode))\n\n if D.shape[0] < D.shape[1]:\n raise ValueError('Not enough datapoints')\n\n # solve the normal system of equations\n d2 = x**2 + y**2 + z**2 # the RHS of the llsq problem (y's)\n u = np.linalg.solve(np.dot(D.T, D), (np.dot(D.T, d2)))[:, 0]\n\n # find the ellipsoid parameters\n # convert back to the conventional algebraic form\n v = np.empty((10), dtype=u.dtype)\n if mode == '':\n v[0] = u[0] + u[1] - 1\n v[1] = u[0] - 2 * u[1] - 1\n v[2] = u[1] - 2 * u[0] - 1\n v[3:10] = u[2:9]\n elif mode == 'xy':\n v[0] = u[0] - 1\n v[1] = u[0] - 1\n v[2] = -2 * u[0] - 1\n v[3:10] = u[1:8]\n elif mode == 'xz':\n v[0] = u[0] - 1\n v[1] = -2 * u[0] - 1\n v[2] = u[0] - 1\n v[3:10] = u[1:8]\n elif mode == '0':\n v[0] = u[0] + u[1] - 1\n v[1] = u[0] - 2 * u[1] - 1\n v[2] = u[1] - 2 * u[0] - 1\n v[3:6] = 0\n v[6:10] = u[2:6]\n elif mode == '0xy':\n v[0] = u[0] - 1\n v[1] = u[0] - 1\n v[2] = -2 * u[0] - 1\n v[3:6] = 0\n v[6:10] = u[2:6]\n elif mode == '0xz':\n v[0] = u[0] - 1\n v[1] = -2 * u[0] - 1\n v[2] = u[0] - 1\n v[3:6] = 0\n v[6:10] = u[2:6]\n elif mode == 'xyz':\n v[:3] = -1\n v[3:6] = 0\n v[6:10] = u[:4]\n\n # form the algebraic form of the 
ellipsoid\n A = np.array([[v[0], v[3], v[4], v[6]],\n [v[3], v[1], v[5], v[7]],\n [v[4], v[5], v[2], v[8]],\n [v[6], v[7], v[8], v[9]]])\n # find the center of the ellipsoid\n center = -np.linalg.solve(A[:3, :3], v[6:9])\n\n # form the corresponding translation matrix\n T = np.identity(4, dtype=A.dtype)\n T[3, :3] = center\n # translate to the center\n R = np.dot(np.dot(T, A), T.T)\n if return_mode == 'skew':\n # extract the xy skew (ignoring a parameter here!)\n skew_xy = -R[2, :2] / np.diag(R[:2, :2])\n radius = np.diag(R[:3, :3]) / R[3, 3]\n\n # do some trick to make radius_z be the unskewed radius\n radius[2] -= np.sum(radius[:2] * skew_xy**2)\n radius = np.sqrt(1 / np.abs(radius))\n return radius[::-1], center[::-1], skew_xy[::-1]\n\n # solve the eigenproblem\n evals, evecs = np.linalg.eig(R[:3, :3] / -R[3, 3])\n radii = (np.sqrt(1 / np.abs(evals)) * np.sign(evals))\n\n if return_mode == 'euler':\n # sort the vectors so that -> z, y, x\n new_order = np.argmax(np.abs(evecs), 1)\n radii = radii[new_order]\n evecs = evecs[:, new_order]\n\n # Discover Euler angle vector from 3x3 matrix\n cy_thresh = np.finfo(evecs.dtype).eps * 4\n r11, r12, r13, r21, r22, r23, r31, r32, r33 = evecs.T.flat\n # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)\n cy = np.sqrt(r33*r33 + r23*r23)\n if cy > cy_thresh: # cos(y) not close to zero, standard form\n # z: atan2(cos(y)*sin(z), cos(y)*cos(z)),\n # y: atan2(sin(y), cy), atan2(cos(y)*sin(x),\n # x: cos(x)*cos(y))\n angles = np.array([np.arctan(r12/r11), np.arctan(-r13/cy),\n np.arctan(r23/r33)])\n else: # cos(y) (close to) zero, so x -> 0.0 (see above)\n # so r21 -> sin(z), r22 -> cos(z) and\n # y: atan2(sin(y), cy)\n angles = np.array([np.arctan(-r21/r22), np.arctan(-r13/cy), 0.0])\n\n return radii[::-1], center[::-1], angles\n\n return radii[::-1], center[::-1], evecs[::-1]\n\n\ndef ellipse_grid(radius, center, rotation=0, skew=0, n=None, spacing=1):\n \"\"\" Returns points and normal (unit) vectors on an ellipse.\n\n Parameters\n ----------\n radius : tuple\n (yr, xr) the two principle radii of the ellipse\n center : tuple\n (yc, xc) the center coordinate of the ellipse\n rotation : float, optional\n angle of xr with the x-axis, in radians. 
Rotates clockwise in image.\n skew : float, optional\n skew: y -> y + skew * x\n n : int, optional\n number of points\n spacing : float, optional\n When `n` is not given then the spacing is determined by `spacing`.\n\n Returns\n -------\n two arrays of shape (2, N), being the coordinates and unit normals\n \"\"\"\n yr, xr = radius\n yc, xc = center\n if n is None:\n n = int(2*np.pi*np.sqrt((yr**2 + xr**2) / 2) / spacing)\n\n phi = np.linspace(-np.pi, np.pi, n, endpoint=False)\n pos = np.array([yr * np.sin(phi), xr * np.cos(phi)])\n\n normal = np.array([np.sin(phi) / yr, np.cos(phi) / xr])\n normal /= np.sqrt((normal**2).sum(0))\n\n mask = np.isfinite(pos).all(0) & np.isfinite(normal).all(0)\n pos = pos[:, mask]\n normal = normal[:, mask]\n\n if rotation != 0:\n R = np.array([[ np.cos(rotation), np.sin(rotation)],\n [-np.sin(rotation), np.cos(rotation)]])\n pos = np.dot(pos.T, R).T\n elif skew != 0:\n pos[0] += pos[1] * skew\n\n # translate\n pos[0] += yc\n pos[1] += xc\n return pos, normal # both in y_list, x_list format\n\n\ndef ellipsoid_grid(radius, center, spacing=1):\n \"\"\" Returns points and normal (unit) vectors on an ellipse on only\n integer values of z.\n\n Parameters\n ----------\n radius : tuple\n (zr, yr, xr) the three principle radii of the ellipsoid\n center : tuple\n (zc, yc, xc) the center coordinate of the ellipsoid\n spacing : float, optional\n Distance between points\n\n Returns\n -------\n two arrays of shape (3, N), being the coordinates and unit normals\n \"\"\"\n zc, yc, xc = center\n zr, yr, xr = radius\n\n pos = np.empty((3, 0))\n for z in range(int(zc - zr + 1), int(zc + zr) + 1):\n n = int(2*np.pi*np.sqrt((yr**2 + xr**2) / 2) / spacing)\n if n == 0:\n continue\n phi = np.linspace(-np.pi, np.pi, n, endpoint=False)\n factor = np.sqrt(1 - ((zc - z) / zr)**2) # = sin(arccos((zc/z)/zr))\n pos = np.append(pos,\n np.array([[float(z)] * n,\n yr * factor * np.sin(phi) + yc,\n xr * factor * np.cos(phi) + xc]),\n axis=1)\n normal = (pos - np.array(center)[:, np.newaxis]) / np.array(radius)[:, np.newaxis]\n normal /= np.sqrt((normal**2).sum(0))\n\n mask = np.isfinite(pos).all(0) & np.isfinite(normal).all(0)\n return pos[:, mask], normal[:, mask]\n\n\ndef max_linregress(arr, maxfit_size=2, threshold=0.1, axis=1):\n \"\"\" Locates maxima by fitting parabolas to values around the maximum.\n\n This function is optimized for two-dimensional numpy arrays. For each row\n in the array, the index of the maximum value is located. Then some values\n around it (given by ``maxfit_size``) are taken, the first (discrete)\n derivative is taken, and linear regression is done. This gives the location\n of the maximum with sub-pixel precision. Effectively, a parabola is fitted.\n\n Parameters\n ----------\n arr : ndarray\n maxfit_size : integer, optional\n Defines the fit region around the maximum value. By default, this value\n is 2, that is, two pixels before and two pixels after the maximum are\n used for the fit (a total of 5).\n threshold :\n Discard points when the average value of the fit region is lower than\n ``threshold`` times the maximum in the whole fit array. This helps\n discarding low-intensity peaks. Default 0.1: if the average intensity\n in the fitregion is below 10% of the global maximum, the point is\n discarded.\n axis : {0, 1}\n axis along which the maxima are fitted. 
Default 1.\n\n Returns\n -------\n ndarray with the locations of the maxima.\n Elements are NaN in all of the following cases:\n - any pixel in the fitregion is 0\n - the mean of the fitregion < threshold * global max\n - regression returned infinity\n - maximum is outside of the fit region.\n \"\"\"\n if axis == 0:\n arr = arr.T\n # identify the regions around the max value\n maxes = np.argmax(arr[:, maxfit_size:-maxfit_size],\n axis=1) + maxfit_size\n ind = maxes[:, np.newaxis] + range(-maxfit_size, maxfit_size+1)\n\n # must cast dtype from unsigned to signed integer\n dtype = np.dtype(arr.dtype)\n if dtype.kind == 'u':\n if dtype.itemsize == 1:\n dtype = np.int16\n elif dtype.itemsize == 2:\n dtype = np.int32\n else:\n dtype = np.int64\n else:\n dtype = arr.dtype\n\n fitregion = np.array([_int.take(_ind) for _int, _ind in zip(arr, ind)],\n dtype=dtype)\n\n # fit max using linear regression\n intdiff = np.diff(fitregion, 1)\n x_norm = np.arange(-maxfit_size + 0.5, maxfit_size + 0.5)\n y_mean = np.mean(intdiff, axis=1, keepdims=True)\n y_norm = intdiff - y_mean\n slope = np.sum(x_norm[np.newaxis, :] * y_norm, 1) / np.sum(x_norm * x_norm)\n slope[slope == 0] = np.nan # protect against division by zero\n r_dev = - y_mean[:, 0] / slope\n\n # mask invalid fits\n threshold = threshold * fitregion.max() # relative to global maximum\n with np.errstate(invalid='ignore'): # ignore comparison with np.nan\n valid = (np.isfinite(r_dev) & # finite result\n (fitregion > 0).all(1) & # all pixels in fitregion > 0\n (fitregion.mean(1) > threshold) & # fitregion mean > threshold\n (r_dev > -maxfit_size + 0.5) & # maximum inside fit region\n (r_dev < maxfit_size - 0.5))\n r_dev[~valid] = np.nan\n return r_dev + maxes\n\n\ndef max_edge(arr, threshold=0.5, axis=1):\n \"\"\" Find strongest decreasing edge on each row \"\"\"\n if axis == 0:\n arr = arr.T\n if np.issubdtype(arr.dtype, np.unsignedinteger):\n arr = arr.astype(np.int)\n derivative = -np.diff(arr)\n index = np.argmax(derivative, axis=1)\n values = np.max(derivative, axis=1)\n r_dev = index + 0.5\n r_dev[values < threshold * values.max()] = np.nan\n return r_dev\n", "\"\"\" Find features in image \"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport six\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport skimage\ntry:\n from skimage.filters import threshold_otsu\nexcept ImportError:\n from skimage.filter import threshold_otsu # skimage <= 0.10\ntry:\n from skimage.feature import canny\nexcept ImportError:\n from skimage.filter import canny # skimage <= 0.10\nfrom skimage.measure import find_contours\nfrom skimage.transform import hough_circle\nfrom skimage.feature import peak_local_max\nimport pandas as pd\nfrom scipy.spatial import cKDTree\nfrom .algebraic import fit_ellipse\nfrom .utils import validate_tuple\n\n\ndef find_disks(image, size_range, maximum=100, canny_sigma=1):\n \"\"\" Find circular edges in a 2D image using hough transforms.\n\n An edge is a sharp light-dark or dark-light transition. These are found\n using a canny edge filter. Subsequently, the edges undergo a circular\n Hough transformation for a range of circle radii. Peaks in the hough\n transformed image correspond to circle centers.\n\n Parameters\n ----------\n image : ndarray, 2d\n size_range : tuple of numbers\n the range of circle radii to look for, in pixels\n maximum : number, optional\n The maximum number of disks\n canny_sigma : number, optional\n The sigma value used in the Canny edge filter. 
Default 1.\n\n See also\n --------\n http://scikit-image.org/docs/dev/auto_examples/plot_canny.html\n http://scikit-image.org/docs/dev/auto_examples/edges/plot_circular_elliptical_hough_transform.html\n \"\"\"\n # Define the radius at for which hough transforms are done. Take integer\n # values and a maximum of 30 intermediate steps.\n step = max(int(round(abs(size_range[1] - size_range[0]) / 30)), 1)\n radii = np.arange(size_range[0], size_range[1], step=step, dtype=np.intp)\n\n # Find edges in the image\n edges = canny(image, sigma=canny_sigma)\n # Perform a circular hough transform of the edges\n circles = hough_circle(edges, radii)\n\n # Collect the peaks in hough space, these are circle centers\n data = []\n for radius, circle in zip(radii, circles):\n peaks = peak_local_max(circle, threshold_rel=0.5,\n num_peaks=int(maximum))\n try:\n accumulator = circle[peaks[:, 0], peaks[:, 1]]\n except TypeError:\n continue\n data.append(pd.DataFrame(dict(r=[radius] * peaks.shape[0],\n y=peaks[:, 0],\n x=peaks[:, 1],\n accum=accumulator)))\n if len(data) == 0:\n return pd.DataFrame(columns=['r', 'y', 'x', 'accum'])\n data = pd.concat(data, ignore_index=True)\n\n # drop features that are closer than the average radius together\n # keep the ones that are brightest in hough space (= the most circular ones)\n to_drop = where_close(data[['y', 'x']].values, data['r'].mean(),\n intensity=data['accum'].values)\n data.drop(to_drop, inplace=True)\n\n # Keep only brightest n circles\n try: # work around API change in pandas 0.17\n data = data.sort_values(by=['accum'], ascending=False)\n except AttributeError:\n data = data.sort(columns=['accum'], ascending=False)\n\n return data.head(maximum).copy()\n\n\ndef find_ellipse(image, mode='ellipse_aligned', min_length=24):\n \"\"\" Find bright ellipse contours on a black background.\n\n This routine thresholds the image (using the Otsu threshold), finds the\n longest contour and fits an ellipse to this contour.\n\n Parameters\n ----------\n image : ndarray, 2d\n mode : {'ellipse', 'ellipse_aligned', 'circle'}\n 'ellipse' or None finds an arbitrary ellipse (default)\n 'circle' finds a circle\n 'ellipse_aligned' finds an ellipse with its axes aligned along [x y] axes\n min_length : number, optional\n minimum length of the ellipse contour, in pixels. Default 24.\n\n Returns\n -------\n yr, xr, yc, xc when dimension order was y, x (most common)\n xr, yr, xc, yc when dimension order was x, y\n \"\"\"\n assert image.ndim == 2\n # Threshold the image\n thresh = threshold_otsu(image)\n binary = image > thresh\n\n # Find the contours of 0.5 value. 
For a thresholded ellipse contour, this\n # likely finds 2 contours: the inner and the outer.\n contours = find_contours(binary, 0.5, fully_connected='high')\n if len(contours) == 0:\n raise ValueError('No contours found')\n\n # Eliminate short contours\n contours = [c for c in contours if len(c) >= min_length]\n\n # fit circles to the rest, keep the one with lowest residual deviation\n result = [np.nan] * 4\n residual = None\n for c in contours:\n try:\n (xr, yr), (xc, yc), _ = fit_ellipse(c.T, mode=mode)\n if np.any(np.isnan([xr, yr, xc, yc])):\n continue\n x, y = c.T\n r = np.sum((((xc - x)/xr)**2 + ((yc - y)/yr)**2 - 1)**2)/len(c)\n if residual is None or r < residual:\n result = xr, yr, xc, yc\n residual = r\n except np.linalg.LinAlgError:\n pass\n\n return result\n\n\ndef find_ellipsoid(image3d, center_atol=None):\n \"\"\" Finds a bright ellipsoid contour on a black background in a 3D image.\n\n Finds the ellipses in all three projections of the 3D image and returns\n center coordinates and priciple radii.\n\n The function uses the YX projection for the yr, xr, yc, xc and the ZX\n projection for zr, xc. The ZY projection can be used for sanity checking\n the found center.\n\n Parameters\n ----------\n image3d : ndarray, 3d\n center_atol : float, optional\n the maximum absolute tolerance for the difference between the found\n centers in different projections. Default None\n\n Returns\n -------\n zr, yr, xr, zc, yc, xc\n \"\"\"\n assert image3d.ndim == 3\n\n # Y, X projection, use y radius because resonant scanning in x direction.\n image = np.mean(image3d, axis=0)\n yr, xr, yc, xc = find_ellipse(image, mode='ellipse_aligned')\n\n # Z, X projection\n image = np.mean(image3d, axis=1)\n zr, xr2, zc, xc2 = find_ellipse(image, mode='ellipse_aligned')\n\n if center_atol is not None:\n # Z, Y projection (noisy with resonant scanning)\n image = np.mean(image3d, axis=2)\n zr2, yr2, zc2, yc2 = find_ellipse(image, mode='ellipse_aligned')\n\n assert_allclose([xc, yc, zc],\n [xc2, yc2, zc2], rtol=0, atol=center_atol,\n err_msg='Found centers have inconsistent values.')\n\n return zr, yr, xr, zc, yc, xc\n\n\ndef where_close(pos, separation, intensity=None):\n \"\"\" Returns indices of features that are closer than separation from other\n features. When intensity is given, the one with the lowest intensity is\n returned: else the most topleft is returned (to avoid randomness)\n\n To be implemented in trackpy v0.4\"\"\"\n if len(pos) == 0:\n return []\n separation = validate_tuple(separation, pos.shape[1])\n if any([s == 0 for s in separation]):\n return []\n # Rescale positions, so that pairs are identified below a distance\n # of 1.\n pos_rescaled = pos / separation\n duplicates = cKDTree(pos_rescaled, 30).query_pairs(1 - 1e-7)\n if len(duplicates) == 0:\n return []\n index_0 = np.fromiter((x[0] for x in duplicates), dtype=int)\n index_1 = np.fromiter((x[1] for x in duplicates), dtype=int)\n if intensity is None:\n to_drop = np.where(np.sum(pos_rescaled[index_0], 1) >\n np.sum(pos_rescaled[index_1], 1),\n index_1, index_0)\n else:\n intensity_0 = intensity[index_0]\n intensity_1 = intensity[index_1]\n to_drop = np.where(intensity_0 > intensity_1, index_1, index_0)\n edge_cases = intensity_0 == intensity_1\n if np.any(edge_cases):\n index_0 = index_0[edge_cases]\n index_1 = index_1[edge_cases]\n to_drop[edge_cases] = np.where(np.sum(pos_rescaled[index_0], 1) >\n np.sum(pos_rescaled[index_1], 1),\n index_1, index_0)\n return np.unique(to_drop)\n\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.linspace", "numpy.arctan", "numpy.issubdtype", "numpy.dtype", "numpy.max", "numpy.mean", "numpy.ones_like", "numpy.linalg.eig", "numpy.arange", "numpy.finfo", "numpy.sin", "numpy.argmax", "numpy.diff", "numpy.identity", "numpy.errstate", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.abs", "numpy.isfinite", "numpy.cos", "numpy.sign", "numpy.empty" ], [ "pandas.concat", "numpy.sum", "numpy.unique", "numpy.isnan", "numpy.arange", "pandas.DataFrame", "numpy.mean", "numpy.any", "numpy.testing.assert_allclose", "numpy.fromiter", "numpy.where", "scipy.spatial.cKDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
fengwang/jax
[ "88f888d498ee5a063c7fbdf96ea593ab8bd01849", "5726be1a9e1d33d8e0052d09104501b19fb6efcf" ]
[ "jax/_src/numpy/lax_numpy.py", "jax/experimental/jax2tf/jax2tf.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pytype: skip-file\n\"\"\"\nImplements the NumPy API, using the primitives in :mod:`jax.lax`.\n\nNumPy operations are implemented in Python in terms of the primitive operations\nin :mod:`jax.lax`. Since NumPy operations are not primitive and instead are\nimplemented in terms of :mod:`jax.lax` operations, we do not need to define\ntransformation rules such as gradient or batching rules. Instead,\ntransformations for NumPy primitives can be derived from the transformation\nrules for the underlying :code:`lax` primitives.\n\"\"\"\n\nimport abc\nimport builtins\nimport collections\nfrom functools import partial\nimport operator\nimport types\nfrom typing import Sequence, FrozenSet, Optional, Tuple, Union, Set, Type, Callable\nfrom textwrap import dedent as _dedent\nimport warnings\n\nimport numpy as np\nimport opt_einsum\n\nimport jax\nfrom jax import jit, custom_jvp\nfrom jax._src.numpy.vectorize import vectorize\nfrom jax._src.numpy.util import _wraps\nfrom jax import core\nfrom jax._src import dtypes\nfrom jax._src.api_util import _ensure_index_tuple\nfrom jax import errors\nfrom jax.core import UnshapedArray, ShapedArray, ConcreteArray, canonicalize_shape\nfrom jax.config import config\nfrom jax.interpreters import pxla\nfrom jax import lax\nfrom jax._src import device_array\nfrom jax._src.lax.lax import _array_copy\nfrom jax._src.ops import scatter\nfrom jax._src.util import (unzip2, prod as _prod, subvals, safe_zip, ceil_of_ratio,\n canonicalize_axis as _canonicalize_axis, maybe_named_axis)\nfrom jax.tree_util import tree_leaves, tree_flatten, tree_map\n\nnewaxis = None\n\n# Common docstring additions:\n\n_PRECISION_DOC = \"\"\"\\\nIn addition to the original NumPy arguments listed below, also supports\n``precision`` for extra control over matrix-multiplication precision\non supported devices. 
``precision`` may be set to ``None``, which means\ndefault precision for the backend, a :class:`~jax.lax.Precision` enum value\n(``Precision.DEFAULT``, ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple\nof two :class:`~jax.lax.Precision` enums indicating separate precision for each argument.\n\"\"\"\n\n# We replace some builtin names to follow Numpy's API, so we capture here.\n_abs = builtins.abs\n_all = builtins.all\n_any = builtins.any\n_max = builtins.max\n_min = builtins.min\n_sum = builtins.sum\n_divmod = builtins.divmod\n\n# NumPy constants\n\npi = np.pi\ne = np.e\neuler_gamma = np.euler_gamma\ninf = np.inf\nNINF = np.NINF\nPZERO = np.PZERO\nNZERO = np.NZERO\nnan = np.nan\n\n# NumPy utility functions\n\nget_printoptions = np.get_printoptions\nprintoptions = np.printoptions\nset_printoptions = np.set_printoptions\n\n# ndarray is defined as an virtual abstract base class.\n\nclass ArrayMeta(abc.ABCMeta):\n \"\"\"Metaclass for overriding ndarray isinstance checks.\"\"\"\n\n def __instancecheck__(self, instance):\n # Allow tracer instances with avals that are instances of UnshapedArray.\n # We could instead just declare Tracer an instance of the ndarray type, but\n # there can be traced values that are not arrays. The main downside here is\n # that isinstance(x, ndarray) might return true but\n # issubclass(type(x), ndarray) might return false for an array tracer.\n try:\n return (hasattr(instance, \"aval\") and\n isinstance(instance.aval, UnshapedArray))\n except AttributeError:\n super().__instancecheck__(instance)\n\n\nclass ndarray(metaclass=ArrayMeta):\n dtype: np.dtype\n ndim: int\n shape: Tuple[int, ...]\n size: int\n\n def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,\n order=None):\n raise TypeError(\"jax.numpy.ndarray() should not be instantiated explicitly.\"\n \" Use jax.numpy.array, or jax.numpy.zeros instead.\")\n\n @abc.abstractmethod\n def __getitem__(self, key, indices_are_sorted=False,\n unique_indices=False): ...\n @abc.abstractmethod\n def __setitem__(self, key, value): ...\n @abc.abstractmethod\n def __len__(self): ...\n @abc.abstractmethod\n def __iter__(self): ...\n @abc.abstractmethod\n def __reversed__(self): ...\n\n # Comparisons\n @abc.abstractmethod\n def __lt__(self, other): ...\n @abc.abstractmethod\n def __le__(self, other): ...\n @abc.abstractmethod\n def __eq__(self, other): ...\n @abc.abstractmethod\n def __ne__(self, other): ...\n @abc.abstractmethod\n def __gt__(self, other): ...\n @abc.abstractmethod\n def __ge__(self, other): ...\n\n # Unary arithmetic\n\n @abc.abstractmethod\n def __neg__(self): ...\n @abc.abstractmethod\n def __pos__(self): ...\n @abc.abstractmethod\n def __abs__(self): ...\n @abc.abstractmethod\n def __invert__(self): ...\n\n # Binary arithmetic\n\n @abc.abstractmethod\n def __add__(self, other): ...\n @abc.abstractmethod\n def __sub__(self, other): ...\n @abc.abstractmethod\n def __mul__(self, other): ...\n @abc.abstractmethod\n def __matmul__(self, other): ...\n @abc.abstractmethod\n def __truediv__(self, other): ...\n @abc.abstractmethod\n def __floordiv__(self, other): ...\n @abc.abstractmethod\n def __mod__(self, other): ...\n @abc.abstractmethod\n def __divmod__(self, other): ...\n @abc.abstractmethod\n def __pow__(self, other): ...\n @abc.abstractmethod\n def __lshift__(self, other): ...\n @abc.abstractmethod\n def __rshift__(self, other): ...\n @abc.abstractmethod\n def __and__(self, other): ...\n @abc.abstractmethod\n def __xor__(self, other): ...\n @abc.abstractmethod\n def __or__(self, other): 
...\n\n @abc.abstractmethod\n def __radd__(self, other): ...\n @abc.abstractmethod\n def __rsub__(self, other): ...\n @abc.abstractmethod\n def __rmul__(self, other): ...\n @abc.abstractmethod\n def __rmatmul__(self, other): ...\n @abc.abstractmethod\n def __rtruediv__(self, other): ...\n @abc.abstractmethod\n def __rfloordiv__(self, other): ...\n @abc.abstractmethod\n def __rmod__(self, other): ...\n @abc.abstractmethod\n def __rdivmod__(self, other): ...\n @abc.abstractmethod\n def __rpow__(self, other): ...\n @abc.abstractmethod\n def __rlshift__(self, other): ...\n @abc.abstractmethod\n def __rrshift__(self, other): ...\n @abc.abstractmethod\n def __rand__(self, other): ...\n @abc.abstractmethod\n def __rxor__(self, other): ...\n @abc.abstractmethod\n def __ror__(self, other): ...\n\n @abc.abstractmethod\n def __bool__(self): ...\n @abc.abstractmethod\n def __complex__(self): ...\n @abc.abstractmethod\n def __int__(self): ...\n @abc.abstractmethod\n def __float__(self): ...\n @abc.abstractmethod\n def __round__(self, ndigits=None): ...\n\n @abc.abstractmethod\n def __index__(self): ...\n\n # np.ndarray methods:\n @abc.abstractmethod\n def all(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None): ...\n @abc.abstractmethod\n def any(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None): ...\n @abc.abstractmethod\n def argmax(self, axis: Optional[int] = None, out=None): ...\n @abc.abstractmethod\n def argmin(self, axis: Optional[int] = None, out=None): ...\n @abc.abstractmethod\n def argpartition(self, kth, axis=-1, kind='introselect', order=None): ...\n @abc.abstractmethod\n def argsort(self, axis: Optional[int] = -1, kind='quicksort', order=None): ...\n @abc.abstractmethod\n def astype(self, dtype): ...\n @abc.abstractmethod\n def choose(self, choices, out=None, mode='raise'): ...\n @abc.abstractmethod\n def clip(self, a_min=None, a_max=None, out=None): ...\n @abc.abstractmethod\n def compress(self, condition, axis: Optional[int] = None, out=None): ...\n @abc.abstractmethod\n def conj(self): ...\n @abc.abstractmethod\n def conjugate(self): ...\n @abc.abstractmethod\n def copy(self): ...\n @abc.abstractmethod\n def cumprod(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None): ...\n @abc.abstractmethod\n def cumsum(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None): ...\n @abc.abstractmethod\n def diagonal(self, offset=0, axis1: int = 0, axis2: int = 1): ...\n @abc.abstractmethod\n def dot(self, b, *, precision=None): ...\n @abc.abstractmethod\n def flatten(self): ...\n @property\n @abc.abstractmethod\n def imag(self): ...\n @abc.abstractmethod\n def item(self, *args): ...\n @abc.abstractmethod\n def max(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None): ...\n @abc.abstractmethod\n def mean(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False, *, where=None,): ...\n @abc.abstractmethod\n def min(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None): ...\n @property\n @abc.abstractmethod\n def nbytes(self): ...\n @abc.abstractmethod\n def nonzero(self, *, size=None, fill_value=None): ...\n @abc.abstractmethod\n def prod(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None): ...\n @abc.abstractmethod\n def ptp(self, axis: Optional[Union[int, 
Tuple[int, ...]]] = None, out=None,\n keepdims=False,): ...\n @abc.abstractmethod\n def ravel(self, order='C'): ...\n @property\n @abc.abstractmethod\n def real(self): ...\n @abc.abstractmethod\n def repeat(self, repeats, axis: Optional[int] = None, *,\n total_repeat_length=None): ...\n @abc.abstractmethod\n def reshape(self, *args, order='C'): ...\n @abc.abstractmethod\n def round(self, decimals=0, out=None): ...\n @abc.abstractmethod\n def searchsorted(self, v, side='left', sorter=None): ...\n @abc.abstractmethod\n def sort(self, axis: Optional[int] = -1, kind='quicksort', order=None): ...\n @abc.abstractmethod\n def squeeze(self, axis: Optional[Union[int, Tuple[int, ...]]] = None): ...\n @abc.abstractmethod\n def std(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None, ddof=0, keepdims=False, *, where=None): ...\n @abc.abstractmethod\n def sum(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None): ...\n @abc.abstractmethod\n def swapaxes(self, axis1: int, axis2: int): ...\n @abc.abstractmethod\n def take(self, indices, axis: Optional[int] = None, out=None,\n mode=None): ...\n @abc.abstractmethod\n def tobytes(self, order='C'): ...\n @abc.abstractmethod\n def tolist(self): ...\n @abc.abstractmethod\n def trace(self, offset=0, axis1: int = 0, axis2: int = 1, dtype=None,\n out=None): ...\n @abc.abstractmethod\n def transpose(self, *args): ...\n @abc.abstractmethod\n def var(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None, ddof=0, keepdims=False, *, where=None): ...\n @abc.abstractmethod\n def view(self, dtype=None, type=None): ...\n\n # Even though we don't always support the NumPy array protocol, e.g., for\n # tracer types, for type checking purposes we must declare support so we\n # implement the NumPy ArrayLike protocol.\n def __array__(self): ...\n\n # JAX extensions\n @property\n @abc.abstractmethod\n def at(self): ...\n @property\n @abc.abstractmethod\n def aval(self): ...\n @property\n @abc.abstractmethod\n def weak_type(self) -> bool: ...\n\n\nndarray.register(device_array.DeviceArray)\nfor t in device_array.device_array_types:\n ndarray.register(t)\nndarray.register(pxla._SDA_BASE_CLASS)\n\n\n\niscomplexobj = np.iscomplexobj\n\nshape = _shape = np.shape\nndim = _ndim = np.ndim\nsize = np.size\n_dtype = partial(dtypes.dtype, canonicalize=True)\n\n# At present JAX doesn't have a reason to distinguish between scalars and arrays\n# in its object system. Further, we want JAX scalars to have the same type\n# promotion behaviors as JAX arrays. 
Rather than introducing a new type of JAX\n# scalar object with JAX promotion behaviors, instead we make the JAX scalar\n# types return JAX arrays when instantiated.\n\nclass _ScalarMeta(type):\n def __hash__(self):\n return hash(self.dtype.type)\n\n def __eq__(self, other):\n return id(self) == id(other) or self.dtype.type == other\n\n def __ne__(self, other):\n return not (self == other)\n\n def __call__(self, x):\n return array(x, dtype=self.dtype)\n\n def __instancecheck__(self, instance):\n return isinstance(instance, self.dtype.type)\n\ndef _make_scalar_type(np_scalar_type):\n return _ScalarMeta(np_scalar_type.__name__, (object,),\n {\"dtype\": np.dtype(np_scalar_type)})\n\nbool_ = _make_scalar_type(np.bool_)\nuint8 = _make_scalar_type(np.uint8)\nuint16 = _make_scalar_type(np.uint16)\nuint32 = _make_scalar_type(np.uint32)\nuint64 = _make_scalar_type(np.uint64)\nint8 = _make_scalar_type(np.int8)\nint16 = _make_scalar_type(np.int16)\nint32 = _make_scalar_type(np.int32)\nint64 = _make_scalar_type(np.int64)\nbfloat16 = _make_scalar_type(dtypes.bfloat16)\nfloat16 = _make_scalar_type(np.float16)\nfloat32 = single = _make_scalar_type(np.float32)\nfloat64 = double = _make_scalar_type(np.float64)\ncomplex64 = csingle = _make_scalar_type(np.complex64)\ncomplex128 = cdouble = _make_scalar_type(np.complex128)\n\nint_ = int32 if dtypes.int_ == np.int32 else int64\nuint = uint32 if dtypes.uint == np.uint32 else uint64\nfloat_ = float32 if dtypes.float_ == np.float32 else float64\ncomplex_ = complex64 if dtypes.complex_ == np.complex64 else complex128\n\nnumber = np.number\ninexact = np.inexact\ncomplexfloating = np.complexfloating\nfloating = np.floating\ninteger = np.integer\nsignedinteger = np.signedinteger\nunsignedinteger = np.unsignedinteger\n\nflexible = np.flexible\ncharacter = np.character\nobject_ = np.object_\n\niinfo = dtypes.iinfo\nfinfo = dtypes.finfo\n\ndtype = np.dtype\ncan_cast = dtypes.can_cast\nissubsctype = dtypes.issubsctype\npromote_types = dtypes.promote_types\n\nComplexWarning = np.ComplexWarning\n\narray_str = np.array_str\narray_repr = np.array_repr\n\nsave = np.save\nsavez = np.savez\n\n@_wraps(np.dtype)\ndef _jnp_dtype(obj, align=False, copy=False):\n \"\"\"Similar to np.dtype, but respects JAX dtype defaults.\"\"\"\n if obj is None:\n obj = dtypes.float_\n elif isinstance(obj, type) and obj in dtypes.python_scalar_dtypes:\n obj = _DEFAULT_TYPEMAP[np.dtype(obj, align=align, copy=copy).type]\n return np.dtype(obj, align=align, copy=copy)\n\n### utility functions\n\n_DEFAULT_TYPEMAP = {\n np.bool_: bool_,\n np.int_: int_,\n np.float_: float_,\n np.complex_: complex_\n}\n\n_INT_DTYPES = {\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\ndef _promote_shapes(fun_name, *args):\n \"\"\"Prepend implicit leading singleton dimensions for Numpy broadcasting.\"\"\"\n if len(args) < 2:\n return args\n else:\n shapes = [shape(arg) for arg in args]\n nonscalar_ranks = [len(shp) for shp in shapes if shp]\n if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:\n return args\n else:\n if config.jax_numpy_rank_promotion != \"allow\":\n _rank_promotion_warning_or_error(fun_name, shapes)\n result_rank = len(lax.broadcast_shapes(*shapes))\n return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)\n for arg, shp in zip(args, shapes)]\n\ndef _rank_promotion_warning_or_error(fun_name, shapes):\n if config.jax_numpy_rank_promotion == \"warn\":\n msg = (\"Following NumPy automatic rank promotion for {} on shapes {}. 
\"\n \"Set the jax_numpy_rank_promotion config option to 'allow' to \"\n \"disable this warning; for more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))\n elif config.jax_numpy_rank_promotion == \"raise\":\n msg = (\"Operands could not be broadcast together for {} on shapes {} \"\n \"and with the config option jax_numpy_rank_promotion='raise'. \"\n \"For more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))\n\ndef _promote_dtypes(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\"\"\"\n # TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.\n if len(args) < 2:\n return args\n else:\n to_dtype, weak_type = dtypes._lattice_result_type(*args)\n to_dtype = dtypes.canonicalize_dtype(to_dtype)\n return [lax._convert_element_type(x, to_dtype, weak_type) for x in args]\n\ndef _promote_dtypes_inexact(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\n\n Promotes arguments to an inexact type.\"\"\"\n to_dtype, weak_type = dtypes._lattice_result_type(*args)\n to_dtype = dtypes.canonicalize_dtype(to_dtype)\n to_dtype_inexact = _to_inexact_dtype(to_dtype)\n weak_type = (weak_type and to_dtype == to_dtype_inexact)\n return [lax._convert_element_type(x, to_dtype_inexact, weak_type) for x in args]\n\ndef _to_inexact_dtype(dtype):\n \"\"\"Promotes a dtype into an inexact dtype, if it is not already one.\"\"\"\n return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)\n\ndef _complex_elem_type(dtype):\n \"\"\"Returns the float type of the real/imaginary parts of a complex dtype.\"\"\"\n return np.abs(np.zeros((), dtype)).dtype\n\ndef _result_dtype(op, *args):\n \"\"\"Compute result dtype of applying op to arguments with given dtypes.\"\"\"\n args = [np.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]\n return _dtype(op(*args))\n\n\ndef _arraylike(x):\n return (isinstance(x, np.ndarray) or isinstance(x, ndarray) or\n hasattr(x, '__jax_array__') or isscalar(x))\n\n\ndef _stackable(*args):\n return _all(type(arg) in stackables for arg in args)\nstackables: Set[Type] = set()\n_register_stackable: Callable[[Type], None] = stackables.add\n\ndef _check_arraylike(fun_name, *args):\n \"\"\"Check if all args fit JAX's definition of arraylike.\"\"\"\n assert isinstance(fun_name, str), f\"fun_name must be a string. Got {fun_name}\"\n if _any(not _arraylike(arg) for arg in args):\n pos, arg = next((i, arg) for i, arg in enumerate(args)\n if not _arraylike(arg))\n msg = \"{} requires ndarray or scalar arguments, got {} at position {}.\"\n raise TypeError(msg.format(fun_name, type(arg), pos))\n\ndef _check_no_float0s(fun_name, *args):\n \"\"\"Check if none of the args have dtype float0.\"\"\"\n if _any(dtypes.dtype(arg) is dtypes.float0 for arg in args):\n raise TypeError(\n f\"Called {fun_name} with a float0 array. \"\n \"float0s do not support any operations by design because they \"\n \"are not compatible with non-trivial vector spaces. No implicit dtype \"\n \"conversion is done. You can use np.zeros_like(arr, dtype=np.float) \"\n \"to cast a float0 array to a regular zeros array. 
\\n\"\n \"If you didn't expect to get a float0 you might have accidentally \"\n \"taken a gradient with respect to an integer argument.\")\n\ndef _promote_args(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes(*args))\n\ndef _promote_args_inexact(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\n\n Promotes non-inexact types to an inexact type.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))\n\ndef _convert_and_clip_integer(val, dtype):\n \"\"\"\n Convert integer-typed val to specified integer dtype, clipping to dtype\n range rather than wrapping.\n\n Args:\n val: value to be converted\n dtype: dtype of output\n\n Returns:\n equivalent of val in new dtype\n\n Examples\n --------\n Normal integer type conversion will wrap:\n\n >>> val = jnp.uint32(0xFFFFFFFF)\n >>> val.astype('int32')\n DeviceArray(-1, dtype=int32)\n\n This function clips to the values representable in the new type:\n\n >>> _convert_and_clip_integer(val, 'int32')\n DeviceArray(2147483647, dtype=int32)\n \"\"\"\n val = val if isinstance(val, ndarray) else asarray(val)\n dtype = dtypes.canonicalize_dtype(dtype)\n if not (issubdtype(dtype, integer) and issubdtype(val.dtype, integer)):\n raise TypeError(\"_convert_and_clip_integer only accepts integer dtypes.\")\n\n val_dtype = dtypes.canonicalize_dtype(val.dtype)\n if val_dtype != val.dtype:\n # TODO(jakevdp): this is a weird corner case; need to figure out how to handle it.\n # This happens in X32 mode and can either come from a jax value created in another\n # context, or a Python integer converted to int64.\n pass\n min_val = _constant_like(val, _max(iinfo(dtype).min, iinfo(val_dtype).min))\n max_val = _constant_like(val, _min(iinfo(dtype).max, iinfo(val_dtype).max))\n return clip(val, min_val, max_val).astype(dtype)\n\n\ndef _constant_like(x, const):\n return np.array(const, dtype=_dtype(x))\n\n@_wraps(np.load, update_doc=False)\ndef load(*args, **kwargs):\n # The main purpose of this wrapper is to recover bfloat16 data types.\n # Note: this will only work for files created via np.save(), not np.savez().\n out = np.load(*args, **kwargs)\n if isinstance(out, np.ndarray):\n # numpy does not recognize bfloat16, so arrays are serialized as void16\n if out.dtype == 'V2':\n out = out.view(bfloat16)\n out = asarray(out)\n return out\n\n### implementations of numpy functions in terms of lax\n\n@_wraps(np.fmin)\n@jit\ndef fmin(x1, x2):\n return where((x1 < x2) | isnan(x2), x1, x2)\n\n@_wraps(np.fmax)\n@jit\ndef fmax(x1, x2):\n return where((x1 > x2) | isnan(x2), x1, x2)\n\n@_wraps(np.issubdtype)\ndef issubdtype(arg1, arg2):\n return dtypes.issubdtype(arg1, arg2)\n\n@_wraps(np.isscalar)\ndef isscalar(element):\n if hasattr(element, '__jax_array__'):\n element = element.__jax_array__()\n return dtypes.is_python_scalar(element) or np.isscalar(element)\n\niterable = np.iterable\n\n@_wraps(np.result_type)\ndef result_type(*args):\n return dtypes.result_type(*args)\n\ndef _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x))\n else:\n fn = lambda x: lax_fn(*_promote_args(numpy_fn.__name__, x))\n fn = jit(fn, inline=True)\n if lax_doc:\n doc = 
_dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))\n else:\n fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))\n fn = jit(fn, inline=True)\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn, lax_doc=False):\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)\n fn = jit(fn, inline=True)\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\nfabs = _one_to_one_unop(np.fabs, lax.abs, True)\nbitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)\ninvert = _one_to_one_unop(np.invert, lax.bitwise_not)\nnegative = _one_to_one_unop(np.negative, lax.neg)\npositive = _one_to_one_unop(np.positive, lambda x: x)\n\nfloor = _one_to_one_unop(np.floor, lax.floor, True)\nceil = _one_to_one_unop(np.ceil, lax.ceil, True)\nexp = _one_to_one_unop(np.exp, lax.exp, True)\nlog = _one_to_one_unop(np.log, lax.log, True)\nexpm1 = _one_to_one_unop(np.expm1, lax.expm1, True)\nlog1p = _one_to_one_unop(np.log1p, lax.log1p, True)\nsin = _one_to_one_unop(np.sin, lax.sin, True)\ncos = _one_to_one_unop(np.cos, lax.cos, True)\ntan = _one_to_one_unop(np.tan, lax.tan, True)\narcsin = _one_to_one_unop(np.arcsin, lax.asin, True)\narccos = _one_to_one_unop(np.arccos, lax.acos, True)\narctan = _one_to_one_unop(np.arctan, lax.atan, True)\nsinh = _one_to_one_unop(np.sinh, lax.sinh, True)\ncosh = _one_to_one_unop(np.cosh, lax.cosh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\ntanh = _one_to_one_unop(np.tanh, lax.tanh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\narctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)\nsqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)\ncbrt = _one_to_one_unop(np.cbrt, lax.cbrt, True)\n\n\nadd = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)\nbitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)\nbitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)\nbitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)\nleft_shift = _one_to_one_binop(np.left_shift, lax.shift_left)\nequal = _one_to_one_binop(np.equal, lax.eq)\nmultiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)\nnot_equal = _one_to_one_binop(np.not_equal, lax.ne)\nsubtract = _one_to_one_binop(np.subtract, lax.sub)\narctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)\nminimum = _one_to_one_binop(np.minimum, lax.min)\nmaximum = _one_to_one_binop(np.maximum, lax.max)\nfloat_power = _one_to_one_binop(np.float_power, lax.pow, True)\nnextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)\n\n@_wraps(np.arccosh)\n@jit\ndef arccosh(x):\n # Note: arccosh is multi-valued for complex input, and lax.acosh uses a different\n # convention than np.arccosh.\n out = lax.acosh(*_promote_args_inexact(\"arccosh\", x))\n if issubdtype(out.dtype, np.complexfloating):\n out = where(real(out) < 0, lax.neg(out), out)\n return out\n\ndef _comparison_op(numpy_fn, 
lax_fn):\n # TODO(https://github.com/google/jax/issues/6713): decorate this function with\n # jit, after fixing a surprising interaction with remat(..., concrete=True).\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n # Comparison on complex types are defined as a lexicographic ordering on\n # the (real, imag) pair.\n if issubdtype(_dtype(x1), complexfloating):\n rx = lax.real(x1)\n ry = lax.real(x2)\n return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),\n lax_fn(rx, ry))\n return lax_fn(x1, x2)\n return _wraps(numpy_fn)(fn)\n\ngreater_equal = _comparison_op(np.greater_equal, lax.ge)\ngreater = _comparison_op(np.greater, lax.gt)\nless_equal = _comparison_op(np.less_equal, lax.le)\nless = _comparison_op(np.less, lax.lt)\n\n\ndef _logical_op(np_op, bitwise_op):\n @_wraps(np_op, update_doc=False)\n @partial(jit, inline=True)\n def op(*args):\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))\n for x in args)\n return bitwise_op(*_promote_args(np_op.__name__, *args))\n return op\n\nlogical_and = _logical_op(np.logical_and, lax.bitwise_and)\nlogical_not = _logical_op(np.logical_not, lax.bitwise_not)\nlogical_or = _logical_op(np.logical_or, lax.bitwise_or)\nlogical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)\n\n\n@_wraps(np.right_shift)\n@partial(jit, inline=True)\ndef right_shift(x1, x2):\n x1, x2 = _promote_args(np.right_shift.__name__, x1, x2)\n lax_fn = lax.shift_right_logical if \\\n np.issubdtype(x1.dtype, np.unsignedinteger) else lax.shift_right_arithmetic\n return lax_fn(x1, x2)\n\n\n@_wraps(np.absolute)\n@partial(jit, inline=True)\ndef absolute(x):\n _check_arraylike('absolute', x)\n dt = _dtype(x)\n return x if dt == bool_ or issubdtype(dt, unsignedinteger) else lax.abs(x)\nabs = _wraps(np.abs)(absolute)\n\n\n@_wraps(np.rint)\n@jit\ndef rint(x):\n _check_arraylike('rint', x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.convert_element_type(x, float_)\n if issubdtype(dtype, complexfloating):\n return lax.complex(rint(lax.real(x)), rint(lax.imag(x)))\n return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)\n\n\n@_wraps(np.sign)\n@jit\ndef sign(x):\n _check_arraylike('sign', x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n re = lax.real(x)\n return lax.complex(\n lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))\n return lax.sign(x)\n\n\n@_wraps(np.copysign)\n@jit\ndef copysign(x1, x2):\n x1, x2 = _promote_args_inexact(\"copysign\", x1, x2)\n if issubdtype(_dtype(x1), complexfloating):\n raise TypeError(\"copysign does not support complex-valued inputs\")\n return where(signbit(x2), -lax.abs(x1), lax.abs(x1))\n\n\n@_wraps(np.true_divide)\n@partial(jit, inline=True)\ndef true_divide(x1, x2):\n x1, x2 = _promote_args_inexact(\"true_divide\", x1, x2)\n return lax.div(x1, x2)\n\ndivide = true_divide\n\n@_wraps(np.floor_divide)\n@jit\ndef floor_divide(x1, x2):\n x1, x2 = _promote_args(\"floor_divide\", x1, x2)\n dtype = _dtype(x1)\n if issubdtype(dtype, integer):\n quotient = lax.div(x1, x2)\n select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)\n # TODO(mattjj): investigate why subtracting a scalar was causing promotion\n return where(select, quotient - np.array(1, _dtype(quotient)), quotient)\n elif issubdtype(dtype, complexfloating):\n x1r = lax.real(x1)\n x1i = lax.imag(x1)\n x2r = lax.real(x2)\n x2i = lax.imag(x2)\n which = lax.ge(lax.abs(x2r), lax.abs(x2i))\n rat1 = where(which, 
lax._const(x2i, 1), lax.div(x2r, x2i))\n rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))\n out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),\n lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))\n return lax.convert_element_type(out, dtype)\n else:\n return _float_divmod(x1, x2)[0]\n\n\n@_wraps(np.divmod)\n@jit\ndef divmod(x1, x2):\n x1, x2 = _promote_args(\"divmod\", x1, x2)\n if issubdtype(_dtype(x1), integer):\n return floor_divide(x1, x2), remainder(x1, x2)\n else:\n return _float_divmod(x1, x2)\n\n\ndef _float_divmod(x1, x2):\n # see float_divmod in floatobject.c of CPython\n mod = lax.rem(x1, x2)\n div = lax.div(lax.sub(x1, mod), x2)\n\n ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))\n mod = lax.select(ind, mod + x2, mod)\n div = lax.select(ind, div - _constant_like(div, 1), div)\n\n return lax.round(div), mod\n\n\n@partial(jit, inline=True)\ndef _power(x1, x2):\n x1, x2 = _promote_args(\"power\", x1, x2)\n dtype = _dtype(x1)\n if not issubdtype(dtype, integer):\n return lax.pow(x1, x2)\n\n # Integer power => use binary exponentiation.\n\n # TODO(phawkins): add integer pow support to XLA.\n bits = 6 # Anything more would overflow for any x1 > 1\n zero = _constant_like(x2, 0)\n one = _constant_like(x2, 1)\n # Initialize acc carefully such that pow(0, x2) is zero for x2 != 0\n acc = where(lax.bitwise_and(lax.eq(x1, zero), lax.ne(x2, zero)), zero, one)\n for _ in range(bits):\n acc = where(lax.bitwise_and(x2, one), lax.mul(acc, x1), acc)\n x1 = lax.mul(x1, x1)\n x2 = lax.shift_right_logical(x2, one)\n return acc\n\n@_wraps(np.power)\ndef power(x1, x2):\n # Special case for concrete integer scalars: use binary exponentiation.\n # Using lax.pow may be imprecise for floating-point values; the goal of this\n # code path is to make sure we end up with a precise output for the common\n # pattern ``x ** 2`` or similar.\n if isinstance(core.get_aval(x2), ConcreteArray):\n try:\n x2 = operator.index(x2)\n except TypeError:\n pass\n else:\n return lax.integer_pow(x1, x2)\n return _power(x1, x2)\n\n@custom_jvp\n@_wraps(np.logaddexp)\n@jit\ndef logaddexp(x1, x2):\n x1, x2 = _promote_args_inexact(\"logaddexp\", x1, x2)\n amax = lax.max(x1, x2)\n if issubdtype(x1.dtype, np.floating):\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.log1p(lax.exp(lax.neg(lax.abs(delta))))))\n else:\n delta = lax.sub(lax.add(x1, x2), lax.mul(amax, _constant_like(amax, 2)))\n out = lax.add(amax, lax.log1p(lax.exp(delta)))\n return lax.complex(lax.real(out), _wrap_between(lax.imag(out), np.pi))\n\ndef _wrap_between(x, _a):\n \"\"\"Wraps `x` between `[-a, a]`.\"\"\"\n a = _constant_like(x, _a)\n two_a = _constant_like(x, 2 * _a)\n zero = _constant_like(x, 0)\n rem = lax.rem(lax.add(x, a), two_a)\n rem = lax.select(lax.lt(rem, zero), lax.add(rem, two_a), rem)\n return lax.sub(rem, a)\n\[email protected]\ndef _logaddexp_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = _promote_args_inexact(\"logaddexp_jvp\", x1, x2, t1, t2)\n primal_out = logaddexp(x1, x2)\n tangent_out = lax.add(lax.mul(t1, exp(lax.sub(_replace_inf(x1), _replace_inf(primal_out)))),\n lax.mul(t2, exp(lax.sub(_replace_inf(x2), _replace_inf(primal_out)))))\n return primal_out, tangent_out\n\ndef _replace_inf(x):\n return lax.select(isposinf(real(x)), zeros_like(x), x)\n\n\n@custom_jvp\n@_wraps(np.logaddexp2)\n@jit\ndef logaddexp2(x1, x2):\n x1, x2 = _promote_args_inexact(\"logaddexp2\", x1, 
x2)\n amax = lax.max(x1, x2)\n if issubdtype(x1.dtype, np.floating):\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.div(lax.log1p(exp2(lax.neg(lax.abs(delta)))),\n _constant_like(x1, np.log(2)))))\n else:\n delta = lax.sub(lax.add(x1, x2), lax.mul(amax, _constant_like(amax, 2)))\n out = lax.add(amax, lax.div(lax.log1p(exp2(delta)), _constant_like(x1, np.log(2))))\n return lax.complex(lax.real(out), _wrap_between(lax.imag(out), np.pi / np.log(2)))\n\[email protected]\ndef _logaddexp2_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = _promote_args_inexact(\"logaddexp2_jvp\", x1, x2, t1, t2)\n primal_out = logaddexp2(x1, x2)\n tangent_out = lax.add(lax.mul(t1, exp2(lax.sub(_replace_inf(x1), _replace_inf(primal_out)))),\n lax.mul(t2, exp2(lax.sub(_replace_inf(x2), _replace_inf(primal_out)))))\n return primal_out, tangent_out\n\n\n@_wraps(np.log2)\n@partial(jit, inline=True)\ndef log2(x):\n x, = _promote_args_inexact(\"log2\", x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))\n\n\n@_wraps(np.log10)\n@partial(jit, inline=True)\ndef log10(x):\n x, = _promote_args_inexact(\"log10\", x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))\n\n\n@_wraps(np.exp2)\n@partial(jit, inline=True)\ndef exp2(x):\n x, = _promote_args_inexact(\"exp2\", x)\n return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))\n\n@_wraps(np.signbit)\n@jit\ndef signbit(x):\n x, = _promote_args(\"signbit\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.lt(x, _constant_like(x, 0))\n elif issubdtype(dtype, bool_):\n return full_like(x, False, dtype=bool_)\n elif not issubdtype(dtype, floating):\n raise ValueError(\n \"jax.numpy.signbit is not well defined for %s\" % dtype)\n\n # TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to\n # F32.\n if dtype == bfloat16:\n dtype = float32\n x = lax.convert_element_type(x, float32)\n\n info = finfo(dtype)\n if info.bits not in _INT_DTYPES:\n raise NotImplementedError(\n \"jax.numpy.signbit only supports 16, 32, and 64-bit types.\")\n int_type = _INT_DTYPES[info.bits]\n x = lax.bitcast_convert_type(x, int_type)\n return lax.convert_element_type(x >> (info.nexp + info.nmant), np.bool_)\n\n\n@_wraps(np.trapz)\n@partial(jit, static_argnames=('axis',))\ndef trapz(y, x=None, dx=1.0, axis: int = -1):\n _check_arraylike('trapz', y)\n y = moveaxis(y, axis, -1)\n if x is not None:\n if ndim(x) == 1:\n dx = diff(x)\n else:\n dx = moveaxis(diff(x, axis=axis), axis, -1)\n return 0.5 * (dx * (y[..., 1:] + y[..., :-1])).sum(-1)\n\n\n@_wraps(np.trunc)\n@jit\ndef trunc(x):\n _check_arraylike('trunc', x)\n return where(lax.lt(x, lax._const(x, 0)), ceil(x), floor(x))\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _conv(x, y, mode, op, precision):\n if ndim(x) != 1 or ndim(y) != 1:\n raise ValueError(f\"{op}() only support 1-dimensional inputs.\")\n x, y = _promote_dtypes_inexact(x, y)\n if len(x) == 0 or len(y) == 0:\n raise ValueError(f\"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.\")\n\n out_order = slice(None)\n if op == 'correlate':\n y = conj(y)\n if len(x) < len(y):\n x, y = y, x\n out_order = slice(None, None, -1)\n elif op == 'convolve':\n if len(x) < len(y):\n x, y = y, x\n y = flip(y)\n\n if mode == 'valid':\n padding = [(0, 0)]\n elif mode == 'same':\n padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]\n elif mode == 'full':\n padding = [(y.shape[0] - 1, y.shape[0] - 1)]\n 
else:\n raise ValueError(\"mode must be one of ['full', 'same', 'valid']\")\n\n result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),\n padding, precision=precision)\n return result[0, 0, out_order]\n\n\n@_wraps(np.convolve, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('mode', 'precision'))\ndef convolve(a, v, mode='full', *, precision=None):\n _check_arraylike(\"convolve\", a, v)\n return _conv(a, v, mode, 'convolve', precision)\n\n\n@_wraps(np.correlate, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('mode', 'precision'))\ndef correlate(a, v, mode='valid', *, precision=None):\n _check_arraylike(\"correlate\", a, v)\n return _conv(a, v, mode, 'correlate', precision)\n\n\ndef _normalize_float(x):\n info = finfo(_dtype(x))\n cond = lax.abs(x) < info.tiny\n x1 = where(cond, x * lax._const(x, 1 << info.nmant), x)\n x2 = where(cond, lax._const(np.int32, -info.nmant), lax._const(np.int32, 0))\n int_type = _INT_DTYPES[info.bits]\n return lax.bitcast_convert_type(x1, int_type), x2\n\n\n@_wraps(np.ldexp)\n@jit\ndef ldexp(x1, x2):\n _check_arraylike(\"ldexp\", x1, x2)\n dtype = dtypes.canonicalize_dtype(_result_dtype(np.ldexp, x1, x2))\n x1, x2 = _promote_shapes(\"ldexp\", x1, x2)\n x1 = lax.convert_element_type(x1, dtype)\n\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n int_type = _INT_DTYPES[info.bits]\n\n x, e = _normalize_float(x1)\n x2 += e + ((x >> info.nmant) & mask) - bias\n\n # find underflow/overflow before denormalization\n underflow_cond = x2 < -(bias + info.nmant)\n overflow_cond = x2 > bias\n\n m = ones_like(x, dtype=dtype)\n\n # denormals\n cond = x2 < -bias + 1\n x2 = where(cond, x2 + info.nmant, x2)\n m = where(cond, m / (1 << info.nmant), m)\n\n x2 = lax.convert_element_type(x2, np.int32)\n x &= ~(mask << info.nmant)\n x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)\n\n x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)\n\n # underflow\n x = where(underflow_cond, zeros_like(x, dtype=dtype), x)\n # overflow\n x = where(overflow_cond, lax.sign(x1) * full_like(x, np.inf), x)\n # ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0\n return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)\n\n\n@_wraps(np.frexp)\n@jit\ndef frexp(x):\n _check_arraylike(\"frexp\", x)\n x = asarray(x)\n if issubdtype(x.dtype, complexfloating):\n raise TypeError(\"frexp does not support complex-valued inputs\")\n elif not issubdtype(x.dtype, floating):\n x = lax.convert_element_type(x, float_)\n\n dtype = _dtype(x)\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n x1, x2 = _normalize_float(x)\n x2 += ((x1 >> info.nmant) & mask) - bias + 1\n x1 &= ~(mask << info.nmant)\n x1 |= (bias - 1) << info.nmant\n x1 = lax.bitcast_convert_type(x1, dtype)\n\n cond = isinf(x) | isnan(x) | (x == 0)\n x2 = where(cond, zeros_like(x2), x2)\n return where(cond, x, x1), lax.convert_element_type(x2, int32)\n\n\n@_wraps(np.remainder)\n@jit\ndef remainder(x1, x2):\n x1, x2 = _promote_args(\"remainder\", x1, x2)\n zero = _constant_like(x1, 0)\n trunc_mod = lax.rem(x1, x2)\n trunc_mod_not_zero = lax.ne(trunc_mod, zero)\n do_plus = lax.bitwise_and(\n lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)\n return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)\nmod = _wraps(np.mod)(remainder)\n\n\n@_wraps(np.fmod)\n@jit\ndef fmod(x1, x2):\n _check_arraylike(\"fmod\", x1, x2)\n if issubdtype(result_type(x1, x2), integer):\n x2 = 
where(x2 == 0, 1, x2)\n return lax.rem(*_promote_args(\"fmod\", x1, x2))\n\n\n@_wraps(np.square)\n@partial(jit, inline=True)\ndef square(x):\n _check_arraylike(\"square\", x)\n return lax.integer_pow(x, 2)\n\n\n@_wraps(np.deg2rad)\n@partial(jit, inline=True)\ndef deg2rad(x):\n x, = _promote_args_inexact(\"deg2rad\", x)\n return lax.mul(x, lax._const(x, pi / 180))\n\n\n@_wraps(np.rad2deg)\n@partial(jit, inline=True)\ndef rad2deg(x):\n x, = _promote_args_inexact(\"rad2deg\", x)\n return lax.mul(x, lax._const(x, 180 / pi))\n\n\ndegrees = rad2deg\nradians = deg2rad\n\n\n@_wraps(np.histogram_bin_edges)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n if isinstance(bins, str):\n raise NotImplementedError(\"string values for `bins` not implemented.\")\n _check_arraylike(\"histogram_bin_edges\", a, bins)\n a = ravel(a)\n b = asarray(bins)\n if b.ndim == 1:\n return b\n if range is None:\n range = [a.min(), a.max()]\n assert len(range) == 2\n range = asarray(range)\n range = (where(ptp(range) == 0, range[0] - 0.5, range[0]),\n where(ptp(range) == 0, range[1] + 0.5, range[1]))\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n dtype = promote_types(dtype, float32)\n return linspace(range[0], range[1], bins + 1, dtype=dtype)\n\n\n@_wraps(np.histogram)\ndef histogram(a, bins=10, range=None, weights=None, density=None):\n _check_arraylike(\"histogram\", a, bins)\n if weights is not None and a.shape != weights.shape:\n raise ValueError(\"weights should have the same shape as a.\")\n a = ravel(a)\n if weights is not None:\n weights = ravel(weights)\n else:\n weights = ones_like(a)\n bin_edges = histogram_bin_edges(a, bins, range, weights)\n bin_idx = searchsorted(bin_edges, a, side='right')\n bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)\n counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]\n if density:\n bin_widths = diff(bin_edges)\n counts = counts / bin_widths / counts.sum()\n return counts, bin_edges\n\n@_wraps(np.histogram2d)\ndef histogram2d(x, y, bins=10, range=None, weights=None, density=None):\n _check_arraylike(\"histogram2d\", x, y)\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n x_edges = y_edges = asarray(bins)\n bins = [x_edges, y_edges]\n\n sample = transpose(asarray([x, y]))\n hist, edges = histogramdd(sample, bins, range, weights, density)\n return hist, edges[0], edges[1]\n\n@_wraps(np.histogramdd)\ndef histogramdd(sample, bins=10, range=None, weights=None, density=None):\n _check_arraylike(\"histogramdd\", sample)\n N, D = shape(sample)\n\n if weights is not None and weights.shape != (N,):\n raise ValueError(\"should have one weight for each sample.\")\n\n if range is not None and (\n len(range) != D or _any(r is not None and len(r) != 2 for r in range)):\n raise ValueError(f\"For sample.shape={(N, D)}, range must be a sequence \"\n f\"of {D} pairs or Nones; got range={range}\")\n\n try:\n num_bins = len(bins)\n if num_bins != D:\n raise ValueError(\"should be a bin for each dimension.\")\n except TypeError:\n # when bin_size is integer, the same bin is used for each dimension\n bins = D * [bins]\n\n bin_idx_by_dim = D*[None]\n nbins = np.empty(D, int)\n bin_edges_by_dim = D*[None]\n dedges = D*[None]\n\n for i in builtins.range(D):\n range_i = None if range is None else range[i]\n bin_edges = histogram_bin_edges(sample[:, i], bins[i], range_i, weights)\n bin_idx = searchsorted(bin_edges, sample[:, i], side='right')\n bin_idx = where(sample[:, i] == bin_edges[-1], bin_idx - 1, bin_idx)\n 
bin_idx_by_dim[i] = bin_idx\n nbins[i] = len(bin_edges) + 1\n bin_edges_by_dim[i] = bin_edges\n dedges[i] = diff(bin_edges_by_dim[i])\n\n xy = ravel_multi_index(bin_idx_by_dim, nbins, mode='clip')\n hist = bincount(xy, weights, length=nbins.prod())\n hist = reshape(hist, nbins)\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n if density:\n hist /= hist.sum()\n for norm in ix_(*dedges):\n hist /= norm\n\n return hist, bin_edges_by_dim\n\n@_wraps(np.heaviside)\n@jit\ndef heaviside(x1, x2):\n _check_arraylike(\"heaviside\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n zero = lax._const(x1, 0)\n return where(lax.lt(x1, zero), zero,\n where(lax.gt(x1, zero), lax._const(x1, 1), x2))\n\n\n@_wraps(np.hypot)\n@jit\ndef hypot(x1, x2):\n _check_arraylike(\"hypot\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n x1 = lax.abs(x1)\n x2 = lax.abs(x2)\n x1, x2 = maximum(x1, x2), minimum(x1, x2)\n return lax.select(x1 == 0, x1, x1 * lax.sqrt(1 + lax.square(lax.div(x2, lax.select(x1 == 0, ones_like(x1), x1)))))\n\n\n@_wraps(np.reciprocal)\n@partial(jit, inline=True)\ndef reciprocal(x):\n _check_arraylike(\"reciprocal\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.integer_pow(x, -1)\n\n\n@_wraps(np.sinc, update_doc=False)\n@jit\ndef sinc(x):\n _check_arraylike(\"sinc\", x)\n x, = _promote_dtypes_inexact(x)\n eq_zero = lax.eq(x, lax._const(x, 0))\n pi_x = lax.mul(lax._const(x, pi), x)\n safe_pi_x = where(eq_zero, lax._const(x, 1), pi_x)\n return where(eq_zero, _sinc_maclaurin(0, pi_x),\n lax.div(lax.sin(safe_pi_x), safe_pi_x))\n\n@partial(custom_jvp, nondiff_argnums=(0,))\ndef _sinc_maclaurin(k, x):\n # compute the kth derivative of x -> sin(x)/x evaluated at zero (since we\n # compute the monomial term in the jvp rule)\n if k % 2:\n return lax.full_like(x, 0)\n else:\n return lax.full_like(x, (-1) ** (k // 2) / (k + 1))\n\n@_sinc_maclaurin.defjvp\ndef _sinc_maclaurin_jvp(k, primals, tangents):\n (x,), (t,) = primals, tangents\n return _sinc_maclaurin(k, x), _sinc_maclaurin(k + 1, x) * t\n\n_ARRAY_VIEW_DOC = \"\"\"\nThe JAX version of this function may in some cases return a copy rather than a\nview of the input.\n\"\"\"\n\n@_wraps(np.transpose, lax_description=_ARRAY_VIEW_DOC)\ndef transpose(a, axes=None):\n _check_arraylike(\"transpose\", a)\n axes = np.arange(ndim(a))[::-1] if axes is None else axes\n return lax.transpose(a, axes)\n\n\n@_wraps(np.rot90, lax_description=_ARRAY_VIEW_DOC)\n@partial(jit, static_argnames=('k', 'axes'))\ndef rot90(m, k=1, axes=(0, 1)):\n _check_arraylike(\"rot90\", m)\n ax1, ax2 = axes\n ax1 = _canonicalize_axis(ax1, ndim(m))\n ax2 = _canonicalize_axis(ax2, ndim(m))\n if ax1 == ax2:\n raise ValueError(\"Axes must be different\") # same as numpy error\n k = k % 4\n if k == 0:\n return m\n elif k == 2:\n return flip(flip(m, ax1), ax2)\n else:\n perm = list(range(m.ndim))\n perm[ax1], perm[ax2] = perm[ax2], perm[ax1]\n if k == 1:\n return transpose(flip(m, ax2), perm)\n else:\n return flip(transpose(m, perm), ax2)\n\n\n@_wraps(np.flip, lax_description=_ARRAY_VIEW_DOC)\ndef flip(m, axis: Optional[Union[int, Tuple[int, ...]]] = None):\n return _flip(m, _ensure_optional_axes(axis))\n\n@partial(jit, static_argnames=('axis',))\ndef _flip(m, axis: Optional[Union[int, Tuple[int, ...]]] = None):\n _check_arraylike(\"flip\", m)\n if axis is None:\n return lax.rev(m, list(range(len(shape(m)))))\n axis = _ensure_index_tuple(axis)\n return lax.rev(m, [_canonicalize_axis(ax, ndim(m)) for ax in axis])\n\n\n@_wraps(np.fliplr, lax_description=_ARRAY_VIEW_DOC)\ndef 
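# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Small example of the histogram helpers defined above; the shapes follow the
# usual NumPy contract (one more edge than bins), and `histogramdd` accepts an
# (N, D) sample array with one set of edges per dimension.
import jax.numpy as jnp

data = jnp.array([0.1, 0.4, 0.4, 0.7, 0.9])
counts, edges = jnp.histogram(data, bins=4, range=(0.0, 1.0))
assert counts.shape[0] + 1 == edges.shape[0]   # 4 bins, 5 edges

sample = jnp.array([[0.1, 0.2, 0.3],
                    [0.8, 0.9, 0.7]])          # 2 samples in 3 dimensions
hist, edges_by_dim = jnp.histogramdd(sample, bins=2, range=[(0.0, 1.0)] * 3)
assert hist.shape == (2, 2, 2) and len(edges_by_dim) == 3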
fliplr(m):\n return _flip(m, 1)\n\n\n@_wraps(np.flipud, lax_description=_ARRAY_VIEW_DOC)\ndef flipud(m):\n return _flip(m, 0)\n\n\n@_wraps(np.conjugate)\n@partial(jit, inline=True)\ndef conjugate(x):\n _check_arraylike(\"conjugate\", x)\n return lax.conj(x) if iscomplexobj(x) else x\nconj = conjugate\n\n\n@_wraps(np.imag)\n@partial(jit, inline=True)\ndef imag(val):\n _check_arraylike(\"imag\", val)\n return lax.imag(val) if iscomplexobj(val) else zeros_like(val)\n\n\n@_wraps(np.real)\n@partial(jit, inline=True)\ndef real(val):\n _check_arraylike(\"real\", val)\n return lax.real(val) if iscomplexobj(val) else val\n\n\n@_wraps(np.iscomplex)\n@jit\ndef iscomplex(x):\n i = imag(x)\n return lax.ne(i, lax._const(i, 0))\n\n@_wraps(np.isreal)\n@jit\ndef isreal(x):\n i = imag(x)\n return lax.eq(i, lax._const(i, 0))\n\n@_wraps(np.angle)\n@jit\ndef angle(z):\n re = real(z)\n im = imag(z)\n dtype = _dtype(re)\n if not issubdtype(dtype, inexact) or (\n issubdtype(_dtype(z), floating) and ndim(z) == 0):\n dtype = dtypes.canonicalize_dtype(float_)\n re = lax.convert_element_type(re, dtype)\n im = lax.convert_element_type(im, dtype)\n return lax.atan2(im, re)\n\n\n@_wraps(np.diff)\n@partial(jit, static_argnames=('n', 'axis'))\ndef diff(a, n=1, axis: int = -1, prepend=None, append=None):\n _check_arraylike(\"diff\", a)\n n = core.concrete_or_error(operator.index, n, \"'n' argument of jnp.diff\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.diff\")\n if n == 0:\n return a\n if n < 0:\n raise ValueError(f\"order must be non-negative but got {n}\")\n if ndim(a) == 0:\n raise ValueError(f\"diff requires input that is at least one dimensional; got {a}\")\n\n nd = a.ndim\n axis = _canonicalize_axis(axis, nd)\n\n combined = []\n if prepend is not None:\n _check_arraylike(\"diff\", prepend)\n if isscalar(prepend):\n shape = list(a.shape)\n shape[axis] = 1\n prepend = broadcast_to(prepend, tuple(shape))\n combined.append(prepend)\n\n combined.append(a)\n\n if append is not None:\n _check_arraylike(\"diff\", append)\n if isscalar(append):\n shape = list(a.shape)\n shape[axis] = 1\n append = broadcast_to(append, tuple(shape))\n combined.append(append)\n\n if len(combined) > 1:\n a = concatenate(combined, axis)\n\n slice1 = [slice(None)] * nd\n slice2 = [slice(None)] * nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n slice1_tuple = tuple(slice1)\n slice2_tuple = tuple(slice2)\n\n op = not_equal if a.dtype == np.bool_ else subtract\n for _ in range(n):\n a = op(a[slice1_tuple], a[slice2_tuple])\n\n return a\n\n_EDIFF1D_DOC = \"\"\"\\\nUnlike NumPy's implementation of ediff1d, :py:func:`jax.numpy.ediff1d` will not\nissue an error if casting ``to_end`` or ``to_begin`` to the type of ``ary``\nloses precision.\n\"\"\"\n\n@_wraps(np.ediff1d, lax_description=_EDIFF1D_DOC)\n@jit\ndef ediff1d(ary, to_end=None, to_begin=None):\n _check_arraylike(\"ediff1d\", ary)\n ary = ravel(ary)\n result = lax.sub(ary[1:], ary[:-1])\n if to_begin is not None:\n _check_arraylike(\"ediff1d\", to_begin)\n result = concatenate((ravel(asarray(to_begin, dtype=ary.dtype)), result))\n if to_end is not None:\n _check_arraylike(\"ediff1d\", to_end)\n result = concatenate((result, ravel(asarray(to_end, dtype=ary.dtype))))\n return result\n\n\n@_wraps(np.gradient, skip_params=['edge_order'])\n@partial(jit, static_argnames=('axis', 'edge_order'))\ndef gradient(f, *varargs, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n edge_order=None):\n if edge_order is not None:\n raise 
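# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of `diff` as implemented above: `n` applies the first difference
# repeatedly, and `prepend`/`append` are concatenated along `axis` beforehand.
import jax.numpy as jnp

a = jnp.array([1, 2, 4, 7, 0])
print(jnp.diff(a))             # expected: [ 1  2  3 -7]
print(jnp.diff(a, n=2))        # expected: [  1   1 -10]
print(jnp.diff(a, prepend=0))  # expected: [ 1  1  2  3 -7]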
NotImplementedError(\"The 'edge_order' argument to jnp.gradient is not supported.\")\n\n def gradient_along_axis(a, h, axis):\n sliced = partial(lax.slice_in_dim, a, axis=axis)\n a_grad = concatenate((\n (sliced(1, 2) - sliced(0, 1)), # upper edge\n (sliced(2, None) - sliced(None, -2)) * 0.5, # inner\n (sliced(-1, None) - sliced(-2, -1)), # lower edge\n ), axis)\n return a_grad / h\n\n a = f\n axis_tuple: Tuple[int, ...]\n if axis is None:\n axis_tuple = tuple(range(a.ndim))\n else:\n if isinstance(axis, int):\n axis = (axis,)\n elif not isinstance(axis, tuple) and not isinstance(axis, list):\n raise ValueError(\"Give `axis` either as int or iterable\")\n elif len(axis) == 0:\n return []\n axis_tuple = tuple(_canonicalize_axis(i, a.ndim) for i in axis)\n\n if _min([s for i, s in enumerate(a.shape) if i in axis_tuple]) < 2:\n raise ValueError(\"Shape of array too small to calculate \"\n \"a numerical gradient, \"\n \"at least 2 elements are required.\")\n len_axes = len(axis_tuple)\n n = len(varargs)\n if n == 0 or varargs is None:\n # no spacing\n dx = [1.0] * len_axes\n elif n == 1:\n # single value for all axes\n dx = list(varargs) * len_axes\n elif n == len_axes:\n dx = list(varargs)\n else:\n TypeError(\"Invalid number of spacing arguments %d\" % n)\n\n if ndim(dx[0]) != 0:\n raise NotImplementedError(\"Non-constant spacing not implemented\")\n\n # TODO: use jax.lax loop tools if possible\n a_grad = [gradient_along_axis(a, h, ax) for ax, h in zip(axis_tuple, dx)]\n\n if len(axis_tuple) == 1:\n a_grad = a_grad[0]\n\n return a_grad\n\n\n@_wraps(np.isrealobj)\ndef isrealobj(x):\n return not iscomplexobj(x)\n\n_POLYFIT_DOC = \"\"\"\\\nUnlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix\nAlso, it works best on rcond <= 10e-3 values.\n\"\"\"\n@_wraps(np.polyfit, lax_description=_POLYFIT_DOC)\n@partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))\ndef polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\n _check_arraylike(\"polyfit\", x, y)\n deg = core.concrete_or_error(int, deg, \"deg must be int\")\n order = deg + 1\n # check arguments\n if deg < 0:\n raise ValueError(\"expected deg >= 0\")\n if x.ndim != 1:\n raise TypeError(\"expected 1D vector for x\")\n if x.size == 0:\n raise TypeError(\"expected non-empty vector for x\")\n if y.ndim < 1 or y.ndim > 2:\n raise TypeError(\"expected 1D or 2D array for y\")\n if x.shape[0] != y.shape[0]:\n raise TypeError(\"expected x and y to have same length\")\n\n # set rcond\n if rcond is None:\n rcond = len(x)*finfo(x.dtype).eps\n rcond = core.concrete_or_error(float, rcond, \"rcond must be float\")\n # set up least squares equation for powers of x\n lhs = vander(x, order)\n rhs = y\n\n # apply weighting\n if w is not None:\n _check_arraylike(\"polyfit\", w)\n w, = _promote_dtypes_inexact(w)\n if w.ndim != 1:\n raise TypeError(\"expected a 1-d array for weights\")\n if w.shape[0] != y.shape[0]:\n raise TypeError(\"expected w and y to have the same length\")\n lhs *= w[:, newaxis]\n if rhs.ndim == 2:\n rhs *= w[:, newaxis]\n else:\n rhs *= w\n\n # scale lhs to improve condition number and solve\n scale = sqrt((lhs*lhs).sum(axis=0))\n lhs /= scale[newaxis,:]\n from jax._src.numpy import linalg\n c, resids, rank, s = linalg.lstsq(lhs, rhs, rcond)\n c = (c.T/scale).T # broadcast scale coefficients\n\n if full:\n return c, resids, rank, s, rcond\n elif cov:\n Vbase = linalg.inv(dot(lhs.T, lhs))\n Vbase /= outer(scale, scale)\n if cov == 
\"unscaled\":\n fac = 1\n else:\n if len(x) <= order:\n raise ValueError(\"the number of data points must exceed order \"\n \"to scale the covariance matrix\")\n fac = resids / (len(x) - order)\n fac = fac[0] #making np.array() of shape (1,) to int\n if y.ndim == 1:\n return c, Vbase * fac\n else:\n return c, Vbase[:,:, newaxis] * fac\n else:\n return c\n\n\n@_wraps(np.reshape, lax_description=_ARRAY_VIEW_DOC)\ndef reshape(a, newshape, order=\"C\"):\n _stackable(a) or _check_arraylike(\"reshape\", a)\n try:\n return a.reshape(newshape, order=order) # forward to method for ndarrays\n except AttributeError:\n return _reshape(a, newshape, order=order)\n\ndef _compute_newshape(a, newshape):\n \"\"\"Fixes a -1 value in newshape, if present.\"\"\"\n # other errors, like having more than one -1, are caught downstream, in\n # reshape_shape_rule.\n try: iter(newshape)\n except: iterable = False\n else: iterable = True\n newshape = core.canonicalize_shape(newshape if iterable else [newshape])\n return tuple(- core.divide_shape_sizes(np.shape(a), newshape)\n if core.symbolic_equal_dim(d, -1) else d\n for d in newshape)\n\n\ndef _reshape(a, *args, order=\"C\"):\n newshape = _compute_newshape(a, args[0] if len(args) == 1 else args)\n if order == \"C\":\n return lax.reshape(a, newshape, None)\n elif order == \"F\":\n dims = np.arange(ndim(a))[::-1]\n return lax.reshape(a, newshape[::-1], dims).T\n elif order == \"A\":\n raise NotImplementedError(\"np.reshape order=A is not implemented.\")\n else:\n raise ValueError(\"Unexpected value for 'order' argument: {}.\".format(order))\n\ndef _transpose(a, *args):\n if not args:\n axis = None\n elif len(args) == 1:\n axis = args[0] if args[0] is None else _ensure_index_tuple(args[0])\n else:\n axis = _ensure_index_tuple(args)\n return transpose(a, axis)\n\n@_wraps(np.ravel, lax_description=_ARRAY_VIEW_DOC)\n@partial(jit, static_argnames=('order',), inline=True)\ndef ravel(a, order=\"C\"):\n _stackable(a) or _check_arraylike(\"ravel\", a)\n if order == \"K\":\n raise NotImplementedError(\"Ravel not implemented for order='K'.\")\n return reshape(a, (size(a),), order)\n\n\n@_wraps(np.ravel_multi_index)\ndef ravel_multi_index(multi_index, dims, mode='raise', order='C'):\n assert len(multi_index) == len(dims), f\"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}\"\n dims = tuple(core.concrete_or_error(int, d, \"in `dims` argument of ravel_multi_index().\") for d in dims)\n _check_arraylike(\"ravel_multi_index\", *multi_index)\n for index in multi_index:\n if mode == 'raise':\n core.concrete_or_error(array, index,\n \"The error occurred because ravel_multi_index was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if not issubdtype(_dtype(index), integer):\n raise TypeError(\"only int indices permitted\")\n if mode == \"raise\":\n if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):\n raise ValueError(\"invalid entry in coordinates array\")\n elif mode == \"clip\":\n multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]\n elif mode == \"wrap\":\n multi_index = [i % d for i, d in zip(multi_index, dims)]\n else:\n raise ValueError(f\"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'\")\n\n if order == \"F\":\n strides = np.cumprod((1,) + dims[:-1])\n elif order == \"C\":\n strides = np.cumprod((1,) + dims[1:][::-1])[::-1]\n else:\n raise ValueError(f\"invalid order={order!r}. 
Expected 'C' or 'F'\")\n\n result = array(0, dtype=dtypes.canonicalize_dtype(int_))\n for i, s in zip(multi_index, strides):\n result = result + i * s\n return result\n\n\n_UNRAVEL_INDEX_DOC = \"\"\"\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped.\n\"\"\"\n\n@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)\ndef unravel_index(indices, shape):\n _check_arraylike(\"unravel_index\", indices)\n sizes = append(array(shape), 1)\n cumulative_sizes = cumprod(sizes[::-1])[::-1]\n total_size = cumulative_sizes[0]\n # Clip so raveling and unraveling an oob index will not change the behavior\n clipped_indices = clip(indices, -total_size, total_size - 1)\n # Add enough trailing dims to avoid conflict with clipped_indices\n cumulative_sizes = expand_dims(cumulative_sizes, range(1, 1 + _ndim(indices)))\n clipped_indices = expand_dims(clipped_indices, axis=0)\n idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:]\n # TODO(jakevdp): return tuple(idx) once it behaves properly (#3821)\n return tuple(lax.index_in_dim(idx, i, keepdims=False) for i in range(idx.shape[0]))\n\n@_wraps(np.resize)\n@partial(jit, static_argnames=('new_shape',))\ndef resize(a, new_shape):\n _check_arraylike(\"resize\", a)\n new_shape = _ensure_index_tuple(new_shape)\n\n if _any(dim_length < 0 for dim_length in new_shape):\n raise ValueError(\"all elements of `new_shape` must be non-negative\")\n\n a = ravel(a)\n\n new_size = _prod(new_shape)\n if a.size == 0 or new_size == 0:\n return zeros_like(a, shape=new_shape)\n\n repeats = ceil_of_ratio(new_size, a.size)\n a = tile(a, repeats)[:new_size]\n\n return reshape(a, new_shape)\n\n@_wraps(np.squeeze, lax_description=_ARRAY_VIEW_DOC)\ndef squeeze(a, axis: Optional[Union[int, Tuple[int, ...]]] = None):\n return _squeeze(a, _ensure_index_tuple(axis) if axis is not None else None)\n\n@partial(jit, static_argnames=('axis',), inline=True)\ndef _squeeze(a, axis):\n _check_arraylike(\"squeeze\", a)\n if axis is None:\n a_shape = shape(a)\n axis = tuple(i for i, d in enumerate(a_shape) if d == 1)\n return lax.squeeze(a, axis)\n\n\n@_wraps(np.expand_dims)\ndef expand_dims(a, axis: Union[int, Sequence[int]]):\n _check_arraylike(\"expand_dims\", a)\n return lax.expand_dims(a, _ensure_index_tuple(axis))\n\n\n@_wraps(np.swapaxes, lax_description=_ARRAY_VIEW_DOC)\n@partial(jit, static_argnames=('axis1', 'axis2'), inline=True)\ndef swapaxes(a, axis1: int, axis2: int):\n _check_arraylike(\"swapaxes\", a)\n perm = np.arange(ndim(a))\n perm[axis1], perm[axis2] = perm[axis2], perm[axis1]\n return lax.transpose(a, perm)\n\n\n@_wraps(np.moveaxis, lax_description=_ARRAY_VIEW_DOC)\ndef moveaxis(a, source: Union[int, Sequence[int]],\n destination: Union[int, Sequence[int]]):\n return _moveaxis(a, _ensure_index_tuple(source),\n _ensure_index_tuple(destination))\n\n@partial(jit, static_argnames=('source', 'destination'), inline=True)\ndef _moveaxis(a, source: Tuple[int, ...], destination: Tuple[int, ...]):\n _check_arraylike(\"moveaxis\", a)\n source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)\n destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)\n if len(source) != len(destination):\n raise ValueError(\"Inconsistent number of elements: {} vs {}\"\n .format(len(source), len(destination)))\n perm = [i for i in range(ndim(a)) if i not in source]\n for dest, src in sorted(zip(destination, source)):\n perm.insert(dest, src)\n return lax.transpose(a, 
perm)\n\n\n@_wraps(np.isclose)\n@partial(jit, static_argnames=('equal_nan',))\ndef isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = _promote_args(\"isclose\", a, b)\n dtype = _dtype(a)\n if issubdtype(dtype, inexact):\n if issubdtype(dtype, complexfloating):\n dtype = _complex_elem_type(dtype)\n rtol = lax.convert_element_type(rtol, dtype)\n atol = lax.convert_element_type(atol, dtype)\n out = lax.le(\n lax.abs(lax.sub(a, b)),\n lax.add(atol, lax.mul(rtol, lax.abs(b))))\n # This corrects the comparisons for infinite and nan values\n a_inf = isinf(a)\n b_inf = isinf(b)\n any_inf = logical_or(a_inf, b_inf)\n both_inf = logical_and(a_inf, b_inf)\n # Make all elements where either a or b are infinite to False\n out = logical_and(out, logical_not(any_inf))\n # Make all elements where both a or b are the same inf to True\n same_value = lax.eq(a, b)\n same_inf = logical_and(both_inf, same_value)\n out = logical_or(out, same_inf)\n\n # Make all elements where either a or b is NaN to False\n a_nan = isnan(a)\n b_nan = isnan(b)\n any_nan = logical_or(a_nan, b_nan)\n out = logical_and(out, logical_not(any_nan))\n if equal_nan:\n # Make all elements where both a and b is NaN to True\n both_nan = logical_and(a_nan, b_nan)\n out = logical_or(out, both_nan)\n return out\n else:\n return lax.eq(a, b)\n\n\n@_wraps(np.interp)\n@partial(jit, static_argnames=('period',))\ndef interp(x, xp, fp, left=None, right=None, period=None):\n if shape(xp) != shape(fp) or ndim(xp) != 1:\n raise ValueError(\"xp and fp must be one-dimensional arrays of equal size\")\n x, xp, fp = _promote_dtypes_inexact(x, xp, fp)\n if period is not None:\n if period == 0:\n raise ValueError(f\"period must be a non-zero value; got {period}\")\n period = abs(period)\n x = x % period\n xp = xp % period\n xp, fp = lax.sort_key_val(xp, fp)\n xp = concatenate([xp[-1:] - period, xp, xp[:1] + period])\n fp = concatenate([fp[-1:], fp, fp[:1]])\n\n i = clip(searchsorted(xp, x, side='right'), 1, len(xp) - 1)\n df = fp[i] - fp[i - 1]\n dx = xp[i] - xp[i - 1]\n delta = x - xp[i - 1]\n f = where((dx == 0), fp[i], fp[i - 1] + (delta / dx) * df)\n\n if period is None:\n f = where(x < xp[0], fp[0] if left is None else left, f)\n f = where(x > xp[-1], fp[-1] if right is None else right, f)\n return f\n\n\n@_wraps(np.in1d, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\n@partial(jit, static_argnames=('assume_unique', 'invert',))\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\n _check_arraylike(\"in1d\", ar1, ar2)\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n # Note: an algorithm based on searchsorted has better scaling, but in practice\n # is very slow on accelerators because it relies on lax control flow. If XLA\n # ever supports binary search natively, we should switch to this:\n # ar2 = jnp.sort(ar2)\n # ind = jnp.searchsorted(ar2, ar1)\n # if invert:\n # return ar1 != ar2[ind]\n # else:\n # return ar1 == ar2[ind]\n if invert:\n return (ar1[:, None] != ar2[None, :]).all(-1)\n else:\n return (ar1[:, None] == ar2[None, :]).any(-1)\n\n_SETDIFF1D_DOC = \"\"\"\\\nBecause the size of the output of ``setdiff1d`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the output array: it must be specified statically for ``jnp.setdiff1d``\nto be compiled with non-static operands. 
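# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of `interp` with the `period` argument handled above: the x-coordinates
# are wrapped modulo the period, so interpolation works across the boundary.
import jax.numpy as jnp

xp = jnp.array([0., 90., 180., 270.])
fp = jnp.array([0., 1., 0., -1.])
x = jnp.array([315., 405.])   # outside [0, 270], but equivalent modulo 360
print(jnp.interp(x, xp, fp, period=360.0))   # expected: [-0.5  0.5]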
If specified, the first `size` unique elements will\nbe returned; if there are fewer unique elements than `size` indicates, the return value will\nbe padded with the `fill_value`, which defaults to zero.\"\"\"\n\n@_wraps(np.setdiff1d, lax_description=_SETDIFF1D_DOC)\ndef setdiff1d(ar1, ar2, assume_unique=False, *, size=None, fill_value=None):\n _check_arraylike(\"setdiff1d\", ar1, ar2)\n if size is None:\n ar1 = core.concrete_or_error(None, ar1, \"The error arose in setdiff1d()\")\n else:\n size = core.concrete_or_error(operator.index, size, \"The error arose in setdiff1d()\")\n ar1 = asarray(ar1)\n fill_value = asarray(0 if fill_value is None else fill_value, dtype=ar1.dtype)\n if ar1.size == 0:\n return full_like(ar1, fill_value, shape=size or 0)\n if not assume_unique:\n ar1 = unique(ar1, size=size and ar1.size)\n mask = in1d(ar1, ar2, invert=True)\n if size is None:\n return ar1[mask]\n else:\n if not (assume_unique or size is None):\n # Set mask to zero at locations corresponding to unique() padding.\n n_unique = ar1.size + 1 - (ar1 == ar1[0]).sum()\n mask = where(arange(ar1.size) < n_unique, mask, False)\n return where(arange(size) < mask.sum(), ar1[where(mask, size=size)], fill_value)\n\n\n_UNION1D_DOC = \"\"\"\\\nBecause the size of the output of ``union1d`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the output array: it must be specified statically for ``jnp.union1d``\nto be compiled with non-static operands. If specified, the first `size` unique elements\nwill be returned; if there are fewer unique elements than `size` indicates, the return\nvalue will be padded with `fill_value`, which defaults to the minimum value of the union.\"\"\"\n\n@_wraps(np.union1d, lax_description=_UNION1D_DOC)\ndef union1d(ar1, ar2, *, size=None, fill_value=None):\n _check_arraylike(\"union1d\", ar1, ar2)\n if size is None:\n ar1 = core.concrete_or_error(None, ar1, \"The error arose in union1d()\")\n ar2 = core.concrete_or_error(None, ar2, \"The error arose in union1d()\")\n else:\n size = core.concrete_or_error(operator.index, size, \"The error arose in union1d()\")\n return unique(concatenate((ar1, ar2), axis=None), size=size, fill_value=fill_value)\n\n\n@_wraps(np.setxor1d, lax_description=\"\"\"\nIn the JAX version, the input arrays are explicitly flattened regardless\nof assume_unique value.\n\"\"\")\ndef setxor1d(ar1, ar2, assume_unique=False):\n _check_arraylike(\"setxor1d\", ar1, ar2)\n ar1 = core.concrete_or_error(None, ar1, \"The error arose in setxor1d()\")\n ar2 = core.concrete_or_error(None, ar2, \"The error arose in setxor1d()\")\n\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n\n if not assume_unique:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n\n aux = concatenate((ar1, ar2))\n if aux.size == 0:\n return aux\n\n aux = sort(aux)\n flag = concatenate((array([True]), aux[1:] != aux[:-1], array([True])))\n return aux[flag[1:] & flag[:-1]]\n\n\n@partial(jit, static_argnums=2)\ndef _intersect1d_sorted_mask(ar1, ar2, return_indices=False):\n \"\"\"\n Helper function for intersect1d which is jit-able\n \"\"\"\n ar = concatenate((ar1, ar2))\n if return_indices:\n iota = lax.broadcasted_iota(np.int64, shape(ar), dimension=0)\n aux, indices = lax.sort_key_val(ar, iota)\n else:\n aux = sort(ar)\n\n mask = aux[1:] == aux[:-1]\n if return_indices:\n return aux, mask, indices\n else:\n return aux, mask\n\n\n@_wraps(np.intersect1d)\ndef intersect1d(ar1, ar2, assume_unique=False, return_indices=False):\n 
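# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of the JAX-specific static `size` argument described in the docstrings
# above: with `size` fixed, set operations have a static output shape and can be
# used under `jit`, padding with `fill_value` when fewer elements remain.
import jax
import jax.numpy as jnp

@jax.jit
def diff_padded(a, b):
  # Always returns 4 elements, padded with -1 when fewer unique values remain.
  return jnp.setdiff1d(a, b, size=4, fill_value=-1)

a = jnp.array([1, 2, 3, 4, 5, 6])
b = jnp.array([2, 4, 6])
print(diff_padded(a, b))   # expected: [ 1  3  5 -1]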
_check_arraylike(\"intersect1d\", ar1, ar2)\n ar1 = core.concrete_or_error(None, ar1, \"The error arose in intersect1d()\")\n ar2 = core.concrete_or_error(None, ar2, \"The error arose in intersect1d()\")\n\n if not assume_unique:\n if return_indices:\n ar1, ind1 = unique(ar1, return_index=True)\n ar2, ind2 = unique(ar2, return_index=True)\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n else:\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n\n if return_indices:\n aux, mask, aux_sort_indices = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n else:\n aux, mask = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n\n int1d = aux[:-1][mask]\n\n if return_indices:\n ar1_indices = aux_sort_indices[:-1][mask]\n ar2_indices = aux_sort_indices[1:][mask] - ar1.size\n if not assume_unique:\n ar1_indices = ind1[ar1_indices]\n ar2_indices = ind2[ar2_indices]\n\n return int1d, ar1_indices, ar2_indices\n else:\n return int1d\n\n\n@_wraps(np.isin, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef isin(element, test_elements, assume_unique=False, invert=False):\n result = in1d(element, test_elements, assume_unique=assume_unique, invert=invert)\n return result.reshape(shape(element))\n\n\n# The `jit` on `where` exists to avoid materializing constants in cases like\n# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to\n# materialize the broadcast forms of scalar arguments.\n@jit\ndef _where(condition, x=None, y=None):\n if x is None or y is None:\n raise ValueError(\"Either both or neither of the x and y arguments should \"\n \"be provided to jax.numpy.where, got {} and {}.\"\n .format(x, y))\n if not issubdtype(_dtype(condition), bool_):\n condition = lax.ne(condition, zeros_like(condition))\n x, y = _promote_dtypes(x, y)\n condition, x, y = broadcast_arrays(condition, x, y)\n return lax.select(condition, x, y) if not core.is_empty_shape(np.shape(x)) else x\n\n\n_WHERE_DOC = \"\"\"\\\nAt present, JAX does not support JIT-compilation of the single-argument form\nof :py:func:`jax.numpy.where` because its output shape is data-dependent. The\nthree-argument form does not have a data-dependent shape and can be JIT-compiled\nsuccessfully. 
Alternatively, you can specify the optional ``size`` keyword:\nif specified, the first ``size`` True elements will be returned; if there\nare fewer True elements than ``size`` indicates, the index arrays will be\npadded with ``fill_value`` (default is 0.)\n\"\"\"\n\n@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)\ndef where(condition, x=None, y=None, *, size=None, fill_value=None):\n if x is None and y is None:\n _check_arraylike(\"where\", condition)\n return nonzero(condition, size=size, fill_value=fill_value)\n else:\n if size is not None or fill_value is not None:\n raise ValueError(\"size and fill_value arguments cannot be used in three-term where function.\")\n return _where(condition, x, y)\n\n\n@_wraps(np.select)\ndef select(condlist, choicelist, default=0):\n if len(condlist) != len(choicelist):\n msg = \"condlist must have length equal to choicelist ({} vs {})\"\n raise ValueError(msg.format(len(condlist), len(choicelist)))\n if len(condlist) == 0:\n raise ValueError(\"condlist must be non-empty\")\n choices = _promote_dtypes(default, *choicelist)\n choicelist = choices[1:]\n output = choices[0]\n for cond, choice in zip(condlist[::-1], choicelist[::-1]):\n output = where(cond, choice, output)\n return output\n\n\n@_wraps(np.bincount, lax_description=\"\"\"\\\nJax adds the optional `length` parameter which specifies the output length, and\ndefaults to ``x.max() + 1``. It must be specified for bincount to be compiled\nwith non-static operands. Values larger than the specified length will be discarded.\nIf `length` is specified, `minlength` will be ignored.\n\nAdditionally, while ``np.bincount`` raises an error if the input array contains\nnegative values, ``jax.numpy.bincount`` clips negative values to zero.\n\"\"\")\ndef bincount(x, weights=None, minlength=0, *, length=None):\n _check_arraylike(\"bincount\", x)\n if not issubdtype(_dtype(x), integer):\n msg = f\"x argument to bincount must have an integer type; got {x.dtype}\"\n raise TypeError(msg)\n if ndim(x) != 1:\n raise ValueError(\"only 1-dimensional input supported.\")\n minlength = core.concrete_or_error(operator.index, minlength,\n \"The error occurred because of argument 'minlength' of jnp.bincount.\")\n if length is None:\n x = core.concrete_or_error(asarray, x,\n \"The error occured because of argument 'x' of jnp.bincount. 
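# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# The two forms of `where` described above: the three-argument form is
# shape-stable and jit-friendly; the one-argument form has a data-dependent
# output shape, so under `jit` it needs the static `size` (and optional
# `fill_value`) arguments.
import jax
import jax.numpy as jnp

x = jnp.array([1, 0, 2, 0, 3])

print(jnp.where(x > 0, x, -1))            # expected: [ 1 -1  2 -1  3]

idx, = jnp.where(x > 0, size=4, fill_value=0)
print(idx)                                # expected: [0 2 4 0]  (padded with 0)

@jax.jit
def positive_indices(v):
  return jnp.where(v > 0, size=4, fill_value=0)[0]

print(positive_indices(x))                # expected: [0 2 4 0]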
\"\n \"To avoid this error, pass a static `length` argument.\")\n length = _max(minlength, x.size and x.max() + 1)\n else:\n length = core.concrete_or_error(operator.index, length,\n \"The error occurred because of argument 'length' of jnp.bincount.\")\n if weights is None:\n weights = np.array(1, dtype=int_)\n elif shape(x) != shape(weights):\n raise ValueError(\"shape of weights must match shape of x.\")\n return zeros(length, _dtype(weights)).at[clip(x, 0)].add(weights)\n\n@_wraps(getattr(np, \"broadcast_shapes\", None))\ndef broadcast_shapes(*shapes):\n if not shapes:\n return ()\n shapes = [(shape,) if np.ndim(shape) == 0 else tuple(shape) for shape in shapes]\n return lax.broadcast_shapes(*shapes)\n\n@partial(jit, inline=True)\ndef broadcast_arrays(*args):\n \"\"\"Like Numpy's broadcast_arrays but doesn't return views.\"\"\"\n shapes = [shape(arg) for arg in args]\n if len(set(shapes)) == 1:\n return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)\n for arg in args]\n result_shape = lax.broadcast_shapes(*shapes)\n return [broadcast_to(arg, result_shape) for arg in args]\n\n\n@_wraps(np.broadcast_to, lax_description=\"\"\"\\\nThe JAX version does not necessarily return a view of the input.\n\"\"\")\ndef broadcast_to(arr, shape):\n if hasattr(arr, \"broadcast_to\"):\n return arr.broadcast_to(shape)\n arr = arr if isinstance(arr, ndarray) else array(arr)\n shape = (shape,) if ndim(shape) == 0 else shape\n shape = canonicalize_shape(shape) # check that shape is concrete\n arr_shape = _shape(arr)\n if core.symbolic_equal_shape(arr_shape, shape):\n return arr\n else:\n nlead = len(shape) - len(arr_shape)\n shape_tail = shape[nlead:]\n compatible = _all(core.symbolic_equal_one_of_dim(arr_d, [1, shape_d])\n for arr_d, shape_d in safe_zip(arr_shape, shape_tail))\n if nlead < 0 or not compatible:\n msg = \"Incompatible shapes for broadcasting: {} and requested shape {}\"\n raise ValueError(msg.format(arr_shape, shape))\n diff, = np.where(tuple(not core.symbolic_equal_dim(arr_d, shape_d)\n for arr_d, shape_d in safe_zip(arr_shape, shape_tail)))\n new_dims = tuple(range(nlead)) + tuple(nlead + diff)\n kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))\n return lax.broadcast_in_dim(squeeze(arr, tuple(diff)), shape, kept_dims)\n\n\ndef _split(op, ary, indices_or_sections, axis=0):\n axis = core.concrete_or_error(int, axis, f\"in jax.numpy.{op} argument `axis`\")\n size = ary.shape[axis]\n if isinstance(indices_or_sections, (tuple, list)):\n indices_or_sections = np.array(\n [core.concrete_or_error(np.int64, i_s, f\"in jax.numpy.{op} argument 1\")\n for i_s in indices_or_sections], np.int64)\n split_indices = np.concatenate([[np.int64(0)], indices_or_sections,\n [np.int64(size)]])\n elif (isinstance(indices_or_sections, (np.ndarray, ndarray)) and\n indices_or_sections.ndim > 0):\n indices_or_sections = np.array(\n [core.concrete_or_error(np.int64, i_s, f\"in jax.numpy.{op} argument 1\")\n for i_s in indices_or_sections], np.int64)\n split_indices = np.concatenate([[np.int64(0)], indices_or_sections,\n [np.int64(size)]])\n else:\n indices_or_sections = core.concrete_or_error(np.int64, indices_or_sections,\n f\"in jax.numpy.{op} argument 1\")\n part_size, r = _divmod(size, indices_or_sections)\n if r == 0:\n split_indices = np.arange(indices_or_sections + 1,\n dtype=np.int64) * part_size\n elif op == \"array_split\":\n split_indices = np.concatenate(\n [np.arange(r + 1, dtype=np.int64) * (part_size + 1),\n np.arange(indices_or_sections - r, dtype=np.int64) * part_size\n + 
((r + 1) * (part_size + 1) - 1)])\n else:\n raise ValueError(\"array split does not result in an equal division\")\n starts, ends = [0] * ndim(ary), shape(ary)\n _subval = lambda x, i, v: subvals(x, [(i, v)])\n return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))\n for start, end in zip(split_indices[:-1], split_indices[1:])]\n\n@_wraps(np.split, lax_description=_ARRAY_VIEW_DOC)\ndef split(ary, indices_or_sections, axis: int = 0):\n return _split(\"split\", ary, indices_or_sections, axis=axis)\n\ndef _split_on_axis(np_fun, axis):\n @_wraps(np_fun, update_doc=False)\n def f(ary, indices_or_sections):\n return split(ary, indices_or_sections, axis=axis)\n return f\n\nvsplit = _split_on_axis(np.vsplit, axis=0)\nhsplit = _split_on_axis(np.hsplit, axis=1)\ndsplit = _split_on_axis(np.dsplit, axis=2)\n\n@_wraps(np.array_split)\ndef array_split(ary, indices_or_sections, axis: int = 0):\n return _split(\"array_split\", ary, indices_or_sections, axis=axis)\n\n@_wraps(np.clip, skip_params=['out'])\n@jit\ndef clip(a, a_min=None, a_max=None, out=None):\n _check_arraylike(\"clip\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.clip is not supported.\")\n if a_min is None and a_max is None:\n raise ValueError(\"At most one of a_min and a_max may be None\")\n if a_min is not None:\n a = maximum(a_min, a)\n if a_max is not None:\n a = minimum(a_max, a)\n return a\n\n@_wraps(np.around, skip_params=['out'])\n@partial(jit, static_argnames=('decimals',))\ndef round(a, decimals=0, out=None):\n _check_arraylike(\"round\", a)\n decimals = core.concrete_or_error(operator.index, decimals, \"'decimals' argument of jnp.round\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.round is not supported.\")\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n if decimals < 0:\n raise NotImplementedError(\n \"integer np.round not implemented for decimals < 0\")\n return a # no-op on integer types\n\n def _round_float(x):\n if decimals == 0:\n return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)\n\n # TODO(phawkins): the strategy of rescaling the value isn't necessarily a\n # good one since we may be left with an incorrectly rounded value at the\n # end due to precision problems. 
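# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of `split` vs `array_split` as implemented above: `split` requires an
# equal division (or explicit indices), while `array_split` tolerates a
# remainder by giving the leading chunks one extra element.
import jax.numpy as jnp

a = jnp.arange(7)
print(jnp.array_split(a, 3))   # expected chunks: [0 1 2], [3 4], [5 6]
print(jnp.split(a, [2, 5]))    # split at indices: [0 1], [2 3 4], [5 6]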
As a workaround for float16, convert to\n # float32,\n x = lax.convert_element_type(x, np.float32) if dtype == np.float16 else x\n factor = _constant_like(x, 10 ** decimals)\n out = lax.div(lax.round(lax.mul(x, factor),\n lax.RoundingMethod.TO_NEAREST_EVEN), factor)\n return lax.convert_element_type(out, dtype) if dtype == np.float16 else out\n\n if issubdtype(dtype, complexfloating):\n return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))\n else:\n return _round_float(a)\naround = round\nround_ = round\n\n\n@_wraps(np.fix, skip_params=['out'])\n@jit\ndef fix(x, out=None):\n _check_arraylike(\"fix\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.fix is not supported.\")\n zero = lax._const(x, 0)\n return where(lax.ge(x, zero), floor(x), ceil(x))\n\n\n@_wraps(np.modf, skip_params=['out'])\n@jit\ndef modf(x, out=None):\n _check_arraylike(\"modf\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.modf is not supported.\")\n whole = fix(x)\n return x - whole, whole\n\n\n@_wraps(np.isfinite)\n@jit\ndef isfinite(x):\n _check_arraylike(\"isfinite\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.is_finite(x)\n elif issubdtype(dtype, complexfloating):\n return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))\n else:\n return full_like(x, True, dtype=bool_)\n\n@_wraps(np.isinf)\n@jit\ndef isinf(x):\n _check_arraylike(\"isinf\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(lax.abs(x), _constant_like(x, inf))\n elif issubdtype(dtype, complexfloating):\n re = lax.real(x)\n im = lax.imag(x)\n return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),\n lax.eq(lax.abs(im), _constant_like(im, inf)))\n else:\n return full_like(x, False, dtype=bool_)\n\ndef _isposneginf(infinity, x, out):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to isneginf/isposinf is not supported.\")\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(x, _constant_like(x, infinity))\n elif issubdtype(dtype, complexfloating):\n raise ValueError(\"isposinf/isneginf are not well defined for complex types\")\n else:\n return full_like(x, False, dtype=bool_)\n\nisposinf = _wraps(np.isposinf, skip_params=['out'])(\n lambda x, out=None: _isposneginf(inf, x, out)\n)\n\nisneginf = _wraps(np.isneginf, skip_params=['out'])(\n lambda x, out=None: _isposneginf(-inf, x, out)\n)\n\n@_wraps(np.isnan)\n@jit\ndef isnan(x):\n _check_arraylike(\"isnan\", x)\n return lax.ne(x, x)\n\n@_wraps(np.nan_to_num)\n@jit\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):\n del copy\n _check_arraylike(\"nan_to_num\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n return lax.complex(\n nan_to_num(lax.real(x), nan=nan, posinf=posinf, neginf=neginf),\n nan_to_num(lax.imag(x), nan=nan, posinf=posinf, neginf=neginf))\n info = finfo(dtypes.canonicalize_dtype(dtype))\n posinf = info.max if posinf is None else posinf\n neginf = info.min if neginf is None else neginf\n x = where(isnan(x), array(nan, dtype=x.dtype), x)\n x = where(isposinf(x), array(posinf, dtype=x.dtype), x)\n x = where(isneginf(x), array(neginf, dtype=x.dtype), x)\n return x\n\n### Reducers\n\ndef _reduction(a, name, np_fun, op, init_val, has_identity=True,\n preproc=None, bool_op=None, upcast_f16_for_computation=False,\n axis=None, dtype=None, out=None, keepdims=False, initial=None,\n where_=None, parallel_reduce=None):\n bool_op = bool_op or op\n # Note: we must accept 
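# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of `nan_to_num` as defined above: NaN maps to `nan`, and +/-inf map to
# the dtype's max/min unless `posinf`/`neginf` are given explicitly.
import jax.numpy as jnp

x = jnp.array([jnp.nan, jnp.inf, -jnp.inf, 1.0])
print(jnp.nan_to_num(x, nan=0.0, posinf=1e6, neginf=-1e6))  # expected: [0., 1e6, -1e6, 1.]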
out=None as an argument, because numpy reductions delegate to\n # object methods. For example `np.sum(x)` will call `x.sum()` if the `sum()` method\n # exists, passing along all its arguments.\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{name} is not supported.\")\n _check_arraylike(name, a)\n lax._check_user_dtype_supported(dtype, name)\n axis = core.concrete_or_error(None, axis, f\"axis argument to jnp.{name}().\")\n\n if initial is None and not has_identity:\n if not _all(core.greater_equal_dim(d, 1) for d in np.shape(a)):\n raise ValueError(f\"zero-size array to reduction operation {name} which has no identity\")\n if where_ is not None:\n raise ValueError(f\"reduction operation {name} does not have an identity, so to use a \"\n f\"where mask one has to specify 'initial'\")\n\n a = a if isinstance(a, ndarray) else asarray(a)\n a = preproc(a) if preproc else a\n pos_dims, dims = _reduction_dims(a, axis)\n result_dtype = dtypes.canonicalize_dtype(dtype or _dtype(np_fun(np.ones((), dtype=_dtype(a)))))\n if upcast_f16_for_computation and issubdtype(result_dtype, inexact):\n computation_dtype = promote_types(result_dtype, float32)\n else:\n computation_dtype = result_dtype\n a = lax.convert_element_type(a, computation_dtype)\n op = op if computation_dtype != np.bool_ else bool_op\n # NB: in XLA, init_val must be an identity for the op, so the user-specified\n # initial value must be applied afterward.\n init_val = _reduction_init_val(a, init_val)\n if where_ is not None:\n a = where(where_, a, init_val)\n if pos_dims is not dims:\n if parallel_reduce is None:\n raise NotImplementedError(f\"Named reductions not implemented for jnp.{name}()\")\n result = parallel_reduce(a, dims)\n else:\n result = lax.reduce(a, init_val, op, dims)\n if initial is not None:\n result = op(lax.convert_element_type(initial, a.dtype), result)\n if keepdims:\n result = expand_dims(result, pos_dims)\n return lax.convert_element_type(result, dtype or result_dtype)\n\ndef _canonicalize_axis_allow_named(x, rank):\n return maybe_named_axis(x, lambda i: _canonicalize_axis(i, rank), lambda name: name)\n\ndef _reduction_dims(a, axis):\n if axis is None:\n return (tuple(range(ndim(a))),) * 2\n elif not isinstance(axis, (np.ndarray, tuple, list)):\n axis = (axis,)\n canon_axis = tuple(_canonicalize_axis_allow_named(x, ndim(a))\n for x in axis)\n if len(canon_axis) != len(set(canon_axis)):\n raise ValueError(f\"duplicate value in 'axis': {axis}\")\n canon_pos_axis = tuple(x for x in canon_axis if isinstance(x, int))\n if len(canon_pos_axis) != len(canon_axis):\n return canon_pos_axis, canon_axis\n else:\n return canon_axis, canon_axis\n\ndef _reduction_init_val(a, init_val):\n # This function uses np.* functions because lax pattern matches against the\n # specific concrete values of the reduction inputs.\n a_dtype = dtypes.canonicalize_dtype(_dtype(a))\n if a_dtype == 'bool':\n return np.array(init_val > 0, dtype=a_dtype)\n try:\n return np.array(init_val, dtype=a_dtype)\n except OverflowError:\n assert issubdtype(a_dtype, integer)\n sign, info = np.sign(init_val), iinfo(a_dtype)\n return np.array(info.min if sign < 0 else info.max, dtype=a_dtype)\n\ndef _cast_to_bool(operand):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=np.ComplexWarning)\n return lax.convert_element_type(operand, bool_)\n\n\ndef _ensure_optional_axes(x):\n def force(x):\n if x is None:\n return None\n try:\n return operator.index(x)\n except TypeError:\n return tuple(i if isinstance(i, 
str) else operator.index(i) for i in x)\n return core.concrete_or_error(\n force, x, \"The axis argument must be known statically.\")\n\n\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'), inline=True)\ndef _reduce_sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None, keepdims=None, initial=None, where=None):\n return _reduction(a, \"sum\", np.sum, lax.add, 0,\n bool_op=lax.bitwise_or, upcast_f16_for_computation=True,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims,\n initial=initial, where_=where, parallel_reduce=lax.psum)\n\n@_wraps(np.sum, skip_params=['out'])\ndef sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None):\n return _reduce_sum(a, axis=_ensure_optional_axes(axis), dtype=dtype, out=out,\n keepdims=keepdims, initial=initial, where=where)\n\n\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'), inline=True)\ndef _reduce_prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None, keepdims=None, initial=None, where=None):\n return _reduction(a, \"prod\", np.prod, lax.mul, 1,\n bool_op=lax.bitwise_and, upcast_f16_for_computation=True,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims,\n initial=initial, where_=where)\n\n@_wraps(np.prod, skip_params=['out'])\ndef prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None):\n return _reduce_prod(a, axis=_ensure_optional_axes(axis), dtype=dtype,\n out=out, keepdims=keepdims, initial=initial, where=where)\n\n\n@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)\ndef _reduce_max(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduction(a, \"max\", np.max, lax.max, -np.inf, has_identity=False,\n axis=axis, out=out, keepdims=keepdims,\n initial=initial, where_=where, parallel_reduce=lax.pmax)\n\n@_wraps(np.max, skip_params=['out'])\ndef max(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduce_max(a, axis=_ensure_optional_axes(axis), out=out,\n keepdims=keepdims, initial=initial, where=where)\n\n@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)\ndef _reduce_min(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduction(a, \"min\", np.min, lax.min, np.inf, has_identity=False,\n axis=axis, out=out, keepdims=keepdims,\n initial=initial, where_=where, parallel_reduce=lax.pmin)\n\n@_wraps(np.min, skip_params=['out'])\ndef min(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduce_min(a, axis=_ensure_optional_axes(axis), out=out,\n keepdims=keepdims, initial=initial, where=where)\n\n@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)\ndef _reduce_all(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduction(a, \"all\", np.all, lax.bitwise_and, True, preproc=_cast_to_bool,\n axis=axis, out=out, keepdims=keepdims, where_=where)\n\n@_wraps(np.all, skip_params=['out'])\ndef all(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduce_all(a, axis=_ensure_optional_axes(axis), out=out,\n keepdims=keepdims, where=where)\n\n@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)\ndef 
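# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of the `where` and `initial` hooks threaded through `_reduction`
# above: masked-out elements are replaced by the reduction's identity, and
# `initial` is folded in after the `lax.reduce` call.
import jax.numpy as jnp

a = jnp.array([[1., 2., 3.],
               [4., 5., 6.]])
mask = jnp.array([[True, False, True],
                  [True, True, False]])

print(jnp.sum(a, where=mask))             # expected: 13.0  (1 + 3 + 4 + 5)
print(jnp.max(a, initial=10.0))           # expected: 10.0  (initial participates)
print(jnp.min(a, axis=1, keepdims=True))  # expected: [[1.] [4.]]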
_reduce_any(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduction(a, \"any\", np.any, lax.bitwise_or, False, preproc=_cast_to_bool,\n axis=axis, out=out, keepdims=keepdims, where_=where)\n\n@_wraps(np.any, skip_params=['out'])\ndef any(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduce_any(a, axis=_ensure_optional_axes(axis), out=out,\n keepdims=keepdims, where=where)\n\nproduct = prod\namin = min\namax = max\nalltrue = all\nsometrue = any\n\ndef _axis_size(a, axis):\n if not isinstance(axis, (tuple, list)):\n axis = (axis,)\n size = 1\n a_shape = shape(a)\n for a in axis:\n size *= maybe_named_axis(a, lambda i: a_shape[i], lambda name: lax.psum(1, name))\n return size\n\n@_wraps(np.mean, skip_params=['out'])\ndef mean(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False, *, where=None):\n return _mean(a, _ensure_optional_axes(axis), dtype, out, keepdims,\n where=where)\n\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'), inline=True)\ndef _mean(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False, *, where=None):\n _check_arraylike(\"mean\", a)\n lax._check_user_dtype_supported(dtype, \"mean\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.mean is not supported.\")\n\n if where is None:\n if axis is None:\n normalizer = core.dimension_as_value(size(a))\n else:\n normalizer = core.dimension_as_value(_axis_size(a, axis))\n else:\n normalizer = sum(broadcast_to(where, shape(a)), axis, dtype=dtype, keepdims=keepdims)\n\n if dtype is None:\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n dtype = float_\n else:\n dtype = _dtype(a)\n dtype = dtypes.canonicalize_dtype(dtype)\n\n return lax.div(\n sum(a, axis, dtype=dtype, keepdims=keepdims, where=where),\n lax.convert_element_type(normalizer, dtype))\n\n@_wraps(np.average)\ndef average(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, weights=None,\n returned=False):\n return _average(a, _ensure_optional_axes(axis), weights, returned)\n\n@partial(jit, static_argnames=('axis', 'returned'), inline=True)\ndef _average(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, weights=None,\n returned=False):\n a = asarray(a)\n\n if weights is None: # Treat all weights as 1\n avg = mean(a, axis=axis)\n if axis is None:\n weights_sum = full((), core.dimension_as_value(size(a)), dtype=avg.dtype)\n else:\n weights_sum = full_like(avg, core.dimension_as_value(a.shape[axis]), dtype=avg.dtype)\n else:\n weights = asarray(weights)\n\n if issubdtype(a.dtype, inexact):\n out_dtype = result_type(a.dtype, weights.dtype)\n else:\n out_dtype = result_type(a.dtype, weights.dtype, float_)\n out_dtype = dtypes.canonicalize_dtype(out_dtype)\n\n a_shape = shape(a)\n a_ndim = len(a_shape)\n weights_shape = shape(weights)\n axis = None if axis is None else _canonicalize_axis(axis, a_ndim)\n\n if a_shape != weights_shape:\n # Make sure the dimensions work out\n if axis is None:\n raise ValueError(\"Axis must be specified when shapes of a and \"\n \"weights differ.\")\n if len(weights_shape) != 1:\n raise ValueError(\"1D weights expected when shapes of a and \"\n \"weights differ.\")\n if not core.symbolic_equal_dim(weights_shape[0], a_shape[axis]):\n raise ValueError(\"Length of weights not \"\n \"compatible with specified axis.\")\n\n weights = broadcast_to(weights, (a_ndim - 1) * (1,) + 
weights_shape)\n weights = moveaxis(weights, -1, axis)\n\n weights_sum = sum(weights, axis=axis, dtype=out_dtype)\n avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum\n\n if returned:\n if avg.shape != weights_sum.shape:\n weights_sum = broadcast_to(weights_sum, avg.shape)\n return avg, weights_sum\n return avg\n\n\n@_wraps(np.var, skip_params=['out'])\ndef var(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n return _var(a, _ensure_optional_axes(axis), dtype, out, ddof, keepdims,\n where=where)\n\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef _var(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n _check_arraylike(\"var\", a)\n lax._check_user_dtype_supported(dtype, \"var\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.var is not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = mean(a, axis, dtype=a_dtype, keepdims=True, where=where)\n centered = a - a_mean\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n if where is None:\n if axis is None:\n normalizer = core.dimension_as_value(size(a))\n else:\n normalizer = core.dimension_as_value(_axis_size(a, axis))\n else:\n normalizer = sum(broadcast_to(where, shape(a)), axis, dtype=dtype, keepdims=keepdims)\n normalizer = normalizer - ddof\n\n result = sum(centered, axis, keepdims=keepdims, where=where)\n out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\ndef _var_promote_types(a_dtype, dtype):\n if dtype:\n if (not issubdtype(dtype, complexfloating) and\n issubdtype(a_dtype, complexfloating)):\n msg = (\"jax.numpy.var does not yet support real dtype parameters when \"\n \"computing the variance of an array of complex values. The \"\n \"semantics of numpy.var seem unclear in this case. 
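# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of `average` with 1-D weights along an axis, as handled above; with
# `returned=True` the (broadcast) sum of weights is returned as well.
import jax.numpy as jnp

a = jnp.array([[1., 2., 3.],
               [4., 5., 6.]])
w = jnp.array([1., 1., 2.])

avg, wsum = jnp.average(a, axis=1, weights=w, returned=True)
print(avg)    # expected: [2.25 5.25]  ((1 + 2 + 6) / 4 and (4 + 5 + 12) / 4)
print(wsum)   # expected: [4. 4.]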
Please comment \"\n \"on https://github.com/google/jax/issues/2283 if this behavior is \"\n \"important to you.\")\n raise ValueError(msg)\n a_dtype = promote_types(a_dtype, dtype)\n else:\n if not issubdtype(a_dtype, inexact):\n dtype = a_dtype = dtypes.canonicalize_dtype(float_)\n else:\n dtype = _complex_elem_type(a_dtype)\n a_dtype = promote_types(a_dtype, float32)\n return a_dtype, dtype\n\n\n@_wraps(np.std, skip_params=['out'])\ndef std(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n return _std(a, _ensure_optional_axes(axis), dtype, out, ddof, keepdims,\n where=where)\n\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef _std(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n _check_arraylike(\"std\", a)\n lax._check_user_dtype_supported(dtype, \"std\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.std is not supported.\")\n return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, where=where))\n\n\n@_wraps(np.ptp, skip_params=['out'])\ndef ptp(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=False):\n return _ptp(a, _ensure_optional_axes(axis), out, keepdims)\n\n@partial(jit, static_argnames=('axis', 'keepdims'))\ndef _ptp(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=False):\n _check_arraylike(\"ptp\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.ptp is not supported.\")\n x = amax(a, axis=axis, keepdims=keepdims)\n y = amin(a, axis=axis, keepdims=keepdims)\n return lax.sub(x, y)\n\n\n@_wraps(np.allclose)\n@partial(jit, static_argnames=('equal_nan',))\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n _check_arraylike(\"allclose\", a, b)\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\n@_wraps(np.count_nonzero)\n@partial(jit, static_argnames=('axis', 'keepdims'))\ndef count_nonzero(a, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n keepdims=False):\n _check_arraylike(\"count_nonzero\", a)\n return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,\n dtype=dtypes.canonicalize_dtype(np.int_), keepdims=keepdims)\n\n\n_NONZERO_DOC = \"\"\"\\\nBecause the size of the output of ``nonzero`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the output arrays: it must be specified statically for ``jnp.nonzero``\nto be compiled with non-static operands. If specified, the first `size` nonzero elements\nwill be returned; if there are fewer nonzero elements than `size` indicates, the result\nwill be padded with ``fill_value``, which defaults to zero. 
``fill_value`` may be a scalar,\nor a tuple specifying the fill value in each dimension.\n\"\"\"\n\n@_wraps(np.nonzero, lax_description=_NONZERO_DOC)\ndef nonzero(a, *, size=None, fill_value=None):\n a = atleast_1d(a)\n mask = a != 0\n if size is None:\n size = mask.sum()\n size = core.concrete_or_error(int, size,\n \"The size argument of jnp.nonzero must be statically specified \"\n \"to use jnp.nonzero within JAX transformations.\")\n if a.size == 0 or size == 0:\n return tuple(zeros(size, int) for dim in a.shape)\n flat_indices = cumsum(bincount(cumsum(mask), length=size))\n strides = (np.cumprod(a.shape[::-1])[::-1] // a.shape).astype(int_)\n out = tuple((flat_indices // stride) % size for stride, size in zip(strides, a.shape))\n if size is not None and fill_value is not None:\n if not isinstance(fill_value, tuple):\n fill_value = a.ndim * (fill_value,)\n if _shape(fill_value) != (a.ndim,):\n raise ValueError(f\"fill_value must be a scalar or a tuple of length {a.ndim}; got {fill_value}\")\n fill_mask = arange(size) >= mask.sum()\n out = tuple(where(fill_mask, fval, entry) for fval, entry in safe_zip(fill_value, out))\n return out\n\n@_wraps(np.flatnonzero, lax_description=_NONZERO_DOC)\ndef flatnonzero(a, *, size=None, fill_value=None):\n return nonzero(ravel(a), size=size, fill_value=fill_value)[0]\n\n\ndef _nan_reduction(a, name, jnp_reduction, init_val, nan_if_all_nan,\n axis=None, keepdims=None, **kwargs):\n _check_arraylike(name, a)\n if not issubdtype(_dtype(a), inexact):\n return jnp_reduction(a, axis=axis, keepdims=keepdims, **kwargs)\n\n out = jnp_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),\n axis=axis, keepdims=keepdims, **kwargs)\n if nan_if_all_nan:\n return where(all(isnan(a), axis=axis, keepdims=keepdims),\n _constant_like(a, nan), out)\n else:\n return out\n\n@_wraps(np.nanmin, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'keepdims'))\ndef nanmin(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None):\n return _nan_reduction(a, 'nanmin', min, inf, nan_if_all_nan=True,\n axis=axis, out=out, keepdims=keepdims)\n\n@_wraps(np.nanmax, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'keepdims'))\ndef nanmax(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None):\n return _nan_reduction(a, 'nanmax', max, -inf, nan_if_all_nan=True,\n axis=axis, out=out, keepdims=keepdims)\n\n@_wraps(np.nansum, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef nansum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None):\n lax._check_user_dtype_supported(dtype, \"nanprod\")\n return _nan_reduction(a, 'nansum', sum, 0, nan_if_all_nan=False,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n@_wraps(np.nanprod, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef nanprod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None):\n lax._check_user_dtype_supported(dtype, \"nanprod\")\n return _nan_reduction(a, 'nanprod', prod, 1, nan_if_all_nan=False,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n@_wraps(np.nanmean, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef nanmean(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False):\n _check_arraylike(\"nanmean\", a)\n lax._check_user_dtype_supported(dtype, \"nanmean\")\n if out is not None:\n raise 
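# --- Editor's note: illustrative usage sketch, not part of the library source. ---
# Example of the static `size` argument of `nonzero`/`flatnonzero` described
# above, which makes their otherwise data-dependent output shape jit-compatible.
import jax
import jax.numpy as jnp

x = jnp.array([0, 3, 0, 5, 7])

@jax.jit
def first_three_nonzero(v):
  return jnp.nonzero(v, size=3, fill_value=-1)[0]

print(first_three_nonzero(x))   # expected: [1 3 4]
print(jnp.flatnonzero(jnp.array([0, 2, 0]), size=3, fill_value=-1))  # expected: [ 1 -1 -1]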
NotImplementedError(\"The 'out' argument to jnp.nanmean is not supported.\")\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n return mean(a, axis, dtype, out, keepdims)\n if dtype is None:\n dtype = _dtype(a)\n nan_mask = logical_not(isnan(a))\n normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)\n normalizer = lax.convert_element_type(normalizer, dtype)\n td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)\n return td\n\n\n@_wraps(np.nanvar, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef nanvar(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanvar\", a)\n lax._check_user_dtype_supported(dtype, \"nanvar\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanvar is not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = nanmean(a, axis, dtype=a_dtype, keepdims=True)\n\n centered = where(isnan(a), 0, a - a_mean) # double-where trick for gradients.\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n normalizer = sum(logical_not(isnan(a)), axis=axis, keepdims=keepdims)\n normalizer = normalizer - ddof\n normalizer_mask = lax.le(normalizer, 0)\n result = sum(centered, axis, keepdims=keepdims)\n result = where(normalizer_mask, nan, result)\n divisor = where(normalizer_mask, 1, normalizer)\n out = lax.div(result, lax.convert_element_type(divisor, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.nanstd, skip_params=['out'])\n@partial(jit, static_argnames=('axis', 'dtype', 'keepdims'))\ndef nanstd(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanstd\", a)\n lax._check_user_dtype_supported(dtype, \"nanstd\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanstd is not supported.\")\n return sqrt(nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))\n\n\ndef _make_cumulative_reduction(np_reduction, reduction, fill_nan=False, fill_value=0):\n @_wraps(np_reduction, skip_params=['out'])\n def cumulative_reduction(a,\n axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None):\n return _cumulative_reduction(a, _ensure_optional_axes(axis), dtype, out)\n\n @partial(jit, static_argnames=('axis', 'dtype'))\n def _cumulative_reduction(a,\n axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None):\n _check_arraylike(np_reduction.__name__, a)\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{np_reduction.__name__} \"\n f\"is not supported.\")\n lax._check_user_dtype_supported(dtype, np_reduction.__name__)\n\n if axis is None or isscalar(a):\n a = ravel(a)\n axis = 0\n\n a_shape = list(shape(a))\n num_dims = len(a_shape)\n axis = _canonicalize_axis(axis, num_dims)\n\n if fill_nan:\n a = where(isnan(a), _constant_like(a, fill_value), a)\n\n if not dtype and _dtype(a) == bool_:\n dtype = int_\n if dtype:\n a = lax.convert_element_type(a, dtype)\n\n return reduction(a, axis)\n\n return cumulative_reduction\n\n\ncumsum = _make_cumulative_reduction(np.cumsum, lax.cumsum, fill_nan=False)\ncumprod = _make_cumulative_reduction(np.cumprod, lax.cumprod, fill_nan=False)\ncumproduct = cumprod\nnancumsum = _make_cumulative_reduction(np.nancumsum, lax.cumsum,\n 
fill_nan=True, fill_value=0)\nnancumprod = _make_cumulative_reduction(np.nancumprod, lax.cumprod,\n fill_nan=True, fill_value=1)\n\n\n@_wraps(np.unwrap)\n@partial(jit, static_argnames=('axis',))\ndef unwrap(p, discont=pi, axis: int = -1):\n _check_arraylike(\"unwrap\", p)\n dd = diff(p, axis=axis)\n ddmod = mod(dd + pi, 2 * pi) - pi\n ddmod = where((ddmod == -pi) & (dd > 0), pi, ddmod)\n\n ph_correct = where(abs(dd) < discont, 0, ddmod - dd)\n\n up = concatenate((\n lax.slice_in_dim(p, 0, 1, axis=axis),\n lax.slice_in_dim(p, 1, None, axis=axis) + cumsum(ph_correct, axis=axis)\n ), axis=axis)\n\n return up\n\n\n### Array-creation functions\n\ndef _check_no_padding(axis_padding, mode):\n if (axis_padding[0] > 0 or axis_padding[1] > 0):\n msg = \"Cannot apply '{}' padding to empty axis\"\n raise ValueError(msg.format(mode))\n\n\ndef _pad_constant(array, pad_width, constant_values):\n nd = ndim(array)\n constant_values = broadcast_to(asarray(constant_values), (nd, 2))\n constant_values = lax._convert_element_type(constant_values, array.dtype, dtypes.is_weakly_typed(array))\n for i in range(nd):\n widths = [(0, 0, 0)] * nd\n widths[i] = (pad_width[i, 0], 0, 0)\n array = lax.pad(array, constant_values[i, 0], widths)\n widths[i] = (0, pad_width[i, 1], 0)\n array = lax.pad(array, constant_values[i, 1], widths)\n return array\n\n\ndef _pad_wrap(array, pad_width):\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"wrap\")\n continue\n size = array.shape[i]\n repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)\n total_repeats = repeats.sum() + 1\n parts = []\n if left_remainder:\n parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]\n parts += total_repeats * [array]\n if right_remainder:\n parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]\n array = lax.concatenate(parts, dimension=i)\n return array\n\n\ndef _pad_symmetric_or_reflect(array, pad_width, mode, reflect_type):\n assert mode in (\"symmetric\", \"reflect\")\n assert reflect_type in (\"even\", \"odd\")\n\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], mode)\n continue\n\n n = array.shape[i]\n offset = 1 if (mode == \"reflect\" and n > 1) else 0\n\n def build_padding(array, padding, before):\n if before:\n edge = lax.slice_in_dim(array, 0, 1, axis=i)\n else:\n edge = lax.slice_in_dim(array, -1, None, axis=i)\n\n while padding > 0:\n curr_pad = _min(padding, n - offset)\n padding -= curr_pad\n\n if before:\n start = offset\n stop = offset + curr_pad\n else:\n start = -(curr_pad + offset)\n stop = None if (mode == \"symmetric\" or n == 1) else -1\n\n x = lax.slice_in_dim(array, start, stop, axis=i)\n x = flip(x, axis=i)\n\n if reflect_type == 'odd':\n x = 2 * edge - x\n if n > 1:\n if before:\n edge = lax.slice_in_dim(x, 0, 1, axis=i)\n else:\n edge = lax.slice_in_dim(x, -1, None, axis=i)\n\n if before:\n array = lax.concatenate([x, array], dimension=i)\n else:\n array = lax.concatenate([array, x], dimension=i)\n return array\n\n array = build_padding(array, pad_width[i, 0], before=True)\n array = build_padding(array, pad_width[i, 1], before=False)\n return array\n\n\ndef _pad_edge(array, pad_width):\n nd = ndim(array)\n for i in range(nd):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"edge\")\n continue\n\n n = array.shape[i]\n npad_before, npad_after = pad_width[i]\n\n edge_before = lax.slice_in_dim(array, 0, 1, axis=i)\n pad_before = repeat(edge_before, npad_before, axis=i)\n\n edge_after = 
lax.slice_in_dim(array, n-1, n, axis=i)\n pad_after = repeat(edge_after, npad_after, axis=i)\n\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_linear_ramp(array, pad_width, end_values):\n for axis in range(ndim(array)):\n edge_before = lax.slice_in_dim(array, 0, 1, axis=axis)\n edge_after = lax.slice_in_dim(array, -1, None, axis=axis)\n ramp_before = linspace(\n start=end_values[axis][0],\n stop=edge_before.squeeze(axis), # Dimension is replaced by linspace\n num=pad_width[axis][0],\n endpoint=False,\n dtype=array.dtype,\n axis=axis\n )\n ramp_before = lax._convert_element_type(ramp_before, weak_type=dtypes.is_weakly_typed(array))\n ramp_after = linspace(\n start=end_values[axis][1],\n stop=edge_after.squeeze(axis), # Dimension is replaced by linspace\n num=pad_width[axis][1],\n endpoint=False,\n dtype=array.dtype,\n axis=axis\n )\n ramp_after = lax._convert_element_type(ramp_after, weak_type=dtypes.is_weakly_typed(array))\n\n # Reverse linear space in appropriate dimension\n ramp_after = flip(ramp_after, axis)\n\n array = lax.concatenate([ramp_before, array, ramp_after], dimension=axis)\n return array\n\n\ndef _pad_stats(array, pad_width, stat_length, stat_func):\n nd = ndim(array)\n for i in range(nd):\n if stat_length is None:\n stat_before = stat_func(array, axis=i, keepdims=True)\n stat_after = stat_before\n else:\n array_length = array.shape[i]\n length_before, length_after = stat_length[i]\n if length_before == 0 or length_after == 0:\n raise ValueError(\"stat_length of 0 yields no value for padding\")\n\n # Limit stat_length to length of array.\n length_before = _min(length_before, array_length)\n length_after = _min(length_after, array_length)\n\n slice_before = lax.slice_in_dim(array, 0, length_before, axis=i)\n slice_after = lax.slice_in_dim(array, -length_after, None, axis=i)\n stat_before = stat_func(slice_before, axis=i, keepdims=True)\n stat_after = stat_func(slice_after, axis=i, keepdims=True)\n\n if np.issubdtype(array.dtype, np.integer):\n stat_before = round(stat_before)\n stat_after = round(stat_after)\n\n stat_before = lax._convert_element_type(stat_before, array.dtype, dtypes.is_weakly_typed(array))\n stat_after = lax._convert_element_type(stat_after, array.dtype, dtypes.is_weakly_typed(array))\n\n npad_before, npad_after = pad_width[i]\n pad_before = repeat(stat_before, npad_before, axis=i)\n pad_after = repeat(stat_after, npad_after, axis=i)\n\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_empty(array, pad_width):\n # Note: jax.numpy.empty = jax.numpy.zeros\n for i in range(ndim(array)):\n shape_before = array.shape[:i] + (pad_width[i][0],) + array.shape[i + 1:]\n pad_before = empty_like(array, shape=shape_before)\n\n shape_after = array.shape[:i] + (pad_width[i][1],) + array.shape[i + 1:]\n pad_after = empty_like(array, shape=shape_after)\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_func(array, pad_width, func, **kwargs):\n pad_width = _broadcast_to_pairs(pad_width, ndim(array), \"pad_width\")\n padded = _pad_constant(array, np.array(pad_width), 0)\n for axis in range(ndim(padded)):\n padded = apply_along_axis(func, axis, padded, pad_width[axis], axis, kwargs)\n return padded\n\n\ndef _broadcast_to_pairs(nvals, nd, name):\n nvals = np.asarray(tree_map(\n lambda x: core.concrete_or_error(np.array, x, context=f\"{name} argument of jnp.pad\"),\n nvals))\n if nvals.dtype.kind == 'O':\n raise TypeError(f'`{name}` 
entries must be the same shape.')\n\n if nvals.shape == (nd, 2):\n # ((before_1, after_1), ..., (before_N, after_N))\n return tuple(tuple(nval) for nval in nvals)\n elif nvals.shape == (1, 2):\n # ((before, after),)\n return tuple(tuple(nvals[0]) for i in range(nd))\n elif nvals.shape == (2,):\n # (before, after) (not in the numpy docstring but works anyway)\n return tuple(tuple(nvals) for i in range(nd))\n elif nvals.shape == (1,):\n # (pad,)\n return tuple((nvals[0], nvals[0]) for i in range(nd))\n elif nvals.shape == ():\n # pad\n return tuple((nvals.flat[0], nvals.flat[0]) for i in range(nd))\n else:\n raise ValueError(f\"jnp.pad: {name} with nd={nd} has unsupported shape {nvals.shape}. \"\n f\"Valid shapes are ({nd}, 2), (1, 2), (2,), (1,), or ().\")\n\n\n@partial(jit, static_argnums=(1, 2, 4, 5, 6))\ndef _pad(array, pad_width, mode, constant_values, stat_length, end_values, reflect_type):\n array = asarray(array)\n nd = ndim(array)\n\n if nd == 0:\n return array\n\n stat_funcs = {\"maximum\": amax, \"minimum\": amin,\n \"mean\": mean, \"median\": median}\n\n pad_width = _broadcast_to_pairs(pad_width, nd, \"pad_width\")\n pad_width = np.array(pad_width)\n assert pad_width.shape == (nd, 2), pad_width\n\n if np.any(pad_width < 0):\n raise ValueError(\"index can't contain negative values\")\n\n if mode == \"constant\":\n return _pad_constant(array, pad_width, constant_values)\n\n elif mode == \"wrap\":\n return _pad_wrap(array, pad_width)\n\n elif mode in (\"symmetric\", \"reflect\"):\n return _pad_symmetric_or_reflect(array, pad_width, mode, reflect_type)\n\n elif mode == \"edge\":\n return _pad_edge(array, pad_width)\n\n elif mode == \"linear_ramp\":\n end_values = _broadcast_to_pairs(end_values, nd, \"end_values\")\n return _pad_linear_ramp(array, pad_width, end_values)\n\n elif mode in stat_funcs:\n if stat_length is not None:\n stat_length = _broadcast_to_pairs(stat_length, nd, \"stat_length\")\n return _pad_stats(array, pad_width, stat_length, stat_funcs[mode])\n\n elif mode == \"empty\":\n return _pad_empty(array, pad_width)\n\n else:\n assert False, (\"Should not be reached since pad already handled unsupported and\"\n \"not implemented modes\")\n\n\n@_wraps(np.pad, lax_description=\"\"\"\\\nUnlike numpy, JAX \"function\" mode's argument (which is another function) should return\nthe modified array. 
This is because Jax arrays are immutable.\n(In numpy, \"function\" mode's argument should modify a rank 1 array in-place.)\n\"\"\")\ndef pad(array, pad_width, mode=\"constant\", **kwargs):\n _check_arraylike(\"pad\", array)\n pad_width = _broadcast_to_pairs(pad_width, ndim(array), \"pad_width\")\n if pad_width and np.array(pad_width).dtype.kind != 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n if callable(mode):\n return _pad_func(array, pad_width, mode, **kwargs)\n\n allowed_kwargs = {\n 'empty': [], 'edge': [], 'wrap': [],\n 'constant': ['constant_values'],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n }\n try:\n unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])\n except KeyError:\n msg = \"Unimplemented padding mode '{}' for np.pad.\"\n raise NotImplementedError(msg.format(mode))\n if unsupported_kwargs:\n raise ValueError(\"unsupported keyword arguments for mode '{}': {}\"\n .format(mode, unsupported_kwargs))\n # Set default value if not given.\n constant_values = kwargs.get('constant_values', 0)\n stat_length = kwargs.get('stat_length', None)\n end_values = kwargs.get('end_values', 0)\n reflect_type = kwargs.get('reflect_type', \"even\")\n\n return _pad(array, pad_width, mode, constant_values, stat_length, end_values, reflect_type)\n\n\n@_wraps(np.stack, skip_params=['out'])\ndef stack(arrays, axis: int = 0, out=None):\n if not len(arrays):\n raise ValueError(\"Need at least one array to stack.\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.stack is not supported.\")\n if isinstance(arrays, (np.ndarray, ndarray)):\n axis = _canonicalize_axis(axis, arrays.ndim)\n return concatenate(expand_dims(arrays, axis + 1), axis=axis)\n else:\n _check_arraylike(\"stack\", *arrays)\n shape0 = shape(arrays[0])\n axis = _canonicalize_axis(axis, len(shape0) + 1)\n new_arrays = []\n for a in arrays:\n if shape(a) != shape0:\n raise ValueError(\"All input arrays must have the same shape.\")\n new_arrays.append(expand_dims(a, axis))\n return concatenate(new_arrays, axis=axis)\n\n@_wraps(np.tile)\ndef tile(A, reps):\n _stackable(A) or _check_arraylike(\"tile\", A)\n try:\n iter(reps)\n except TypeError:\n reps = (reps,)\n reps = tuple(operator.index(rep) if core.is_constant_dim(rep) else rep\n for rep in reps)\n A_shape = (1,) * (len(reps) - ndim(A)) + shape(A)\n reps = (1,) * (len(A_shape) - len(reps)) + reps\n result = broadcast_to(reshape(A, [j for i in A_shape for j in [1, i]]),\n [k for pair in zip(reps, A_shape) for k in pair])\n return reshape(result, tuple(np.multiply(A_shape, reps)))\n\ndef _concatenate_array(arr, axis: int):\n # Fast path for concatenation when the input is an ndarray rather than a list.\n arr = asarray(arr)\n if arr.ndim == 0 or arr.shape[0] == 0:\n raise ValueError(\"Need at least one array to concatenate.\")\n if axis is None:\n return lax.reshape(arr, (arr.size,))\n if arr.ndim == 1:\n raise ValueError(\"Zero-dimensional arrays cannot be concatenated.\")\n axis = _canonicalize_axis(axis, arr.ndim - 1)\n shape = arr.shape[1:axis + 1] + (arr.shape[0] * arr.shape[axis + 1],) + arr.shape[axis + 2:]\n dimensions = [*range(1, axis + 1), 0, *range(axis + 1, arr.ndim)]\n return lax.reshape(arr, shape, dimensions)\n\n@_wraps(np.concatenate)\ndef concatenate(arrays, axis: int = 0):\n if isinstance(arrays, (np.ndarray, ndarray)):\n return 
_concatenate_array(arrays, axis)\n _stackable(*arrays) or _check_arraylike(\"concatenate\", *arrays)\n if not len(arrays):\n raise ValueError(\"Need at least one array to concatenate.\")\n if ndim(arrays[0]) == 0:\n raise ValueError(\"Zero-dimensional arrays cannot be concatenated.\")\n if axis is None:\n return concatenate([ravel(a) for a in arrays], axis=0)\n if hasattr(arrays[0], \"concatenate\"):\n return arrays[0].concatenate(arrays[1:], axis)\n axis = _canonicalize_axis(axis, ndim(arrays[0]))\n arrays = _promote_dtypes(*arrays)\n # lax.concatenate can be slow to compile for wide concatenations, so form a\n # tree of concatenations as a workaround especially for op-by-op mode.\n # (https://github.com/google/jax/issues/653).\n k = 16\n if len(arrays) == 1:\n return asarray(arrays[0])\n else:\n while len(arrays) > 1:\n arrays = [lax.concatenate(arrays[i:i+k], axis)\n for i in range(0, len(arrays), k)]\n return arrays[0]\n\n\n@_wraps(np.vstack)\ndef vstack(tup):\n if isinstance(tup, (np.ndarray, ndarray)):\n arrs = jax.vmap(atleast_2d)(tup)\n else:\n arrs = [atleast_2d(m) for m in tup]\n return concatenate(arrs, axis=0)\nrow_stack = vstack\n\n\n@_wraps(np.hstack)\ndef hstack(tup):\n if isinstance(tup, (np.ndarray, ndarray)):\n arrs = jax.vmap(atleast_1d)(tup)\n arr0_ndim = arrs.ndim - 1\n else:\n arrs = [atleast_1d(m) for m in tup]\n arr0_ndim = arrs[0].ndim\n return concatenate(arrs, axis=0 if arr0_ndim == 1 else 1)\n\n\n@_wraps(np.dstack)\ndef dstack(tup):\n if isinstance(tup, (np.ndarray, ndarray)):\n arrs = jax.vmap(atleast_3d)(tup)\n else:\n arrs = [atleast_3d(m) for m in tup]\n return concatenate(arrs, axis=2)\n\n\n@_wraps(np.column_stack)\ndef column_stack(tup):\n if isinstance(tup, (np.ndarray, ndarray)):\n arrs = jax.vmap(lambda x: atleast_2d(x).T)(tup) if tup.ndim < 3 else tup\n else:\n arrs = [atleast_2d(arr).T if arr.ndim < 2 else arr for arr in map(asarray, tup)]\n return concatenate(arrs, 1)\n\n\n@_wraps(np.choose, skip_params=['out'])\ndef choose(a, choices, out=None, mode='raise'):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.choose is not supported.\")\n _check_arraylike('choose', a, *choices)\n if not issubdtype(_dtype(a), integer):\n raise ValueError(\"`a` array must be integer typed\")\n N = len(choices)\n\n if mode == 'raise':\n a = core.concrete_or_error(asarray, a,\n \"The error occurred because jnp.choose was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if any((a < 0) | (a >= N)):\n raise ValueError(\"invalid entry in choice array\")\n elif mode == 'wrap':\n a = a % N\n elif mode == 'clip':\n a = clip(a, 0, N - 1)\n else:\n raise ValueError(f\"mode={mode!r} not understood. 
Must be 'raise', 'wrap', or 'clip'\")\n\n a, *choices = broadcast_arrays(a, *choices)\n return array(choices)[(a,) + indices(a.shape, sparse=True)]\n\n\ndef _atleast_nd(x, n):\n m = ndim(x)\n return lax.broadcast(x, (1,) * (n - m)) if m < n else x\n\ndef _block(xs):\n if isinstance(xs, tuple):\n raise ValueError(\"jax.numpy.block does not allow tuples, got {}\"\n .format(xs))\n elif isinstance(xs, list):\n if len(xs) == 0:\n raise ValueError(\"jax.numpy.block does not allow empty list arguments\")\n xs, depths = unzip2([_block(x) for x in xs])\n if _any(d != depths[0] for d in depths[1:]):\n raise ValueError(\"Mismatched list depths in jax.numpy.block\")\n rank = _max(depths[0], _max(ndim(x) for x in xs))\n xs = [_atleast_nd(x, rank) for x in xs]\n return concatenate(xs, axis=-depths[0]), depths[0] + 1\n else:\n return asarray(xs), 1\n\n@_wraps(np.block)\n@jit\ndef block(arrays):\n out, _ = _block(arrays)\n return out\n\n@_wraps(np.atleast_1d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\n@jit\ndef atleast_1d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n return arr if ndim(arr) >= 1 else reshape(arr, -1)\n else:\n return [atleast_1d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_2d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\n@jit\ndef atleast_2d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n if ndim(arr) >= 2:\n return arr\n elif ndim(arr) == 1:\n return expand_dims(arr, axis=0)\n else:\n return expand_dims(arr, axis=(0, 1))\n else:\n return [atleast_2d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_3d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\n@jit\ndef atleast_3d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n if ndim(arr) == 0:\n arr = expand_dims(arr, axis=(0, 1, 2))\n elif ndim(arr) == 1:\n arr = expand_dims(arr, axis=(0, 2))\n elif ndim(arr) == 2:\n arr = expand_dims(arr, axis=2)\n return arr\n else:\n return [atleast_3d(arr) for arr in arys]\n\n\n_ARRAY_DOC = \"\"\"\nThis function will create arrays on JAX's default device. For control of the\ndevice placement of data, see :func:`jax.device_put`. More information is\navailable in the JAX FAQ at :ref:`faq-data-placement` (full FAQ at\nhttps://jax.readthedocs.io/en/latest/faq.html).\n\"\"\"\n\n@_wraps(np.array, lax_description=_ARRAY_DOC)\ndef array(object, dtype=None, copy=True, order=\"K\", ndmin=0):\n if order is not None and order != \"K\":\n raise NotImplementedError(\"Only implemented for order='K'\")\n\n # check if the given dtype is compatible with JAX\n lax._check_user_dtype_supported(dtype, \"array\")\n\n # Here we make a judgment call: we only return a weakly-typed array when the\n # input object itself is weakly typed. That ensures asarray(x) is a no-op whenever\n # x is weak, but avoids introducing weak types with something like array([1, 2, 3])\n weak_type = dtype is None and dtypes.is_weakly_typed(object)\n\n # For Python scalar literals, call coerce_to_array to catch any overflow errors.\n # We don't use dtypes.is_python_scalar because we don't want this triggering for\n # traced values. 
We do this here because it matters whether or not dtype is None.\n # We don't assign the result because we want the raw object to be used for type\n # inference below.\n if isinstance(object, (bool, int, float, complex)):\n _ = dtypes.coerce_to_array(object, dtype)\n\n leaves = tree_leaves(object)\n if dtype is None:\n # Use lattice_result_type rather than result_type to avoid canonicalization.\n # Otherwise, weakly-typed inputs would have their dtypes canonicalized.\n try:\n dtype = dtypes._lattice_result_type(*leaves)[0] if leaves else dtypes.float_\n except TypeError:\n # This happens if, e.g. one of the entries is a memoryview object.\n # This is rare, so we only handle it if the normal path fails.\n leaves = [_convert_to_array_if_dtype_fails(leaf) for leaf in leaves]\n dtype = dtypes._lattice_result_type(*leaves)[0]\n\n if not weak_type:\n dtype = dtypes.canonicalize_dtype(dtype)\n\n # We can't use the ndarray class because we need to handle internal buffers\n # (See https://github.com/google/jax/issues/8950)\n ndarray_types = (device_array.DeviceArray, core.Tracer)\n\n if not _any(isinstance(leaf, ndarray_types) for leaf in leaves):\n # TODO(jakevdp): falling back to numpy here fails to overflow for lists containing\n # large integers; see discussion in https://github.com/google/jax/pull/6047.\n # More correct would be to call coerce_to_array on each leaf, but this may have\n # performance implications.\n out = np.array(object, dtype=dtype, ndmin=ndmin, copy=False)\n elif isinstance(object, ndarray_types):\n if object.aval is None:\n # object is a raw buffer; convert to device array on its current device.\n aval = ShapedArray(object.xla_shape().dimensions(), object.dtype,\n weak_type=bool(getattr(object, \"weak_type\", False)))\n object = device_array.make_device_array(aval, object.device(), object)\n out = _array_copy(object) if copy else object\n elif isinstance(object, (list, tuple)):\n if object:\n out = stack([asarray(elt, dtype=dtype) for elt in object])\n else:\n out = np.array([], dtype=dtype)\n else:\n try:\n view = memoryview(object)\n except TypeError:\n pass # `object` does not support the buffer interface.\n else:\n return array(np.asarray(view), dtype, copy, ndmin=ndmin)\n\n raise TypeError(\"Unexpected input type for array: {}\".format(type(object)))\n\n out = lax._convert_element_type(out, dtype, weak_type=weak_type)\n if ndmin > ndim(out):\n out = lax.expand_dims(out, range(ndmin - ndim(out)))\n return out\n\n\ndef _convert_to_array_if_dtype_fails(x):\n try:\n dtypes.dtype(x)\n except TypeError:\n return np.asarray(x)\n else:\n return x\n\n\n@_wraps(np.asarray, lax_description=_ARRAY_DOC)\ndef asarray(a, dtype=None, order=None):\n lax._check_user_dtype_supported(dtype, \"asarray\")\n dtype = dtypes.canonicalize_dtype(dtype) if dtype is not None else dtype\n return array(a, dtype=dtype, copy=False, order=order)\n\n\n@_wraps(np.zeros_like)\ndef zeros_like(a, dtype=None, shape=None):\n _check_arraylike(\"zeros_like\", a)\n lax._check_user_dtype_supported(dtype, \"zeros_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 0, dtype, shape)\n\n\n@_wraps(np.ones_like)\ndef ones_like(a, dtype=None, shape=None):\n _check_arraylike(\"ones_like\", a)\n lax._check_user_dtype_supported(dtype, \"ones_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 1, dtype, shape)\n\n\n@_wraps(np.full)\ndef full(shape, fill_value, dtype=None):\n lax._check_user_dtype_supported(dtype, \"full\")\n _check_arraylike(\"full\", fill_value)\n if 
ndim(fill_value) == 0:\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, fill_value, dtype)\n else:\n return broadcast_to(asarray(fill_value, dtype=dtype), shape)\n\n\n@_wraps(np.full_like)\ndef full_like(a, fill_value, dtype=None, shape=None):\n lax._check_user_dtype_supported(dtype, \"full_like\")\n _check_arraylike(\"full_like\", a, fill_value)\n if shape is not None:\n shape = (shape,) if ndim(shape) == 0 else shape\n if ndim(fill_value) == 0:\n return lax.full_like(a, fill_value, dtype, shape)\n else:\n shape = np.shape(a) if shape is None else shape\n dtype = result_type(a) if dtype is None else dtype\n return broadcast_to(asarray(fill_value, dtype=dtype), shape)\n\n\n@_wraps(np.zeros)\ndef zeros(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"zeros\")\n shape = canonicalize_shape((shape,) if ndim(shape) == 0 else shape)\n return lax.full(shape, 0, _jnp_dtype(dtype))\n\n@_wraps(np.ones)\ndef ones(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"ones\")\n shape = canonicalize_shape((shape,) if ndim(shape) == 0 else shape)\n return lax.full(shape, 1, _jnp_dtype(dtype))\n\n\n@_wraps(np.array_equal)\ndef array_equal(a1, a2, equal_nan=False):\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n if shape(a1) != shape(a2):\n return False\n eq = asarray(a1 == a2)\n if equal_nan:\n eq = logical_or(eq, logical_and(isnan(a1), isnan(a2)))\n return all(eq)\n\n\n@_wraps(np.array_equiv)\ndef array_equiv(a1, a2):\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n try:\n eq = equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return all(eq)\n\n\n# We can't create uninitialized arrays in XLA; use zeros for empty.\nempty_like = zeros_like\nempty = zeros\n\n\n@_wraps(np.eye)\ndef eye(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"eye\")\n N = core.canonicalize_dim(N, \"'N' argument of jnp.eye()\")\n M = N if M is None else core.canonicalize_dim(M, \"'M' argument of jnp.eye()\")\n if N < 0 or M < 0:\n raise ValueError(f\"negative dimensions are not allowed, got {N} and {M}\")\n k = operator.index(k)\n return lax._eye(_jnp_dtype(dtype), (N, M), k)\n\n\n@_wraps(np.identity)\ndef identity(n, dtype=None):\n lax._check_user_dtype_supported(dtype, \"identity\")\n return eye(n, dtype=dtype)\n\n\n@_wraps(np.arange)\ndef arange(start: core.DimSize, stop: Optional[core.DimSize]=None,\n step: Optional[core.DimSize]=None, dtype=None):\n lax._check_user_dtype_supported(dtype, \"arange\")\n require = partial(core.concrete_or_error, None)\n msg = \"It arose in jax.numpy.arange argument `{}`.\".format\n if _any(core.is_special_dim_size(d) for d in (start, stop, step)):\n if stop is not None or step is not None:\n raise ValueError(\n \"jax.numpy.arange supports non-constant arguments only in single-argument form. 
\"\n f\"Found jax.numpy.arange(start={start}, stop={stop}, step={step})\")\n return lax.iota(int_, start)\n if dtype is None:\n dtype = result_type(start, *(x for x in [stop, step] if x is not None))\n dtype = _jnp_dtype(dtype)\n if stop is None and step is None:\n start = require(start, msg(\"stop\"))\n start = np.ceil(start).astype(int)\n return lax.iota(dtype, start)\n else:\n start = require(start, msg(\"start\"))\n stop = None if stop is None else require(stop, msg(\"stop\"))\n step = None if step is None else require(step, msg(\"step\"))\n return array(np.arange(start, stop=stop, step=step, dtype=dtype))\n\n\ndef _wrap_numpy_nullary_function(f):\n \"\"\"Adapts `f` to return a DeviceArray instead of an np.ndarray.\n\n `f` cannot have any non-static array arguments.\n \"\"\"\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n args = [core.concrete_or_error(None, arg, f\"the error occured in argument {i} jnp.{f.__name__}()\")\n for i, arg in enumerate(args)]\n kwargs = {key: core.concrete_or_error(None, val, f\"the error occured in argument '{key}' jnp.{f.__name__}()\")\n for key, val in kwargs.items()}\n return asarray(f(*args, **kwargs))\n return wrapper\n\n\n@_wraps(np.linspace)\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,\n axis: int = 0):\n num = core.concrete_or_error(operator.index, num, \"'num' argument of jnp.linspace\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.linspace\")\n return _linspace(start, stop, int(num), endpoint, retstep, dtype,\n operator.index(axis))\n\n@partial(jit, static_argnames=('num', 'endpoint', 'retstep', 'dtype', 'axis'))\ndef _linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,\n axis: int = 0):\n \"\"\"Implementation of linspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"linspace\")\n if num < 0:\n raise ValueError(f\"Number of samples, {num}, must be non-negative.\")\n _check_arraylike(\"linspace\", start, stop)\n\n if dtype is None:\n dtype = result_type(start, stop, dtypes.canonicalize_dtype(float_))\n dtype = _jnp_dtype(dtype)\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n\n bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n broadcast_start = broadcast_to(start, bounds_shape)\n broadcast_stop = broadcast_to(stop, bounds_shape)\n axis = len(bounds_shape) + axis + 1 if axis < 0 else axis\n bounds_shape.insert(axis, 1)\n div = (num - 1) if endpoint else num\n if num > 1:\n delta = lax.convert_element_type(stop - start, computation_dtype) / div\n iota_shape = [1,] * len(bounds_shape)\n iota_shape[axis] = div\n # This approach recovers the endpoints with float32 arithmetic,\n # but can lead to rounding errors for integer outputs.\n real_dtype = finfo(computation_dtype).dtype\n step = reshape(lax.iota(real_dtype, div), iota_shape) / div\n out = (reshape(broadcast_start, bounds_shape) * (1 - step) +\n reshape(broadcast_stop, bounds_shape) * step)\n\n if endpoint:\n out = lax.concatenate([out, lax.expand_dims(broadcast_stop, (axis,))],\n _canonicalize_axis(axis, out.ndim))\n\n elif num == 1:\n delta = nan if endpoint else stop - start\n out = reshape(broadcast_start, bounds_shape)\n else: # num == 0 degenerate case, match numpy behavior\n empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n empty_shape.insert(axis, 0)\n delta = nan\n out = 
reshape(array([], dtype=dtype), empty_shape)\n\n if issubdtype(dtype, integer) and not issubdtype(out.dtype, integer):\n out = lax.floor(out)\n\n if retstep:\n return lax.convert_element_type(out, dtype), delta\n else:\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.logspace)\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,\n axis: int = 0):\n num = core.concrete_or_error(operator.index, num, \"'num' argument of jnp.logspace\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.logspace\")\n return _logspace(start, stop, int(num), endpoint, base, dtype,\n operator.index(axis))\n\n@partial(jit, static_argnames=('num', 'endpoint', 'dtype', 'axis'))\ndef _logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,\n axis: int = 0):\n \"\"\"Implementation of logspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"logspace\")\n if dtype is None:\n dtype = result_type(start, stop, dtypes.canonicalize_dtype(float_))\n dtype = _jnp_dtype(dtype)\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n _check_arraylike(\"logspace\", start, stop)\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n lin = linspace(start, stop, num,\n endpoint=endpoint, retstep=False, dtype=None, axis=axis)\n return lax.convert_element_type(power(base, lin), dtype)\n\n\n@_wraps(np.geomspace)\ndef geomspace(start, stop, num=50, endpoint=True, dtype=None, axis: int = 0):\n num = core.concrete_or_error(operator.index, num, \"'num' argument of jnp.geomspace\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.geomspace\")\n return _geomspace(start, stop, int(num), endpoint, dtype,\n operator.index(axis))\n\n@partial(jit, static_argnames=('num', 'endpoint', 'dtype', 'axis'))\ndef _geomspace(start, stop, num=50, endpoint=True, dtype=None, axis: int = 0):\n \"\"\"Implementation of geomspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"geomspace\")\n if dtype is None:\n dtype = result_type(start, stop, dtypes.canonicalize_dtype(float_))\n dtype = _jnp_dtype(dtype)\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n _check_arraylike(\"geomspace\", start, stop)\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n # follow the numpy geomspace convention for negative and complex endpoints\n signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2\n res = signflip * logspace(log10(signflip * start),\n log10(signflip * stop), num,\n endpoint=endpoint, base=10.0,\n dtype=computation_dtype, axis=0)\n if axis != 0:\n res = moveaxis(res, 0, axis)\n return lax.convert_element_type(res, dtype)\n\n\n@_wraps(np.meshgrid, lax_description=_ARRAY_VIEW_DOC)\ndef meshgrid(*xi, copy=True, sparse=False, indexing='xy'):\n _check_arraylike(\"meshgrid\", *xi)\n args = [asarray(x) for x in xi]\n if not copy:\n raise ValueError(\"jax.numpy.meshgrid only supports copy=True\")\n if indexing not in [\"xy\", \"ij\"]:\n raise ValueError(f\"Valid values for indexing are 'xy' and 'ij', got {indexing}\")\n if _any(a.ndim != 1 for a in args):\n raise ValueError(\"Arguments to jax.numpy.meshgrid must be 1D, got shapes \"\n f\"{[a.shape for a in args]}\")\n if indexing == \"xy\" and len(args) >= 2:\n args[0], args[1] = args[1], args[0]\n shape = [1 if sparse else a.shape[0] for a in args]\n _a_shape = lambda i, a: 
[*shape[:i], a.shape[0], *shape[i + 1:]] if sparse else shape\n output = [lax.broadcast_in_dim(a, _a_shape(i, a), (i,)) for i, a, in enumerate(args)]\n if indexing == \"xy\" and len(args) >= 2:\n output[0], output[1] = output[1], output[0]\n return output\n\n\ndef _make_1d_grid_from_slice(s: slice, op_name: str):\n start = core.concrete_or_error(None, s.start,\n f\"slice start of jnp.{op_name}\") or 0\n stop = core.concrete_or_error(None, s.stop,\n f\"slice stop of jnp.{op_name}\")\n step = core.concrete_or_error(None, s.step,\n f\"slice step of jnp.{op_name}\") or 1\n if np.iscomplex(step):\n newobj = linspace(start, stop, int(_abs(step)))\n else:\n newobj = arange(start, stop, step)\n\n return newobj\n\n\nclass _IndexGrid:\n def __getitem__(self, key):\n single_slice = isinstance(key, slice)\n if single_slice:\n key = (key,)\n output = []\n for k in key:\n output.append(_make_1d_grid_from_slice(k, op_name=self.op_name))\n if single_slice:\n return output[0]\n output = meshgrid(*output, indexing='ij', sparse=self.sparse)\n return output if self.sparse else stack(output, 0)\n\n\nclass _Mgrid(_IndexGrid):\n \"\"\"Return dense multi-dimensional \"meshgrid\".\n\n LAX-backend implementation of :obj:`numpy.mgrid`. This is a convenience wrapper for\n functionality provided by :func:`jax.numpy.meshgrid` with ``sparse=False``.\n\n See Also:\n jnp.ogrid: open/sparse version of jnp.mgrid\n\n Examples:\n Pass ``[start:stop:step]`` to generate values similar to :func:`jax.numpy.arange`:\n\n >>> jnp.mgrid[0:4:1]\n DeviceArray([0, 1, 2, 3], dtype=int32)\n\n Passing an imaginary step generates values similar to :func:`jax.numpy.linspace`:\n\n >>> jnp.mgrid[0:1:4j]\n DeviceArray([0. , 0.33333334, 0.6666667 , 1. ], dtype=float32)\n\n Multiple slices can be used to create broadcasted grids of indices:\n\n >>> jnp.mgrid[:2, :3]\n DeviceArray([[[0, 0, 0],\n [1, 1, 1]],\n [[0, 1, 2],\n [0, 1, 2]]], dtype=int32)\n \"\"\"\n sparse = False\n op_name = \"mgrid\"\n\nmgrid = _Mgrid()\n\n\nclass _Ogrid(_IndexGrid):\n \"\"\"Return open multi-dimensional \"meshgrid\".\n\n LAX-backend implementation of :obj:`numpy.ogrid`. This is a convenience wrapper for\n functionality provided by :func:`jax.numpy.meshgrid` with ``sparse=True``.\n\n See Also:\n jnp.mgrid: dense version of jnp.ogrid\n\n Examples:\n Pass ``[start:stop:step]`` to generate values similar to :func:`jax.numpy.arange`:\n\n >>> jnp.ogrid[0:4:1]\n DeviceArray([0, 1, 2, 3], dtype=int32)\n\n Passing an imaginary step generates values similar to :func:`jax.numpy.linspace`:\n\n >>> jnp.ogrid[0:1:4j]\n DeviceArray([0. , 0.33333334, 0.6666667 , 1. 
], dtype=float32)\n\n Multiple slices can be used to create sparse grids of indices:\n\n >>> jnp.ogrid[:2, :3]\n [DeviceArray([[0],\n [1]], dtype=int32),\n DeviceArray([[0, 1, 2]], dtype=int32)]\n \"\"\"\n sparse = True\n op_name = \"ogrid\"\n\n\nogrid = _Ogrid()\n\n\nclass _AxisConcat:\n \"\"\"Concatenates slices, scalars and array-like objects along a given axis.\"\"\"\n def __getitem__(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n\n params = [self.axis, self.ndmin, self.trans1d, -1]\n\n if isinstance(key[0], str):\n # split off the directive\n directive, *key = key\n # check two special cases: matrix directives\n if directive == \"r\":\n params[-1] = 0\n elif directive == \"c\":\n params[-1] = 1\n else:\n vec = directive.split(\",\")\n k = len(vec)\n if k < 4:\n vec += params[k:]\n else:\n # ignore everything after the first three comma-separated ints\n vec = vec[:3] + params[-1]\n try:\n params = list(map(int, vec))\n except ValueError as err:\n raise ValueError(\n \"could not understand directive {!r}\".format(directive)\n ) from err\n\n axis, ndmin, trans1d, matrix = params\n\n output = []\n for item in key:\n if isinstance(item, slice):\n newobj = _make_1d_grid_from_slice(item, op_name=self.op_name)\n elif isinstance(item, str):\n raise ValueError(\"string directive must be placed at the beginning\")\n else:\n newobj = item\n\n newobj = array(newobj, copy=False, ndmin=ndmin)\n\n if trans1d != -1 and ndmin - ndim(item) > 0:\n shape_obj = list(range(ndmin))\n # Calculate number of left shifts, with overflow protection by mod\n num_lshifts = ndmin - _abs(ndmin + trans1d + 1) % ndmin\n shape_obj = tuple(shape_obj[num_lshifts:] + shape_obj[:num_lshifts])\n\n newobj = transpose(newobj, shape_obj)\n\n output.append(newobj)\n\n res = concatenate(tuple(output), axis=axis)\n\n if matrix != -1 and res.ndim == 1:\n # insert 2nd dim at axis 0 or 1\n res = expand_dims(res, matrix)\n\n return res\n\n def __len__(self):\n return 0\n\n\nclass RClass(_AxisConcat):\n \"\"\"Concatenate slices, scalars and array-like objects along the first axis.\n\n LAX-backend implementation of :obj:`numpy.r_`.\n\n See Also:\n ``jnp.c_``: Concatenates slices, scalars and array-like objects along the last axis.\n\n Examples:\n Passing slices in the form ``[start:stop:step]`` generates ``jnp.arange`` objects:\n\n >>> jnp.r_[-1:5:1, 0, 0, jnp.array([1,2,3])]\n DeviceArray([-1, 0, 1, 2, 3, 4, 0, 0, 1, 2, 3], dtype=int32)\n\n An imaginary value for ``step`` will create a ``jnp.linspace`` object instead,\n which includes the right endpoint:\n\n >>> jnp.r_[-1:1:6j, 0, jnp.array([1,2,3])]\n DeviceArray([-1. , -0.6 , -0.20000002, 0.20000005,\n 0.6 , 1. , 0. , 1. ,\n 2. , 3. 
], dtype=float32)\n\n Use a string directive of the form ``\"axis,dims,trans1d\"`` as the first argument to\n specify concatenation axis, minimum number of dimensions, and the position of the\n upgraded array's original dimensions in the resulting array's shape tuple:\n\n >>> jnp.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, 2D output\n DeviceArray([[1, 2, 3],\n [4, 5, 6]], dtype=int32)\n\n >>> jnp.r_['0,2,0', [1,2,3], [4,5,6]] # push last input axis to the front\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n Negative values for ``trans1d`` offset the last axis towards the start\n of the shape tuple:\n\n >>> jnp.r_['0,2,-2', [1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n Use the special directives ``\"r\"`` or ``\"c\"`` as the first argument on flat inputs\n to create an array with an extra row or column axis, respectively:\n\n >>> jnp.r_['r',[1,2,3], [4,5,6]]\n DeviceArray([[1, 2, 3, 4, 5, 6]], dtype=int32)\n\n >>> jnp.r_['c',[1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n For higher-dimensional inputs (``dim >= 2``), both directives ``\"r\"`` and ``\"c\"``\n give the same result.\n \"\"\"\n axis = 0\n ndmin = 1\n trans1d = -1\n op_name = \"r_\"\n\n\nr_ = RClass()\n\n\nclass CClass(_AxisConcat):\n \"\"\"Concatenate slices, scalars and array-like objects along the last axis.\n\n LAX-backend implementation of :obj:`numpy.c_`.\n\n See Also:\n ``jnp.r_``: Concatenates slices, scalars and array-like objects along the first axis.\n\n Examples:\n\n >>> a = jnp.arange(6).reshape((2,3))\n >>> jnp.c_[a,a]\n DeviceArray([[0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5]], dtype=int32)\n\n Use a string directive of the form ``\"axis:dims:trans1d\"`` as the first argument to specify\n concatenation axis, minimum number of dimensions, and the position of the upgraded array's\n original dimensions in the resulting array's shape tuple:\n\n >>> jnp.c_['0,2', [1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n >>> jnp.c_['0,2,-1', [1,2,3], [4,5,6]]\n DeviceArray([[1, 2, 3],\n [4, 5, 6]], dtype=int32)\n\n Use the special directives ``\"r\"`` or ``\"c\"`` as the first argument on flat inputs\n to create an array with inputs stacked along the last axis:\n\n >>> jnp.c_['r',[1,2,3], [4,5,6]]\n DeviceArray([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n \"\"\"\n axis = -1\n ndmin = 2\n trans1d = 0\n op_name = \"c_\"\n\nc_ = CClass()\n\ns_ = np.s_\n\nindex_exp = np.index_exp\n\n@_wraps(np.i0)\n@jit\ndef i0(x):\n x_orig = x\n x, = _promote_args_inexact(\"i0\", x)\n if not issubdtype(x.dtype, np.floating):\n raise ValueError(f\"Unsupported input type to jax.numpy.i0: {_dtype(x_orig)}\")\n x = lax.abs(x)\n return lax.mul(lax.exp(x), lax.bessel_i0e(x))\n\n\n@_wraps(np.ix_)\ndef ix_(*args):\n _check_arraylike(\"ix\", *args)\n n = len(args)\n output = []\n for i, a in enumerate(args):\n a = asarray(a)\n if len(a.shape) != 1:\n msg = \"Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}\"\n raise ValueError(msg.format(a.shape))\n if _dtype(a) == bool_:\n raise NotImplementedError(\n \"Boolean arguments to jax.numpy.ix_ are not implemented\")\n shape = [1] * n\n shape[i] = a.shape[0]\n if a.size == 0:\n # Numpy uses an integer index type for empty arrays.\n output.append(lax.full(shape, np.zeros((), np.intp)))\n else:\n output.append(lax.broadcast_in_dim(a, shape, (i,)))\n return tuple(output)\n\n\n@_wraps(np.indices)\ndef indices(dimensions, dtype=int32, 
sparse=False):\n dimensions = tuple(\n core.concrete_or_error(int, d, \"dimensions argument of jnp.indices\")\n for d in dimensions)\n N = len(dimensions)\n output = []\n s = dimensions\n for i, dim in enumerate(dimensions):\n idx = lax.iota(dtype, dim)\n if sparse:\n s = (1,)*i + (dim,) + (1,)*(N - i - 1)\n output.append(lax.broadcast_in_dim(idx, s, (i,)))\n if sparse:\n return tuple(output)\n return stack(output, 0) if output else array([], dtype=dtype)\n\n\n_TOTAL_REPEAT_LENGTH_DOC = \"\"\"\\\nJax adds the optional `total_repeat_length` parameter which specifies the total\nnumber of repeat, and defaults to sum(repeats). It must be specified for repeat\nto be compilable. If `sum(repeats)` is larger than the specified\n`total_repeat_length` the remaining values will be discarded. In the case of\n`sum(repeats)` being smaller than the specified target length, the final value\nwill be repeated.\n\"\"\"\n\n\n@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)\ndef repeat(a, repeats, axis: Optional[int] = None, *, total_repeat_length=None):\n _check_arraylike(\"repeat\", a, repeats)\n\n if axis is None:\n a = ravel(a)\n axis = 0\n\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.repeat()\")\n assert isinstance(axis, int) # to appease mypy\n\n # If total_repeat_length is not given, can't compile, use a default.\n if total_repeat_length is None:\n repeats = core.concrete_or_error(np.array, repeats,\n \"When jit-compiling jnp.repeat, the total number of repeats must be static. \"\n \"To fix this, either specify a static value for `repeats`, or pass a static \"\n \"value to `total_repeat_length`.\")\n\n # Fast path for when repeats is a scalar.\n if np.ndim(repeats) == 0 and ndim(a) != 0:\n input_shape = a.shape\n aux_axis = axis if axis < 0 else axis + 1\n a = expand_dims(a, aux_axis)\n reps = [1] * len(a.shape)\n reps[aux_axis] = repeats\n a = tile(a, reps)\n result_shape = list(input_shape)\n result_shape[axis] *= repeats\n return reshape(a, result_shape)\n\n repeats = np.ravel(repeats)\n if ndim(a) != 0:\n repeats = np.broadcast_to(repeats, [a.shape[axis]])\n total_repeat_length = np.sum(repeats)\n else:\n repeats = ravel(repeats)\n if ndim(a) != 0:\n repeats = broadcast_to(repeats, [a.shape[axis]])\n\n # Special case when a is a scalar.\n if ndim(a) == 0:\n if repeats.shape == (1,):\n return full([total_repeat_length], a)\n else:\n raise ValueError('`repeat` with a scalar parameter `a` is only '\n 'implemented for scalar values of the parameter `repeats`.')\n\n # Special case if total_repeat_length is zero.\n if total_repeat_length == 0:\n result_shape = list(a.shape)\n result_shape[axis] = 0\n return reshape(array([], dtype=a.dtype), result_shape)\n\n # If repeats is on a zero sized axis, then return the array.\n if a.shape[axis] == 0:\n return a\n\n # This implementation of repeat avoid having to instantiate a large.\n # intermediate tensor.\n\n # Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.\n exclusive_repeats = roll(repeats, shift=1).at[0].set(0)\n # Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]\n scatter_indices = cumsum(exclusive_repeats)\n # Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]\n block_split_indicators = zeros([total_repeat_length], dtype=int32)\n block_split_indicators = block_split_indicators.at[scatter_indices].add(1)\n # Cumsum again to get scatter indices for repeat, e.g. 
[0,1,1,3,3,3,3,3]\n gather_indices = cumsum(block_split_indicators) - 1\n return take(a, gather_indices, axis=axis)\n\n\n@_wraps(np.tri)\ndef tri(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"tri\")\n M = M if M is not None else N\n dtype = dtype or float32\n return lax._tri(dtype, (N, M), k)\n\n\n@_wraps(np.tril)\n@partial(jit, static_argnames=('k',))\ndef tril(m, k=0):\n _check_arraylike(\"tril\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.tril must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))\n\n\n@_wraps(np.triu, update_doc=False)\n@partial(jit, static_argnames=('k',))\ndef triu(m, k=0):\n _check_arraylike(\"triu\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.triu must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)\n\n\n@_wraps(np.trace, skip_params=['out'])\n@partial(jit, static_argnames=('offset', 'axis1', 'axis2', 'dtype'))\ndef trace(a, offset=0, axis1: int = 0, axis2: int = 1, dtype=None, out=None):\n _check_arraylike(\"trace\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.trace is not supported.\")\n lax._check_user_dtype_supported(dtype, \"trace\")\n\n axis1 = _canonicalize_axis(axis1, ndim(a))\n axis2 = _canonicalize_axis(axis2, ndim(a))\n\n a_shape = shape(a)\n if dtype is None:\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n default_int = dtypes.canonicalize_dtype(np.int_)\n if iinfo(dtype).bits < iinfo(default_int).bits:\n dtype = default_int\n\n # Move the axis? dimensions to the end.\n perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce.\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n return sum(a, axis=(-2, -1), dtype=dtype)\n\n\ndef _wrap_indices_function(f):\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n args = [core.concrete_or_error(\n None, arg, f\"argument {i} of jnp.{f.__name__}()\")\n for i, arg in enumerate(args)]\n kwargs = {key: core.concrete_or_error(\n None, val, f\"argument '{key}' of jnp.{f.__name__}()\")\n for key, val in kwargs.items()}\n return tuple(asarray(x) for x in f(*args, **kwargs))\n return wrapper\n\ntril_indices = _wrap_indices_function(np.tril_indices)\ntriu_indices = _wrap_indices_function(np.triu_indices)\nmask_indices = _wrap_indices_function(np.mask_indices)\n\n\n@_wraps(np.triu_indices_from)\ndef triu_indices_from(arr, k=0):\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.tril_indices_from)\ndef tril_indices_from(arr, k=0):\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.diag_indices)\ndef diag_indices(n, ndim=2):\n n = core.concrete_or_error(operator.index, n, \"'n' argument of jnp.diag_indices()\")\n ndim = core.concrete_or_error(operator.index, ndim, \"'ndim' argument of jnp.diag_indices()\")\n if n < 0:\n raise ValueError(\"n argument to diag_indices must be nonnegative, got {}\"\n .format(n))\n if ndim < 0:\n raise ValueError(\"ndim argument to diag_indices must be nonnegative, got {}\"\n .format(ndim))\n return (lax.iota(int_, n),) * ndim\n\n@_wraps(np.diag_indices_from)\ndef diag_indices_from(arr):\n _check_arraylike(\"diag_indices_from\", arr)\n if not arr.ndim 
>= 2:\n raise ValueError(\"input array must be at least 2-d\")\n\n if len(set(arr.shape)) != 1:\n raise ValueError(\"All dimensions of input must be of equal length\")\n\n return diag_indices(arr.shape[0], ndim=arr.ndim)\n\n@_wraps(np.diagonal, lax_description=_ARRAY_VIEW_DOC)\n@partial(jit, static_argnames=('offset', 'axis1', 'axis2'))\ndef diagonal(a, offset=0, axis1: int = 0, axis2: int = 1):\n _check_arraylike(\"diagonal\", a)\n a_shape = shape(a)\n a_ndims = len(a_shape)\n offset = core.concrete_or_error(operator.index, offset, \"'offset' argument of jnp.diagonal()\")\n\n # Move the two dimensions to the end.\n axis1 = _canonicalize_axis(axis1, a_ndims)\n axis2 = _canonicalize_axis(axis2, a_ndims)\n perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce over one of the axes\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n reduce_axis = -2 if offset < 0 else -1\n d = sum(a, axis=reduce_axis, dtype=_dtype(a))\n\n # Slice out the correct diagonal size.\n diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),\n a_shape[axis2] - _max(offset, 0)))\n return lax.slice_in_dim(d, 0, diag_size, axis=-1)\n\n\n@_wraps(np.diag, lax_description=_ARRAY_VIEW_DOC)\ndef diag(v, k=0):\n return _diag(v, int(k))\n\n@partial(jit, static_argnames=('k',))\ndef _diag(v, k):\n _check_arraylike(\"diag\", v)\n v_shape = shape(v)\n if len(v_shape) == 1:\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n n = v_shape[0] + _abs(k)\n v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))\n return where(eye(n, k=k, dtype=bool), v, zeros_like(v))\n elif len(v_shape) == 2:\n return diagonal(v, offset=k)\n else:\n raise ValueError(\"diag input must be 1d or 2d\")\n\n_SCALAR_VALUE_DOC = \"\"\"\\\nThis differs from np.diagflat for some scalar values of v,\njax always returns a two-dimensional array, whereas numpy may\nreturn a scalar depending on the type of v.\n\"\"\"\n\n@_wraps(np.diagflat, lax_description=_SCALAR_VALUE_DOC)\ndef diagflat(v, k=0):\n _check_arraylike(\"diagflat\", v)\n v = ravel(v)\n v_length = len(v)\n adj_length = v_length + _abs(k)\n res = zeros(adj_length*adj_length, dtype=v.dtype)\n i = arange(0, adj_length-_abs(k))\n if (k >= 0):\n fi = i+k+i*adj_length\n else:\n fi = i+(i-k)*adj_length\n res = res.at[fi].set(v)\n res = res.reshape(adj_length, adj_length)\n return res\n\n_POLY_DOC = \"\"\"\\\nThis differs from np.poly when an integer array is given.\nnp.poly returns a result with dtype float64 in this case.\njax returns a result with an inexact type, but not necessarily\nfloat64.\n\nThis also differs from np.poly when the input array strictly\ncontains pairs of complex conjugates, e.g. 
[1j, -1j, 1-1j, 1+1j].\nnp.poly returns an array with a real dtype in such cases.\njax returns an array with a complex dtype in such cases.\n\"\"\"\n\n@_wraps(np.poly, lax_description=_POLY_DOC)\n@jit\ndef poly(seq_of_zeros):\n _check_arraylike('poly', seq_of_zeros)\n seq_of_zeros, = _promote_dtypes_inexact(seq_of_zeros)\n seq_of_zeros = atleast_1d(seq_of_zeros)\n\n sh = seq_of_zeros.shape\n if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:\n # import at runtime to avoid circular import\n from jax._src.numpy import linalg\n seq_of_zeros = linalg.eigvals(seq_of_zeros)\n\n if seq_of_zeros.ndim != 1:\n raise ValueError(\"input must be 1d or non-empty square 2d array.\")\n\n dt = seq_of_zeros.dtype\n if len(seq_of_zeros) == 0:\n return ones((), dtype=dt)\n\n a = ones((1,), dtype=dt)\n for k in range(len(seq_of_zeros)):\n a = convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), mode='full')\n\n return a\n\n\n@_wraps(np.polyval, lax_description=\"\"\"\\\nThe ``unroll`` parameter is JAX specific. It does not effect correctness but can\nhave a major impact on performance for evaluating high-order polynomials. The\nparameter controls the number of unrolled steps with ``lax.scan`` inside the\n``polyval`` implementation. Consider setting ``unroll=128`` (or even higher) to\nimprove runtime performance on accelerators, at the cost of increased\ncompilation time.\n\"\"\")\n@partial(jax.jit, static_argnames=['unroll'])\ndef polyval(p, x, *, unroll=16):\n _check_arraylike(\"polyval\", p, x)\n p, x = _promote_dtypes_inexact(p, x)\n shape = lax.broadcast_shapes(p.shape[1:], x.shape)\n y = lax.full_like(x, 0, shape=shape, dtype=x.dtype)\n y, _ = lax.scan(lambda y, p: (y * x + p, None), y, p, unroll=unroll)\n return y\n\n@_wraps(np.polyadd)\n@jit\ndef polyadd(a1, a2):\n _check_arraylike(\"polyadd\", a1, a2)\n a1, a2 = _promote_dtypes(a1, a2)\n if a2.shape[0] <= a1.shape[0]:\n return a1.at[-a2.shape[0]:].add(a2)\n else:\n return a2.at[-a1.shape[0]:].add(a1)\n\n\n@_wraps(np.polyint)\n@partial(jit, static_argnames=('m',))\ndef polyint(p, m=1, k=None):\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyint\")\n k = 0 if k is None else k\n _check_arraylike(\"polyint\", p, k)\n p, k = _promote_dtypes_inexact(p, k)\n if m < 0:\n raise ValueError(\"Order of integral must be positive (see polyder)\")\n k = atleast_1d(k)\n if len(k) == 1:\n k = full((m,), k[0])\n if k.shape != (m,):\n raise ValueError(\"k must be a scalar or a rank-1 array of length 1 or m.\")\n if m == 0:\n return p\n else:\n coeff = maximum(1, arange(len(p) + m, 0, -1)[newaxis, :] - 1 - arange(m)[:, newaxis]).prod(0)\n return true_divide(concatenate((p, k)), coeff)\n\n\n@_wraps(np.polyder)\n@partial(jit, static_argnames=('m',))\ndef polyder(p, m=1):\n _check_arraylike(\"polyder\", p)\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyder\")\n p, = _promote_dtypes_inexact(p)\n if m < 0:\n raise ValueError(\"Order of derivative must be positive\")\n if m == 0:\n return p\n coeff = (arange(len(p), m, -1)[newaxis, :] - 1 - arange(m)[:, newaxis]).prod(0)\n return p[:-m] * coeff\n\n\n@_wraps(np.trim_zeros)\ndef trim_zeros(filt, trim='fb'):\n filt = core.concrete_or_error(asarray, filt,\n \"Error arose in the `filt` argument of trim_zeros()\")\n nz = (filt == 0)\n if all(nz):\n return empty(0, _dtype(filt))\n start = argmin(nz) if 'f' in trim.lower() else 0\n end = argmin(nz[::-1]) if 'b' in trim.lower() else 0\n return filt[start:len(filt) - end]\n\n\n_LEADING_ZEROS_DOC = \"\"\"\\\nSetting 
trim_leading_zeros=True makes the output match that of numpy.\nBut prevents the function from being able to be used in compiled code.\n\"\"\"\n\n@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)\ndef polymul(a1, a2, *, trim_leading_zeros=False):\n _check_arraylike(\"polymul\", a1, a2)\n a1, a2 = _promote_dtypes_inexact(a1, a2)\n if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):\n a1, a2 = trim_zeros(a1, trim='f'), trim_zeros(a2, trim='f')\n if len(a1) == 0:\n a1 = asarray([0.])\n if len(a2) == 0:\n a2 = asarray([0.])\n val = convolve(a1, a2, mode='full')\n return val\n\n\n@_wraps(np.polysub)\n@jit\ndef polysub(a1, a2):\n _check_arraylike(\"polysub\", a1, a2)\n a1, a2 = _promote_dtypes(a1, a2)\n return polyadd(a1, -a2)\n\n\n@_wraps(np.append)\n@partial(jit, static_argnames=('axis',))\ndef append(arr, values, axis: Optional[int] = None):\n if axis is None:\n return concatenate([ravel(arr), ravel(values)], 0)\n else:\n return concatenate([arr, values], axis=axis)\n\n\n@_wraps(np.delete)\ndef delete(arr, obj, axis=None):\n _check_arraylike(\"delete\", arr)\n if axis is None:\n arr = ravel(arr)\n axis = 0\n axis = _canonicalize_axis(axis, arr.ndim)\n\n # Case 1: obj is a static integer.\n try:\n obj = operator.index(obj)\n obj = _canonicalize_axis(obj, arr.shape[axis])\n except TypeError:\n pass\n else:\n idx = tuple(slice(None) for i in range(axis))\n return concatenate([arr[idx + (slice(0, obj),)], arr[idx + (slice(obj + 1, None),)]], axis=axis)\n\n # Case 2: obj is a static slice.\n if isinstance(obj, slice):\n # TODO(jakevdp): we should be able to do this dynamically with care.\n indices = np.delete(np.arange(arr.shape[axis]), obj)\n return take(arr, indices, axis=axis)\n\n # Case 3: obj is an array\n # NB: pass both arrays to check for appropriate error message.\n _check_arraylike(\"delete\", arr, obj)\n obj = core.concrete_or_error(np.asarray, obj, \"'obj' array argument of jnp.delete()\")\n\n if issubdtype(obj.dtype, integer):\n # TODO(jakevdp): in theory this could be done dynamically if obj has no duplicates,\n # but this would require the complement of lax.gather.\n mask = np.ones(arr.shape[axis], dtype=bool)\n mask[obj] = False\n elif obj.dtype == bool:\n if obj.shape != (arr.shape[axis],):\n raise ValueError(\"np.delete(arr, obj): for boolean indices, obj must be one-dimensional \"\n \"with length matching specified axis.\")\n mask = ~obj\n else:\n raise ValueError(f\"np.delete(arr, obj): got obj.dtype={obj.dtype}; must be integer or bool.\")\n return arr[tuple(slice(None) for i in range(axis)) + (mask,)]\n\n@_wraps(np.insert)\ndef insert(arr, obj, values, axis=None):\n _check_arraylike(\"insert\", arr, 0 if isinstance(obj, slice) else obj, values)\n arr = asarray(arr)\n values = asarray(values)\n\n if axis is None:\n arr = ravel(arr)\n axis = 0\n axis = core.concrete_or_error(None, axis, \"axis argument of jnp.insert()\")\n axis = _canonicalize_axis(axis, arr.ndim)\n if isinstance(obj, slice):\n indices = arange(*obj.indices(arr.shape[axis]))\n else:\n indices = asarray(obj)\n\n if indices.ndim > 1:\n raise ValueError(\"jnp.insert(): obj must be a slice, a one-dimensional \"\n f\"array, or a scalar; got {obj}\")\n if not np.issubdtype(indices.dtype, np.integer):\n if indices.size == 0 and not isinstance(obj, ndarray):\n indices = indices.astype(int)\n else:\n # Note: np.insert allows boolean inputs but the behavior is deprecated.\n raise ValueError(\"jnp.insert(): index array must be \"\n f\"integer typed; got {obj}\")\n values = array(values, ndmin=arr.ndim, 
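# A minimal usage sketch for the append/delete/insert wrappers defined in this
# section, assuming only the public jax.numpy namespace; the sample arrays and
# values below are illustrative, not taken from the original source.
import jax.numpy as jnp
a = jnp.arange(6).reshape(2, 3)
jnp.append(a, jnp.array([[6, 7, 8]]), axis=0)   # concatenates a new row -> shape (3, 3)
jnp.delete(a, 1, axis=1)                        # removes column 1 -> shape (2, 2)
jnp.insert(jnp.array([1, 2, 3]), 1, 99)         # flattened insert -> [1, 99, 2, 3]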
dtype=arr.dtype, copy=False)\n\n if indices.size == 1:\n index = ravel(indices)[0]\n if indices.ndim == 0:\n values = moveaxis(values, 0, axis)\n indices = full(values.shape[axis], index)\n n_input = arr.shape[axis]\n n_insert = broadcast_shapes(indices.shape, values.shape[axis])[0]\n out_shape = list(arr.shape)\n out_shape[axis] += n_insert\n out = zeros_like(arr, shape=tuple(out_shape))\n\n indices = where(indices < 0, indices + n_input, indices)\n indices = clip(indices, 0, n_input)\n\n values_ind = indices.at[argsort(indices)].add(arange(n_insert))\n arr_mask = ones(n_input + n_insert, dtype=bool).at[values_ind].set(False)\n arr_ind = where(arr_mask, size=n_input)[0]\n\n out = out.at[(slice(None),) * axis + (values_ind,)].set(values)\n out = out.at[(slice(None),) * axis + (arr_ind,)].set(arr)\n\n return out\n\n\n@_wraps(np.apply_along_axis)\ndef apply_along_axis(func1d, axis: int, arr, *args, **kwargs):\n num_dims = ndim(arr)\n axis = _canonicalize_axis(axis, num_dims)\n func = lambda arr: func1d(arr, *args, **kwargs)\n for i in range(1, num_dims - axis):\n func = jax.vmap(func, in_axes=i, out_axes=-1)\n for i in range(axis):\n func = jax.vmap(func, in_axes=0, out_axes=0)\n return func(arr)\n\n\n@_wraps(np.apply_over_axes)\ndef apply_over_axes(func, a, axes):\n for axis in axes:\n b = func(a, axis=axis)\n if b.ndim == a.ndim:\n a = b\n elif b.ndim == a.ndim - 1:\n a = expand_dims(b, axis)\n else:\n raise ValueError(\"function is not returning an array of the correct shape\")\n return a\n\n\n### Tensor contraction operations\n\n\n@_wraps(np.dot, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('precision',), inline=True)\ndef dot(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"dot\", a, b)\n a, b = _promote_dtypes(a, b)\n a_ndim, b_ndim = ndim(a), ndim(b)\n if a_ndim == 0 or b_ndim == 0:\n return lax.mul(a, b)\n if _max(a_ndim, b_ndim) <= 2:\n return lax.dot(a, b, precision=precision)\n\n if b_ndim == 1:\n contract_dims = ((a_ndim - 1,), (0,))\n else:\n contract_dims = ((a_ndim - 1,), (b_ndim - 2,))\n batch_dims = ((), ())\n return lax.dot_general(a, b, (contract_dims, batch_dims), precision)\n\n\n@_wraps(np.matmul, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('precision',), inline=True)\ndef matmul(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"matmul\", a, b)\n for i, x in enumerate((a, b)):\n if ndim(x) < 1:\n msg = (f\"matmul input operand {i} must have ndim at least 1, \"\n f\"but it has ndim {ndim(x)}\")\n raise ValueError(msg)\n\n a, b = _promote_dtypes(a, b)\n\n a_is_mat, b_is_mat = (ndim(a) > 1), (ndim(b) > 1)\n a_batch_dims = shape(a)[:-2] if a_is_mat else ()\n b_batch_dims = shape(b)[:-2] if b_is_mat else ()\n num_batch_dims = _max(len(a_batch_dims), len(b_batch_dims))\n a_batch_dims = (None,) * (num_batch_dims - len(a_batch_dims)) + a_batch_dims\n b_batch_dims = (None,) * (num_batch_dims - len(b_batch_dims)) + b_batch_dims\n\n # Dimensions to squeeze from the inputs.\n a_squeeze = []\n b_squeeze = []\n\n # Positions of batch dimensions in squeezed inputs.\n a_batch = []\n b_batch = []\n\n # Desired index in final output of each kind of dimension, in the order that\n # lax.dot_general will emit them.\n idx_batch = []\n idx_a_other = [] # other = non-batch, non-contracting.\n idx_b_other = []\n for i, (ba, bb) in enumerate(zip(a_batch_dims, b_batch_dims)):\n if ba is None:\n idx_b_other.append(i)\n elif bb is None:\n idx_a_other.append(i)\n elif core.symbolic_equal_dim(ba, 
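# A small sketch of how the batch-dimension bookkeeping above plays out through
# the public API, assuming jax.numpy as jnp: leading batch dimensions broadcast,
# and a size-1 batch dimension on one operand is squeezed rather than tiled.
import jax.numpy as jnp
x = jnp.ones((4, 1, 2, 3))
y = jnp.ones((5, 3, 6))
assert jnp.matmul(x, y).shape == (4, 5, 2, 6)
assert jnp.dot(jnp.ones((2, 3)), jnp.ones((3, 4))).shape == (2, 4)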
1):\n idx_b_other.append(i)\n a_squeeze.append(len(idx_batch) + len(idx_a_other) + len(a_squeeze))\n elif core.symbolic_equal_dim(bb, 1):\n idx_a_other.append(i)\n b_squeeze.append(len(idx_batch) + len(idx_b_other) + len(b_squeeze))\n elif core.symbolic_equal_dim(ba, bb):\n a_batch.append(len(idx_batch) + len(idx_a_other))\n b_batch.append(len(idx_batch) + len(idx_b_other))\n idx_batch.append(i)\n else:\n raise ValueError(\"Incompatible shapes for matmul arguments: {} and {}\"\n .format(shape(a), shape(b)))\n\n if a_is_mat: idx_a_other.append(num_batch_dims)\n if b_is_mat: idx_b_other.append(num_batch_dims + a_is_mat)\n perm = np.argsort(np.concatenate([idx_batch, idx_a_other, idx_b_other]))\n\n a = lax.squeeze(a, tuple(a_squeeze))\n b = lax.squeeze(b, tuple(b_squeeze))\n out = lax.dot_general(\n a, b, (((ndim(a) - 1,), (ndim(b) - 1 - b_is_mat,)), (a_batch, b_batch)),\n precision=precision)\n return lax.transpose(out, perm)\n\n\n@_wraps(np.vdot, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('precision',), inline=True)\ndef vdot(a, b, *, precision=None):\n _check_arraylike(\"vdot\", a, b)\n if issubdtype(_dtype(a), complexfloating):\n a = conj(a)\n return dot(a.ravel(), b.ravel(), precision=precision)\n\n\n@_wraps(np.tensordot, lax_description=_PRECISION_DOC)\ndef tensordot(a, b, axes=2, *, precision=None):\n _check_arraylike(\"tensordot\", a, b)\n a_ndim = ndim(a)\n b_ndim = ndim(b)\n\n a, b = _promote_dtypes(a, b)\n if type(axes) is int:\n if axes > _min(a_ndim, b_ndim):\n msg = \"Number of tensordot axes (axes {}) exceeds input ranks ({} and {})\"\n raise TypeError(msg.format(axes, a.shape, b.shape))\n contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))\n elif type(axes) in (list, tuple) and len(axes) == 2:\n ax1, ax2 = axes\n if type(ax1) == type(ax2) == int:\n contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),\n (_canonicalize_axis(ax2, b_ndim),))\n elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):\n if len(ax1) != len(ax2):\n msg = \"tensordot requires axes lists to have equal length, got {} and {}.\"\n raise TypeError(msg.format(ax1, ax2))\n contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),\n tuple(_canonicalize_axis(i, b_ndim) for i in ax2))\n else:\n msg = (\"tensordot requires both axes lists to be either ints, tuples or \"\n \"lists, got {} and {}\")\n raise TypeError(msg.format(ax1, ax2))\n else:\n msg = (\"tensordot axes argument must be an int, a pair of ints, or a pair \"\n \"of lists/tuples of ints.\")\n raise TypeError(msg)\n return lax.dot_general(a, b, (contracting_dims, ((), ())),\n precision=precision)\n\n\n@_wraps(np.einsum, lax_description=_PRECISION_DOC, skip_params=['out'])\ndef einsum(*operands, out=None, optimize='optimal', precision=None,\n _use_xeinsum=False):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.einsum is not supported.\")\n\n if (_use_xeinsum or isinstance(operands[0], str) and '{' in operands[0] and\n len(operands[1:]) == 2):\n return lax.xeinsum(*operands)\n\n optimize = 'optimal' if optimize is True else optimize\n # using einsum_call=True here is an internal api for opt_einsum\n\n # Allow handling of shape polymorphism\n non_constant_dim_types = {\n type(d) for op in operands if not isinstance(op, str)\n for d in np.shape(op) if not core.is_constant_dim(d)\n }\n if not non_constant_dim_types:\n einsum_contract_path_fn = opt_einsum.contract_path\n else:\n einsum_contract_path_fn = 
_polymorphic_einsum_contract_path_handlers[next(iter(non_constant_dim_types))]\n operands, contractions = einsum_contract_path_fn(\n *operands, einsum_call=True, use_blas=True, optimize=optimize)\n\n contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions)\n return _einsum(operands, contractions, precision)\n\n# Enable other modules to override einsum_contact_path.\n# Indexed by the type of the non constant dimension\n_polymorphic_einsum_contract_path_handlers = {} # type: ignore\n\n@_wraps(np.einsum_path)\ndef einsum_path(subscripts, *operands, optimize='greedy'):\n # using einsum_call=True here is an internal api for opt_einsum\n return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)\n\ndef _removechars(s, chars):\n return s.translate(str.maketrans(dict.fromkeys(chars)))\n\n@partial(jit, static_argnums=(1, 2))\ndef _einsum(operands: Sequence,\n contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],\n precision):\n operands = list(_promote_dtypes(*operands))\n def sum(x, axes):\n return lax.reduce(x, np.array(0, x.dtype),\n lax.add if x.dtype != bool_ else lax.bitwise_or, axes)\n\n def sum_uniques(operand, names, uniques):\n if uniques:\n axes = [names.index(name) for name in uniques]\n operand = sum(operand, axes)\n names = _removechars(names, uniques)\n return operand, names\n\n def sum_repeats(operand, names, counts, keep_names):\n for name, count in counts.items():\n if count > 1:\n axes = [i for i, n in enumerate(names) if n == name]\n eye = lax._delta(operand.dtype, operand.shape, axes)\n if name not in keep_names:\n operand = sum(operand * eye, axes)\n names = names.replace(name, '')\n else:\n operand = sum(operand * eye, axes[:-1])\n names = names.replace(name, '', count - 1)\n return operand, names\n\n def filter_singleton_dims(operand, names, other_shape, other_names):\n s = shape(operand)\n new_shape = []\n new_names = []\n for i, d in enumerate(names):\n other_i = other_names.find(d)\n if not core.symbolic_equal_dim(s[i], 1) or other_i == -1 or core.symbolic_equal_dim(other_shape[other_i], 1):\n new_shape.append(s[i])\n new_names.append(d)\n return reshape(operand, tuple(new_shape)), \"\".join(new_names)\n\n for operand_indices, contracted_names_set, einstr in contractions:\n contracted_names = sorted(contracted_names_set)\n input_str, result_names = einstr.split('->')\n input_names = input_str.split(',')\n\n # switch on the number of operands to be processed in this loop iteration.\n # every case here sets 'operand' and 'names'.\n if len(operand_indices) == 1:\n operand = operands.pop(operand_indices[0])\n names, = input_names\n counts = collections.Counter(names)\n\n # sum out unique contracted indices with a single reduce-sum\n uniques = [name for name in contracted_names if counts[name] == 1]\n operand, names = sum_uniques(operand, names, uniques)\n\n # for every repeated index, do a contraction against an identity matrix\n operand, names = sum_repeats(operand, names, counts, result_names)\n\n elif len(operand_indices) == 2:\n lhs, rhs = map(operands.pop, operand_indices)\n lhs_names, rhs_names = input_names\n\n # handle cases where one side of a contracting or batch dimension is 1\n # but its counterpart is not.\n lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),\n rhs_names)\n rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),\n lhs_names)\n\n lhs_counts = collections.Counter(lhs_names)\n rhs_counts = collections.Counter(rhs_names)\n\n # sum out unique contracted indices in lhs and rhs\n 
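      # (Descriptive note: an index that appears exactly once in one operand,
      # not at all in the other operand, and not in the output is a contracted
      # name unique to that operand, so it can be reduced away with a plain sum
      # before the dot_general contraction below; e.g. the 'j' in
      # einsum('ij,k->ik', x, y) is summed out of x at this step.)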
lhs_uniques = [name for name in contracted_names\n if lhs_counts[name] == 1 and rhs_counts[name] == 0]\n lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)\n\n rhs_uniques = [name for name in contracted_names\n if rhs_counts[name] == 1 and lhs_counts[name] == 0]\n rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)\n\n # for every repeated index, contract against an identity matrix\n lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,\n result_names + rhs_names)\n rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,\n result_names + lhs_names)\n\n lhs_or_rhs_names = set(lhs_names) | set(rhs_names)\n contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]\n lhs_and_rhs_names = set(lhs_names) & set(rhs_names)\n batch_names = [x for x in result_names if x in lhs_and_rhs_names]\n\n lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))\n for n in batch_names)\n\n # NOTE(mattjj): this can fail non-deterministically in python3, maybe\n # due to opt_einsum\n assert _all(\n name in lhs_names and name in rhs_names and\n lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]\n for name in contracted_names)\n\n # contract using lax.dot_general\n batch_names_str = ''.join(batch_names)\n lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))\n for n in contracted_names)\n deleted_names = batch_names_str + ''.join(contracted_names)\n remaining_lhs_names = _removechars(lhs_names, deleted_names)\n remaining_rhs_names = _removechars(rhs_names, deleted_names)\n # Try both orders of lhs and rhs, in the hope that one of them means we\n # don't need an explicit transpose. opt_einsum likes to contract from\n # right to left, so we expect (rhs,lhs) to have the best chance of not\n # needing a transpose.\n names = batch_names_str + remaining_rhs_names + remaining_lhs_names\n if names == result_names:\n dimension_numbers = ((rhs_cont, lhs_cont), (rhs_batch, lhs_batch))\n operand = lax.dot_general(rhs, lhs, dimension_numbers, precision)\n else:\n names = batch_names_str + remaining_lhs_names + remaining_rhs_names\n dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))\n operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)\n else:\n raise NotImplementedError # if this is actually reachable, open an issue!\n\n # the resulting 'operand' with axis labels 'names' should be a permutation\n # of the desired result\n assert len(names) == len(result_names) == len(set(names))\n assert set(names) == set(result_names)\n if names != result_names:\n perm = tuple([names.index(name) for name in result_names])\n operand = lax.transpose(operand, perm)\n operands.append(operand) # used in next iteration\n\n return operands[0]\n\n\ndef _movechars(s, src, dst):\n \"\"\"Helper for einsum string munging, like moveaxis on identifier strings.\"\"\"\n chars = [c for i, c in enumerate(s) if i not in src]\n for i, j in sorted(zip(dst, src)):\n chars.insert(i, s[j])\n return ''.join(chars)\n\n\n@_wraps(np.inner, lax_description=_PRECISION_DOC)\n@partial(jit, static_argnames=('precision',), inline=True)\ndef inner(a, b, *, precision=None):\n if ndim(a) == 0 or ndim(b) == 0:\n return a * b\n return tensordot(a, b, (-1, -1), precision=precision)\n\n\n@_wraps(np.outer, skip_params=['out'])\n@partial(jit, inline=True)\ndef outer(a, b, out=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.outer is not supported.\")\n a, b = _promote_dtypes(a, b)\n return ravel(a)[:, None] * ravel(b)[None, 
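# A brief usage sketch for the contraction helpers above, assuming jax.numpy as
# jnp; for these simple cases einsum is expected to match np.einsum semantics.
import jax.numpy as jnp
m = jnp.arange(6.0).reshape(2, 3)
v = jnp.array([1.0, 2.0, 3.0])
assert jnp.allclose(jnp.einsum('ij,j->i', m, v), m @ v)   # matrix-vector product
assert jnp.allclose(jnp.inner(v, v), 14.0)                # 1 + 4 + 9
assert jnp.outer(v, v).shape == (3, 3)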
:]\n\n@_wraps(np.cross)\n@partial(jit, static_argnames=('axisa', 'axisb', 'axisc', 'axis'))\ndef cross(a, b, axisa: int = -1, axisb: int = -1, axisc: int = -1,\n axis: Optional[int] = None):\n if axis is not None:\n axisa = axis\n axisb = axis\n axisc = axis\n a = moveaxis(a, axisa, -1)\n b = moveaxis(b, axisb, -1)\n\n if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):\n raise ValueError(\"Dimension must be either 2 or 3 for cross product\")\n\n if a.shape[-1] == 2 and b.shape[-1] == 2:\n return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]\n\n a0 = a[..., 0]\n a1 = a[..., 1]\n a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)\n b0 = b[..., 0]\n b1 = b[..., 1]\n b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)\n c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])\n return moveaxis(c, 0, axisc)\n\n\n@_wraps(np.kron)\n@jit\ndef kron(a, b):\n a, b = _promote_dtypes(a, b)\n if ndim(a) < ndim(b):\n a = expand_dims(a, range(ndim(b) - ndim(a)))\n elif ndim(b) < ndim(a):\n b = expand_dims(b, range(ndim(a) - ndim(b)))\n a_reshaped = expand_dims(a, range(1, 2 * ndim(a), 2))\n b_reshaped = expand_dims(b, range(0, 2 * ndim(b), 2))\n out_shape = tuple(np.multiply(shape(a), shape(b)))\n return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)\n\n\n@_wraps(np.vander)\n@partial(jit, static_argnames=('N', 'increasing'))\ndef vander(x, N=None, increasing=False):\n _check_arraylike(\"vander\", x)\n x = asarray(x)\n if x.ndim != 1:\n raise ValueError(\"x must be a one-dimensional array\")\n N = x.shape[0] if N is None else core.concrete_or_error(\n operator.index, N, \"'N' argument of jnp.vander()\")\n if N < 0:\n raise ValueError(\"N must be nonnegative\")\n\n iota = lax.iota(x.dtype, N)\n if not increasing:\n iota = lax.sub(lax._const(iota, N - 1), iota)\n\n return power(x[..., None], expand_dims(iota, tuple(range(x.ndim))))\n\n\n### Misc\n\n_ARGWHERE_DOC = \"\"\"\\\nBecause the size of the output of ``argwhere`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional ``size`` argument, which\nspecifies the size of the leading dimension of the output - it must be specified statically\nfor ``jnp.argwhere`` to be compiled with non-static operands. 
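For illustration (a hedged sketch; the exact array repr may vary by version),
``jnp.argwhere(jnp.array([0, 3, 0, 5]), size=3)`` can be called under ``jit`` and
yields a ``(3, 1)`` index array whose unused trailing row is zero-padded.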
If ``size`` is specified,\nthe indices of the first ``size`` True elements will be returned; if there are fewer\nnonzero elements than `size` indicates, the index arrays will be zero-padded.\n\"\"\"\n\n@_wraps(np.argwhere, lax_description=_ARGWHERE_DOC)\ndef argwhere(a, *, size=None, fill_value=None):\n result = transpose(vstack(nonzero(a, size=size, fill_value=fill_value)))\n if ndim(a) == 0:\n return result[:0].reshape(result.shape[0], 0)\n return result.reshape(result.shape[0], ndim(a))\n\n\n@_wraps(np.argmax, skip_params=['out'])\ndef argmax(a, axis: Optional[int] = None, out=None):\n return _argmax(a, None if axis is None else operator.index(axis))\n\n@partial(jit, static_argnames=('axis',), inline=True)\ndef _argmax(a, axis: Optional[int] = None, out=None):\n _check_arraylike(\"argmax\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmax is not supported.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n return lax.argmax(a, _canonicalize_axis(axis, a.ndim), dtypes.canonicalize_dtype(int_))\n\n@_wraps(np.argmin, skip_params=['out'])\ndef argmin(a, axis: Optional[int] = None, out=None):\n return _argmin(a, None if axis is None else operator.index(axis))\n\n@partial(jit, static_argnames=('axis',), inline=True)\ndef _argmin(a, axis: Optional[int] = None, out=None):\n _check_arraylike(\"argmin\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmin is not supported.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmin of an empty sequence\")\n return lax.argmin(a, _canonicalize_axis(axis, a.ndim), dtypes.canonicalize_dtype(int_))\n\n\n_NANARG_DOC = \"\"\"\\\nWarning: jax.numpy.arg{} returns -1 for all-NaN slices and does not raise\nan error.\n\"\"\"\n\n@_wraps(np.nanargmax, lax_description=_NANARG_DOC.format(\"max\"))\ndef nanargmax(a, axis: Optional[int] = None):\n return _nanargmax(a, None if axis is None else operator.index(axis))\n\n@partial(jit, static_argnames=('axis',))\ndef _nanargmax(a, axis: Optional[int] = None):\n _check_arraylike(\"nanargmax\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmax(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, -inf, a)\n res = argmax(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n@_wraps(np.nanargmin, lax_description=_NANARG_DOC.format(\"min\"))\ndef nanargmin(a, axis: Optional[int] = None):\n return _nanargmin(a, None if axis is None else operator.index(axis))\n\n@partial(jit, static_argnames=('axis',))\ndef _nanargmin(a, axis: Optional[int] = None):\n _check_arraylike(\"nanargmin\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmin(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, inf, a)\n res = argmin(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n\n@_wraps(np.sort)\n@partial(jit, static_argnames=('axis', 'kind', 'order'))\ndef sort(a, axis: Optional[int] = -1, kind='quicksort', order=None):\n _check_arraylike(\"sort\", a)\n if kind != 'quicksort':\n warnings.warn(\"'kind' argument to sort is ignored.\")\n if order is not None:\n raise ValueError(\"'order' argument to sort is not supported.\")\n\n if axis is None:\n return lax.sort(a.ravel(), dimension=0)\n else:\n return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))\n\n@_wraps(np.sort_complex)\n@jit\ndef sort_complex(a):\n _check_arraylike(\"sort_complex\", a)\n a = lax.sort(a, 
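# A short usage sketch for the argmax/nanargmax/sort family above, assuming
# jax.numpy as jnp; note the documented JAX deviation that nanargmax returns -1
# for an all-NaN slice instead of raising an error.
import jax.numpy as jnp
a = jnp.array([1.0, 5.0, 3.0])
assert jnp.argmax(a) == 1
assert int(jnp.nanargmax(jnp.array([jnp.nan, jnp.nan]))) == -1
assert jnp.sort(jnp.array([3, 1, 2])).tolist() == [1, 2, 3]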
dimension=0)\n return lax.convert_element_type(a, result_type(a, dtypes.canonicalize_dtype(complex_)))\n\n@_wraps(np.lexsort)\n@partial(jit, static_argnames=('axis',))\ndef lexsort(keys, axis=-1):\n keys = tuple(keys)\n if len(keys) == 0:\n raise TypeError(\"need sequence of keys with len > 0 in lexsort\")\n if len({shape(key) for key in keys}) > 1:\n raise ValueError(\"all keys need to be the same shape\")\n if ndim(keys[0]) == 0:\n return array(0, dtype=dtypes.canonicalize_dtype(int_))\n axis = _canonicalize_axis(axis, ndim(keys[0]))\n use_64bit_index = keys[0].shape[axis] >= (1 << 31)\n iota = lax.broadcasted_iota(int64 if use_64bit_index else int_, shape(keys[0]), axis)\n return lax.sort((*keys[::-1], iota), dimension=axis, num_keys=len(keys))[-1]\n\n\n_ARGSORT_DOC = \"\"\"\nOnly :code:`kind='stable'` is supported. Other :code:`kind` values will produce\na warning and be treated as if they were :code:`'stable'`.\n\"\"\"\n\n@_wraps(np.argsort, lax_description=_ARGSORT_DOC)\n@partial(jit, static_argnames=('axis', 'kind', 'order'))\ndef argsort(a, axis: Optional[int] = -1, kind='stable', order=None):\n _check_arraylike(\"argsort\", a)\n if kind != 'stable':\n warnings.warn(\"'kind' argument to argsort is ignored; only 'stable' sorts \"\n \"are supported.\")\n if order is not None:\n raise ValueError(\"'order' argument to argsort is not supported.\")\n\n if axis is None:\n return argsort(a.ravel(), 0)\n else:\n axis_num = _canonicalize_axis(axis, ndim(a))\n use_64bit_index = a.shape[axis_num] >= (1 << 31)\n iota = lax.broadcasted_iota(int64 if use_64bit_index else int_, shape(a), axis_num)\n _, perm = lax.sort_key_val(a, iota, dimension=axis_num)\n return perm\n\n\n@_wraps(np.msort)\ndef msort(a):\n return sort(a, axis=0)\n\n\n@partial(jit, static_argnums=(2,))\ndef _roll(a, shift, axis):\n a_shape = shape(a)\n if axis is None:\n return lax.reshape(_roll(ravel(a), shift, axis=0), a_shape)\n shift = asarray(shift)\n a_ndim = len(a_shape)\n axis = np.asarray(axis)\n b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))\n if len(b_shape) != 1:\n msg = \"'shift' and 'axis' arguments to roll must be scalars or 1D arrays\"\n raise ValueError(msg)\n\n for x, i in zip(broadcast_to(shift, b_shape),\n np.broadcast_to(axis, b_shape)):\n i = _canonicalize_axis(i, a_ndim)\n x = remainder(x, (a_shape[i] or 1))\n a = lax.concatenate((a, a), i)\n a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)\n return a\n\n\n@_wraps(np.roll)\ndef roll(a, shift, axis: Optional[Union[int, Sequence[int]]] = None):\n _check_arraylike(\"roll\", a,)\n if isinstance(axis, list):\n axis = tuple(axis)\n return _roll(a, shift, axis)\n\n\n@_wraps(np.rollaxis, lax_description=_ARRAY_VIEW_DOC)\n@partial(jit, static_argnames=('axis', 'start'))\ndef rollaxis(a, axis: int, start=0):\n _check_arraylike(\"rollaxis\", a)\n start = core.concrete_or_error(operator.index, start, \"'start' argument of jnp.rollaxis()\")\n a_ndim = ndim(a)\n axis = _canonicalize_axis(axis, a_ndim)\n if not (-a_ndim <= start <= a_ndim):\n raise ValueError(f\"start={start} must satisfy {-a_ndim}<=start<={a_ndim}\")\n if start < 0:\n start += a_ndim\n if start > axis:\n start -= 1\n return moveaxis(a, axis, start)\n\n\n@_wraps(np.packbits)\n@partial(jit, static_argnames=('axis', 'bitorder'))\ndef packbits(a, axis: Optional[int] = None, bitorder='big'):\n _check_arraylike(\"packbits\", a)\n if not (issubdtype(_dtype(a), integer) or issubdtype(_dtype(a), bool_)):\n raise TypeError('Expected an input array of integer or boolean data 
type')\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n a = greater(a, 0).astype('uint8')\n bits = arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = ravel(a)\n axis = 0\n a = swapaxes(a, axis, -1)\n\n remainder = a.shape[-1] % 8\n if remainder:\n a = lax.pad(a, np.uint8(0),\n (a.ndim - 1) * [(0, 0, 0)] + [(0, 8 - remainder, 0)])\n\n a = a.reshape(a.shape[:-1] + (a.shape[-1] // 8, 8))\n bits = expand_dims(bits, tuple(range(a.ndim - 1)))\n packed = (a << bits).sum(-1).astype('uint8')\n return swapaxes(packed, axis, -1)\n\n\n@_wraps(np.unpackbits)\n@partial(jit, static_argnames=('axis', 'count', 'bitorder'))\ndef unpackbits(a, axis: Optional[int] = None, count=None, bitorder='big'):\n _check_arraylike(\"unpackbits\", a)\n if _dtype(a) != uint8:\n raise TypeError(\"Expected an input array of unsigned byte data type\")\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n bits = asarray(1) << arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = ravel(a)\n axis = 0\n a = swapaxes(a, axis, -1)\n unpacked = ((a[..., None] & expand_dims(bits, tuple(range(a.ndim)))) > 0).astype('uint8')\n unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))[..., :count]\n return swapaxes(unpacked, axis, -1)\n\n\n@_wraps(np.take, skip_params=['out'])\ndef take(a, indices, axis: Optional[int] = None, out=None, mode=None):\n return _take(a, indices, None if axis is None else operator.index(axis), out,\n mode)\n\n@partial(jit, static_argnames=('axis', 'mode'))\ndef _take(a, indices, axis: Optional[int] = None, out=None, mode=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.take is not supported.\")\n _check_arraylike(\"take\", a, indices)\n a = asarray(a)\n indices = asarray(indices)\n\n if axis is None:\n a = ravel(a)\n axis_idx = 0\n else:\n axis_idx = _canonicalize_axis(axis, ndim(a))\n\n if mode is None:\n # TODO(phawkins): change default mode to \"fill\" and delete this case.\n # lax.gather() does not support negative indices, so we wrap them here\n indices = where(indices < 0, indices + a.shape[axis_idx], indices)\n gather_mode = lax.GatherScatterMode.CLIP\n elif mode == \"raise\":\n # TODO(phawkins): we have no way to report out of bounds errors yet.\n raise NotImplementedError(\"The 'raise' mode to jnp.take is not supported.\")\n elif mode == \"wrap\":\n indices = mod(indices, _constant_like(indices, a.shape[axis_idx]))\n gather_mode = lax.GatherScatterMode.PROMISE_IN_BOUNDS\n elif mode == \"fill\":\n # Undocumented non-standard mode corresponding to the fill_or_drop mode on\n # lax.gather()\n gather_mode = lax.GatherScatterMode.FILL_OR_DROP\n # lax.gather() does not support negative indices, so we wrap them here\n indices = where(indices < 0, indices + a.shape[axis_idx], indices)\n elif mode == \"clip\":\n gather_mode = lax.GatherScatterMode.CLIP\n else:\n raise ValueError(\"Invalid mode '{}' for np.take\".format(mode))\n\n index_dims = len(shape(indices))\n slice_sizes = list(shape(a))\n if slice_sizes[axis_idx] == 0:\n if indices.size != 0:\n raise IndexError(\"Cannot do a non-empty jnp.take() from an empty axis.\")\n return a\n\n if indices.size == 0:\n out_shape = (slice_sizes[:axis_idx] + list(indices.shape) +\n slice_sizes[axis_idx + 1:])\n return full_like(a, 0, shape=out_shape)\n\n slice_sizes[axis_idx] = 1\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(\n 
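# A minimal sketch of the index-handling modes implemented in _take above,
# assuming jax.numpy as jnp; the array values are illustrative only.
import jax.numpy as jnp
a = jnp.array([10, 20, 30])
assert jnp.take(a, jnp.array([0, 4]), mode='clip').tolist() == [10, 30]   # 4 is clipped to 2
assert jnp.take(a, jnp.array([0, 4]), mode='wrap').tolist() == [10, 20]   # 4 wraps to 1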
list(range(axis_idx)) +\n list(range(axis_idx + index_dims, len(a.shape) + index_dims - 1))),\n collapsed_slice_dims=(axis_idx,),\n start_index_map=(axis_idx,))\n return lax.gather(a, indices[..., None], dimension_numbers=dnums,\n slice_sizes=tuple(slice_sizes),\n mode=gather_mode)\n\n\ndef _normalize_index(index, axis_size):\n \"\"\"Normalizes an index value in the range [-N, N) to the range [0, N).\"\"\"\n if core.is_constant_dim(axis_size):\n axis_size_val = _constant_like(index, axis_size)\n else:\n axis_size_val = lax.convert_element_type(core.dimension_as_value(axis_size),\n _dtype(index))\n return lax.select(\n lax.lt(index, _constant_like(index, 0)),\n lax.add(index, axis_size_val),\n index)\n\n@_wraps(np.take_along_axis, update_doc=False)\n@partial(jit, static_argnames=('axis',))\ndef take_along_axis(arr, indices, axis: Optional[int]):\n _check_arraylike(\"take_along_axis\", arr, indices)\n if axis is None:\n if ndim(indices) != 1:\n msg = \"take_along_axis indices must be 1D if axis=None, got shape {}\"\n raise ValueError(msg.format(indices.shape))\n return take_along_axis(arr.ravel(), indices, 0)\n rank = ndim(arr)\n if rank != ndim(indices):\n msg = \"indices and arr must have the same number of dimensions; {} vs. {}\"\n raise ValueError(msg.format(ndim(indices), ndim(arr)))\n axis = _canonicalize_axis(axis, rank)\n\n def replace(tup, val):\n lst = list(tup)\n lst[axis] = val\n return tuple(lst)\n\n use_64bit_index = _any([not core.is_constant_dim(d) or d >= (1 << 31) for d in arr.shape])\n index_dtype = int64 if use_64bit_index else int32\n indices = lax.convert_element_type(indices, index_dtype)\n\n bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))\n indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))\n arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))\n\n axis_size = arr.shape[axis]\n arr_shape = replace(arr.shape, 1)\n idx_shape = indices.shape\n out_shape = lax.broadcast_shapes(idx_shape, arr_shape)\n\n index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]\n\n gather_index_shape = tuple(np.array(out_shape)[index_dims]) + (1,)\n gather_indices = []\n slice_sizes = []\n offset_dims = []\n start_index_map = []\n collapsed_slice_dims = []\n j = 0\n for i in range(rank):\n if i == axis:\n indices = _normalize_index(indices, axis_size)\n gather_indices.append(lax.reshape(indices, gather_index_shape))\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n elif idx_shape[i] != 1:\n iota = lax.iota(_dtype(indices), out_shape[i])\n iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))\n gather_indices.append(iota)\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n else:\n # If idx_shape[i] == 1, we can just take the entirety of the arr's axis\n # and avoid forming an iota index.\n offset_dims.append(i)\n slice_sizes.append(arr_shape[i])\n\n gather_indices = lax.concatenate(gather_indices, dimension=j)\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(offset_dims),\n collapsed_slice_dims=tuple(collapsed_slice_dims),\n start_index_map=tuple(start_index_map))\n return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))\n\n\n### SetOps\n@partial(jit, static_argnums=1)\ndef _unique_sorted_mask(ar, axis):\n aux = moveaxis(ar, axis, 0)\n size, *out_shape = aux.shape\n if _prod(out_shape) == 0:\n size = 1\n perm = zeros(1, dtype=int)\n else:\n perm = lexsort(aux.reshape(size, 
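# A small usage sketch for take_along_axis, assuming jax.numpy as jnp: pairing
# it with argsort gathers each row's elements in sorted order.
import jax.numpy as jnp
a = jnp.array([[3, 1, 2], [6, 5, 4]])
idx = jnp.argsort(a, axis=1)
assert jnp.take_along_axis(a, idx, axis=1).tolist() == [[1, 2, 3], [4, 5, 6]]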
_prod(out_shape)).T[::-1])\n aux = aux[perm]\n if aux.size:\n mask = ones(size, dtype=bool).at[1:].set(any(aux[1:] != aux[:-1], tuple(range(1, aux.ndim))))\n else:\n mask = zeros(size, dtype=bool)\n return aux, mask, perm\n\ndef _unique(ar, axis, return_index=False, return_inverse=False, return_counts=False,\n size=None, fill_value=None, return_true_size=False):\n \"\"\"\n Find the unique elements of an array along a particular axis.\n \"\"\"\n if ar.shape[axis] == 0 and size and fill_value is None:\n raise ValueError(\n \"jnp.unique: for zero-sized input with nonzero size argument, fill_value must be specified\")\n\n aux, mask, perm = _unique_sorted_mask(ar, axis)\n ind = mask if size is None else nonzero(mask, size=size)[0]\n result = aux[ind] if aux.size else aux\n if fill_value is not None:\n fill_value = asarray(fill_value, dtype=result.dtype)\n if size is not None and fill_value is not None:\n if result.shape[0]:\n valid = lax.expand_dims(arange(size) < mask.sum(), tuple(range(1, result.ndim)))\n result = where(valid, result, fill_value)\n else:\n result = full_like(result, fill_value, shape=(size, *result.shape[1:]))\n result = moveaxis(result, 0, axis)\n\n ret = (result,)\n if return_index:\n if aux.size:\n ret += (perm[ind],)\n else:\n ret += (perm,)\n if return_inverse:\n if aux.size:\n imask = cumsum(mask) - 1\n inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))\n inv_idx = inv_idx.at[perm].set(imask)\n else:\n inv_idx = zeros(ar.shape[axis], dtype=int)\n ret += (inv_idx,)\n if return_counts:\n if aux.size:\n if size is None:\n idx = append(nonzero(mask)[0], mask.size)\n else:\n idx = nonzero(mask, size=size + 1)[0]\n idx = idx.at[1:].set(where(idx[1:], idx[1:], mask.size))\n ret += (diff(idx),)\n elif ar.shape[axis]:\n ret += (array([ar.shape[axis]], dtype=int_),)\n else:\n ret += (empty(0, dtype=int),)\n if return_true_size:\n # Useful for internal uses of unique().\n ret += (mask.sum(),)\n return ret[0] if len(ret) == 1 else ret\n\n\n_UNIQUE_DOC = \"\"\"\\\nBecause the size of the output of ``unique`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the data-dependent output arrays: it must be specified statically\nfor ``jnp.unique`` to be compiled with non-static operands. 
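As a hedged illustration, ``jnp.unique(jnp.array([4, 1, 4, 2]), size=3)`` can run
under ``jit`` and evaluates to ``[1, 2, 4]`` (the sorted unique values; no padding
is needed since exactly three unique elements exist).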
If specified, the first `size`\nunique elements will be returned; if there are fewer unique elements than `size` indicates,\nthe return value will be padded with `fill_value`, which defaults to the minimum value\nalong the specified axis of the input.\"\"\"\n\n\n@_wraps(np.unique, skip_params=['axis'], lax_description=_UNIQUE_DOC)\ndef unique(ar, return_index=False, return_inverse=False,\n return_counts=False, axis: Optional[int] = None, *, size=None, fill_value=None):\n _check_arraylike(\"unique\", ar)\n if size is None:\n ar = core.concrete_or_error(None, ar, \"The error arose for the first argument of jnp.unique()\")\n else:\n size = core.concrete_or_error(operator.index, size, \"The error arose for the size argument of jnp.unique()\")\n ar = asarray(ar)\n if axis is None:\n axis = 0\n ar = ar.flatten()\n axis = core.concrete_or_error(operator.index, axis, \"axis argument of jnp.unique()\")\n return _unique(ar, axis, return_index, return_inverse, return_counts, size=size, fill_value=fill_value)\n\n### Indexing\n\ndef _rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False,\n mode=None, fill_value=None):\n # Computes arr[idx].\n # All supported cases of indexing can be implemented as an XLA gather,\n # followed by an optional reverse and broadcast_in_dim.\n arr = asarray(arr)\n treedef, static_idx, dynamic_idx = _split_index_for_jit(idx, arr.shape)\n return _gather(arr, treedef, static_idx, dynamic_idx, indices_are_sorted,\n unique_indices, mode, fill_value)\n\n# TODO(phawkins): re-enable jit after fixing excessive recompilation for\n# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).\n# @partial(jit, static_argnums=(1, 2))\ndef _gather(arr, treedef, static_idx, dynamic_idx, indices_are_sorted,\n unique_indices, mode, fill_value):\n idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)\n indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update\n y = arr\n\n if fill_value is not None:\n core.concrete_or_error(None, fill_value,\n \"fill_value argument to indexed get()\")\n if np.ndim(fill_value) != 0:\n raise ValueError(\"fill_value argument to indexed get() must be a scalar\")\n if isinstance(fill_value, np.ndarray):\n fill_value = fill_value.item()\n\n # Avoid calling gather if the slice shape is empty, both as a fast path and to\n # handle cases like zeros(0)[array([], int32)].\n if core.is_empty_shape(indexer.slice_shape):\n return zeros_like(y, shape=indexer.slice_shape)\n\n # We avoid generating a gather when indexer.gather_indices.size is empty.\n if not core.is_empty_shape(indexer.gather_indices.shape):\n y = lax.gather(\n y, indexer.gather_indices, indexer.dnums, indexer.gather_slice_shape,\n unique_indices=unique_indices or indexer.unique_indices,\n indices_are_sorted=indices_are_sorted or indexer.indices_are_sorted,\n mode=mode, fill_value=fill_value)\n\n # Reverses axes with negative strides.\n if indexer.reversed_y_dims:\n y = lax.rev(y, indexer.reversed_y_dims)\n\n # This adds np.newaxis/None dimensions.\n return expand_dims(y, indexer.newaxis_dims)\n\n_Indexer = collections.namedtuple(\"_Indexer\", [\n # The expected shape of the slice output.\n \"slice_shape\",\n\n # The slice shape to pass to lax.gather().\n \"gather_slice_shape\",\n\n # The gather indices to use.\n \"gather_indices\",\n\n # A GatherDimensionNumbers object describing the gather to perform.\n \"dnums\",\n\n # Are the gather_indices known to be non-overlapping and/or sorted?\n # (In practice, these translate to \"there no advanced 
indices\", because\n # only advanced indices could lead to index repetition.)\n \"unique_indices\",\n \"indices_are_sorted\",\n\n # Slice dimensions that have negative strides, and so must be reversed after\n # the gather.\n \"reversed_y_dims\",\n\n # Keep track of any axes created by `newaxis`. These must be inserted for\n # gathers and eliminated for scatters.\n \"newaxis_dims\",\n])\n\ndef _split_index_for_jit(idx, shape):\n \"\"\"Splits indices into necessarily-static and dynamic parts.\n\n Used to pass indices into `jit`-ted function.\n \"\"\"\n # Convert list indices to tuples in cases (deprecated by NumPy.)\n idx = _eliminate_deprecated_list_indexing(idx)\n\n # Expand any (concrete) boolean indices. We can then use advanced integer\n # indexing logic to handle them.\n idx = _expand_bool_indices(idx, shape)\n\n leaves, treedef = tree_flatten(idx)\n dynamic = [None] * len(leaves)\n static = [None] * len(leaves)\n for i, x in enumerate(leaves):\n if x is Ellipsis:\n static[i] = x\n elif isinstance(x, slice):\n # slice objects aren't hashable.\n static[i] = (x.start, x.stop, x.step)\n else:\n dynamic[i] = x\n return treedef, tuple(static), dynamic\n\ndef _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):\n \"\"\"Recombines indices that were split by _split_index_for_jit.\"\"\"\n idx = []\n for s, d in zip(static_idx, dynamic_idx):\n if d is not None:\n idx.append(d)\n elif isinstance(s, tuple):\n idx.append(slice(s[0], s[1], s[2]))\n else:\n idx.append(s)\n return treedef.unflatten(idx)\n\ndef _int(aval):\n return not aval.shape and issubdtype(aval.dtype, integer)\n\ndef _index_to_gather(x_shape, idx, normalize_indices=True):\n # Remove ellipses and add trailing slice(None)s.\n idx = _canonicalize_tuple_index(len(x_shape), idx)\n\n # Check for advanced indexing:\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n\n # Do the advanced indexing axes appear contiguously? If not, NumPy semantics\n # move the advanced axes to the front.\n advanced_axes_are_contiguous = False\n\n advanced_indexes = None\n\n # The positions of the advanced indexing axes in `idx`.\n idx_advanced_axes = []\n\n # The positions of the advanced indexes in x's shape.\n # collapsed, after None axes have been removed. See below.\n x_advanced_axes = None\n\n if _is_advanced_int_indexer(idx):\n idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]\n advanced_pairs = (\n (asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)\n if isscalar(e) or isinstance(e, (Sequence, ndarray, np.ndarray)))\n if normalize_indices:\n advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)\n for e, i, j in advanced_pairs)\n advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)\n advanced_axes_are_contiguous = np.all(np.diff(idx_advanced_axes) == 1)\n\n x_axis = 0 # Current axis in x.\n y_axis = 0 # Current axis in y, before collapsing. See below.\n collapsed_y_axis = 0 # Current axis in y, after collapsing.\n\n # Scatter dimension numbers.\n offset_dims = []\n collapsed_slice_dims = []\n start_index_map = []\n\n use_64bit_index = _any([not core.is_constant_dim(d) or d >= (1 << 31) for d in x_shape])\n index_dtype = int64 if use_64bit_index else int32\n\n # Gather indices.\n # Pairs of (array, start_dim) values. 
These will be broadcast into\n # gather_indices_shape, with the array dimensions aligned to start_dim, and\n # then concatenated.\n gather_indices = []\n gather_indices_shape = []\n\n # We perform three transformations to y before the scatter op, in order:\n # First, y is broadcast to slice_shape. In general `y` only need broadcast to\n # the right shape.\n slice_shape = []\n\n # Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`\n # indices, which the scatter cannot remove itself.\n newaxis_dims = []\n\n # Finally, we reverse reversed_y_dims to handle slices with negative strides.\n reversed_y_dims = []\n\n gather_slice_shape = []\n\n for idx_pos, i in enumerate(idx):\n # Handle the advanced indices here if:\n # * the advanced indices were not contiguous and we are the start.\n # * we are at the position of the first advanced index.\n if (advanced_indexes is not None and\n (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or\n not advanced_axes_are_contiguous and idx_pos == 0)):\n advanced_indexes = broadcast_arrays(*advanced_indexes)\n shape = advanced_indexes[0].shape\n ndim = len(shape)\n\n start_dim = len(gather_indices_shape)\n gather_indices += ((lax.convert_element_type(a, index_dtype), start_dim)\n for a in advanced_indexes)\n gather_indices_shape += shape\n\n start_index_map.extend(x_advanced_axes)\n collapsed_slice_dims.extend(x_advanced_axes)\n slice_shape.extend(shape)\n y_axis += ndim\n collapsed_y_axis += ndim\n\n # Per-index bookkeeping for advanced indexes.\n if idx_pos in idx_advanced_axes:\n x_axis += 1\n gather_slice_shape.append(1)\n continue\n\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n # Handle basic int indexes.\n if isinstance(abstract_i, (ConcreteArray, ShapedArray)) and _int(abstract_i):\n if core.symbolic_equal_dim(x_shape[x_axis], 0):\n # XLA gives error when indexing into an axis of size 0\n raise IndexError(f\"index is out of bounds for axis {x_axis} with size 0\")\n i = _normalize_index(i, x_shape[x_axis]) if normalize_indices else i\n i = lax.convert_element_type(i, index_dtype)\n gather_indices.append((i, len(gather_indices_shape)))\n collapsed_slice_dims.append(x_axis)\n gather_slice_shape.append(1)\n start_index_map.append(x_axis)\n x_axis += 1\n # Handle np.newaxis (None)\n elif i is None:\n slice_shape.append(1)\n newaxis_dims.append(y_axis)\n y_axis += 1\n\n elif isinstance(i, slice):\n # Normalize the slice to use None when possible\n start, stop, step = i.start, i.stop, i.step\n try:\n if ((step is None or core.symbolic_equal_dim(step, 1)) and\n stop is not None and core.symbolic_equal_dim(stop, x_shape[x_axis])):\n # The following is a useful special case with shape polymorphism\n stop = None\n except TypeError:\n pass\n\n # Handle slice(None)\n if start is None and stop is None and step is None:\n slice_shape.append(x_shape[x_axis])\n gather_slice_shape.append(x_shape[x_axis])\n offset_dims.append(collapsed_y_axis)\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n # Handle slice index (only static, otherwise an error is raised)\n else:\n if not _all(_is_slice_element_none_or_constant(elt)\n for elt in (start, stop, step)):\n msg = (\"Array slice indices must have static start/stop/step to be used \"\n \"with NumPy indexing syntax. \"\n f\"Found slice({start}, {stop}, {step}). 
\"\n \"To index a statically sized \"\n \"array at a dynamic position, try lax.dynamic_slice/\"\n \"dynamic_update_slice (JAX does not support dynamically sized \"\n \"arrays within JIT compiled functions).\")\n raise IndexError(msg)\n if not core.is_constant_dim(x_shape[x_axis]):\n msg = (\"Cannot use NumPy slice indexing on an array dimension whose \"\n f\"size is not statically known ({x_shape[x_axis]}). \"\n \"Try using lax.dynamic_slice/dynamic_update_slice\")\n raise IndexError(msg)\n start, limit, stride, needs_rev = _static_idx(slice(start, stop, step),\n x_shape[x_axis])\n if needs_rev:\n reversed_y_dims.append(collapsed_y_axis)\n if stride == 1:\n i = lax.convert_element_type(start, index_dtype)\n gather_indices.append((i, len(gather_indices_shape)))\n slice_shape.append(limit - start)\n gather_slice_shape.append(limit - start)\n offset_dims.append(collapsed_y_axis)\n start_index_map.append(x_axis)\n else:\n i = arange(start, limit, stride, dtype=index_dtype)\n size = i.shape[0]\n slice_shape.append(size)\n gather_slice_shape.append(1)\n gather_indices.append((i, len(gather_indices_shape)))\n gather_indices_shape.append(size)\n\n start_index_map.append(x_axis)\n collapsed_slice_dims.append(x_axis)\n\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n else:\n if (abstract_i is not None and\n not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):\n msg = (\"Indexer must have integer or boolean type, got indexer \"\n \"with type {} at position {}, indexer value {}\")\n raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))\n\n msg = \"Indexing mode not yet supported. Open a feature request!\\n{}\"\n raise IndexError(msg.format(idx))\n\n if len(gather_indices) == 0:\n gather_indices_array = np.zeros((0,), dtype=index_dtype)\n elif len(gather_indices) == 1:\n g, _ = gather_indices[0]\n gather_indices_array = lax.expand_dims(g, (g.ndim,))\n else:\n last_dim = len(gather_indices_shape)\n gather_indices_shape.append(1)\n gather_indices_array = lax.concatenate([\n lax.broadcast_in_dim(g, gather_indices_shape, tuple(range(i, i + g.ndim)))\n for g, i in gather_indices],\n last_dim)\n\n dnums = lax.GatherDimensionNumbers(\n offset_dims = tuple(offset_dims),\n collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),\n start_index_map = tuple(start_index_map)\n )\n return _Indexer(\n slice_shape=slice_shape,\n newaxis_dims=tuple(newaxis_dims),\n gather_slice_shape=gather_slice_shape,\n reversed_y_dims=reversed_y_dims,\n dnums=dnums,\n gather_indices=gather_indices_array,\n unique_indices=advanced_indexes is None,\n indices_are_sorted=advanced_indexes is None)\n\ndef _should_unpack_list_index(x):\n \"\"\"Helper for _eliminate_deprecated_list_indexing.\"\"\"\n return (isinstance(x, (np.ndarray, ndarray)) and np.ndim(x) != 0\n or isinstance(x, (Sequence, slice))\n or x is Ellipsis or x is None)\n\ndef _eliminate_deprecated_list_indexing(idx):\n # \"Basic slicing is initiated if the selection object is a non-array,\n # non-tuple sequence containing slice objects, [Ellipses, or newaxis\n # objects]\". 
Detects this and raises a TypeError.\n if not isinstance(idx, tuple):\n if isinstance(idx, Sequence) and not isinstance(idx, (ndarray, np.ndarray)):\n # As of numpy 1.16, some non-tuple sequences of indices result in a warning, while\n # others are converted to arrays, based on a set of somewhat convoluted heuristics\n # (See https://github.com/numpy/numpy/blob/v1.19.2/numpy/core/src/multiarray/mapping.c#L179-L343)\n # In JAX, we raise an informative TypeError for *all* non-tuple sequences.\n if _any(_should_unpack_list_index(i) for i in idx):\n msg = (\"Using a non-tuple sequence for multidimensional indexing is not allowed; \"\n \"use `arr[tuple(seq)]` instead of `arr[seq]`. \"\n \"See https://github.com/google/jax/issues/4564 for more information.\")\n else:\n msg = (\"Using a non-tuple sequence for multidimensional indexing is not allowed; \"\n \"use `arr[array(seq)]` instead of `arr[seq]`. \"\n \"See https://github.com/google/jax/issues/4564 for more information.\")\n raise TypeError(msg)\n else:\n idx = (idx,)\n return idx\n\ndef _is_boolean_index(i):\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n return (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)\n or isinstance(i, list) and i and _all(_is_scalar(e)\n and issubdtype(_dtype(e), np.bool_) for e in i))\n\ndef _expand_bool_indices(idx, shape):\n \"\"\"Converts concrete bool indexes into advanced integer indexes.\"\"\"\n out = []\n total_dims = len(shape)\n num_ellipsis = _sum(e is Ellipsis for e in idx)\n if num_ellipsis > 1:\n raise IndexError(\"an index can only have a single ellipsis ('...')\")\n elif num_ellipsis == 1:\n total_dims = _sum(_ndim(e) if _is_boolean_index(e) else 1 for e in idx\n if e is not None and e is not Ellipsis)\n ellipsis_offset = 0\n for dim_number, i in enumerate(idx):\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n if _is_boolean_index(i):\n if isinstance(i, list):\n i = array(i)\n abstract_i = core.get_aval(i)\n\n if not type(abstract_i) is ConcreteArray:\n # TODO(mattjj): improve this error by tracking _why_ the indices are not concrete\n raise errors.NonConcreteBooleanIndexError(abstract_i)\n elif _ndim(i) == 0:\n raise TypeError(\"JAX arrays do not support boolean scalar indices\")\n else:\n i_shape = _shape(i)\n start = len(out) + ellipsis_offset\n expected_shape = shape[start: start + _ndim(i)]\n if i_shape != expected_shape:\n raise IndexError(\"boolean index did not match shape of indexed array in index \"\n f\"{dim_number}: got {i_shape}, expected {expected_shape}\")\n out.extend(np.where(i))\n else:\n out.append(i)\n if i is Ellipsis:\n ellipsis_offset = len(shape) - total_dims - 1\n return tuple(out)\n\n\ndef _is_slice_element_none_or_constant(elt):\n \"\"\"Return True if elt is a constant or None.\"\"\"\n if elt is None: return True\n try:\n return type(core.get_aval(elt)) is ConcreteArray\n except TypeError:\n return False\n\n# TODO(mattjj): clean up this logic\ndef _is_advanced_int_indexer(idx):\n \"\"\"Returns True if idx should trigger int array indexing, False otherwise.\"\"\"\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n assert isinstance(idx, tuple)\n if _all(e is None or e is Ellipsis or isinstance(e, slice)\n or _is_scalar(e) and issubdtype(_dtype(e), np.integer) for e in idx):\n return False\n return _all(e is None or e is Ellipsis or isinstance(e, slice)\n or _is_int_arraylike(e) for e in idx)\n\ndef _is_int_arraylike(x):\n \"\"\"Returns True if x 
is array-like with integer dtype, False otherwise.\"\"\"\n return (isinstance(x, int) and not isinstance(x, bool)\n or issubdtype(getattr(x, \"dtype\", None), np.integer)\n or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))\n\ndef _is_scalar(x):\n \"\"\"Checks if a Python or NumPy scalar.\"\"\"\n return np.isscalar(x) or (isinstance(x, (np.ndarray, ndarray))\n and np.ndim(x) == 0)\n\ndef _canonicalize_tuple_index(arr_ndim, idx, array_name='array'):\n \"\"\"Helper to remove Ellipsis and add in the implicit trailing slice(None).\"\"\"\n len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)\n if len_without_none > arr_ndim:\n raise IndexError(\n f\"Too many indices for {array_name}: {len_without_none} \"\n f\"non-None/Ellipsis indices for dim {arr_ndim}.\")\n ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)\n ellipsis_index = next(ellipses, None)\n if ellipsis_index is not None:\n if next(ellipses, None) is not None:\n raise IndexError(\n f\"Multiple ellipses (...) not supported: {list(map(type, idx))}.\")\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]\n elif len_without_none < arr_ndim:\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = tuple(idx) + colons\n return idx\n\ndef _static_idx(idx: slice, size: core.DimSize):\n \"\"\"Helper function to compute the static slice start/limit/stride values.\"\"\"\n if isinstance(size, int):\n start, stop, step = idx.indices(size)\n else:\n raise TypeError(size)\n\n if (step < 0 and stop >= start) or (step > 0 and start >= stop):\n return 0, 0, 1, False # sliced to size zero\n\n if step > 0:\n return start, stop, step, False\n else:\n k = (start - stop - 1) % (-step)\n return stop + k + 1, start + 1, -step, True\n\n\nblackman = _wrap_numpy_nullary_function(np.blackman)\nbartlett = _wrap_numpy_nullary_function(np.bartlett)\nhamming = _wrap_numpy_nullary_function(np.hamming)\nhanning = _wrap_numpy_nullary_function(np.hanning)\n# TODO: lower `kaiser` via lax to allow non-constant beta values.\nkaiser = _wrap_numpy_nullary_function(np.kaiser)\n\ndef _gcd_cond_fn(xs):\n x1, x2 = xs\n return any(x2 != 0)\n\ndef _gcd_body_fn(xs):\n x1, x2 = xs\n x1, x2 = (where(x2 != 0, x2, x1),\n where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))\n return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))\n\n@_wraps(np.gcd)\n@jit\ndef gcd(x1, x2):\n _check_arraylike(\"gcd\", x1, x2)\n if (not issubdtype(_dtype(x1), integer) or\n not issubdtype(_dtype(x2), integer)):\n raise ValueError(\"Arguments to jax.numpy.gcd must be integers.\")\n x1, x2 = _promote_dtypes(x1, x2)\n x1, x2 = broadcast_arrays(x1, x2)\n gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (abs(x1), abs(x2)))\n return gcd\n\n\n@_wraps(np.lcm)\n@jit\ndef lcm(x1, x2):\n _check_arraylike(\"lcm\", x1, x2)\n x1, x2 = _promote_dtypes(x1, x2)\n d = gcd(x1, x2)\n return where(d == 0, lax._const(d, 0),\n abs(multiply(x1, floor_divide(x2, d))))\n\n\n@_wraps(np.extract)\ndef extract(condition, arr):\n return compress(ravel(condition), ravel(arr))\n\n\n@_wraps(np.compress, skip_params=['out'])\ndef compress(condition, a, axis: Optional[int] = None, out=None):\n _check_arraylike(\"compress\", condition, a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.compress is not supported.\")\n if ndim(condition) != 1:\n raise ValueError(\"condition must be a 1D array\")\n condition = asarray(condition).astype(bool)\n if axis is None:\n axis = 0\n a = 
ravel(a)\n else:\n a = moveaxis(a, axis, 0)\n condition, extra = condition[:a.shape[0]], condition[a.shape[0]:]\n if any(extra):\n raise ValueError(\"condition contains entries that are out of bounds\")\n a = a[:condition.shape[0]]\n return moveaxis(a[condition], 0, axis)\n\n\n@_wraps(np.cov)\n@partial(jit, static_argnames=('rowvar', 'bias', 'ddof'))\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,\n aweights=None):\n if y is not None:\n m, y = _promote_args_inexact(\"cov\", m, y)\n if y.ndim > 2:\n raise ValueError(\"y has more than 2 dimensions\")\n else:\n m, = _promote_args_inexact(\"cov\", m)\n\n if m.ndim > 2:\n raise ValueError(\"m has more than 2 dimensions\") # same as numpy error\n\n X = atleast_2d(m)\n if not rowvar and X.shape[0] != 1:\n X = X.T\n if X.shape[0] == 0:\n return array([]).reshape(0, 0)\n\n if y is not None:\n y = atleast_2d(y)\n if not rowvar and y.shape[0] != 1:\n y = y.T\n X = concatenate((X, y), axis=0)\n if ddof is None:\n ddof = 1 if bias == 0 else 0\n\n w = None\n if fweights is not None:\n _check_arraylike(\"cov\", fweights)\n if ndim(fweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional fweights\")\n if shape(fweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and fweights\")\n if not issubdtype(_dtype(fweights), integer):\n raise TypeError(\"fweights must be integer.\")\n # Ensure positive fweights; note that numpy raises an error on negative fweights.\n w = asarray(abs(fweights))\n if aweights is not None:\n _check_arraylike(\"cov\", aweights)\n if ndim(aweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional aweights\")\n if shape(aweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and aweights\")\n # Ensure positive aweights: note that numpy raises an error for negative aweights.\n aweights = abs(aweights)\n w = aweights if w is None else w * aweights\n\n avg, w_sum = average(X, axis=1, weights=w, returned=True)\n w_sum = w_sum[0]\n\n if w is None:\n f = X.shape[1] - ddof\n elif ddof == 0:\n f = w_sum\n elif aweights is None:\n f = w_sum - ddof\n else:\n f = w_sum - ddof * sum(w * aweights) / w_sum\n\n X = X - avg[:, None]\n X_T = X.T if w is None else (X * lax.broadcast_to_rank(w, X.ndim)).T\n return true_divide(dot(X, X_T.conj()), f).squeeze()\n\n\n@_wraps(np.corrcoef)\n@partial(jit, static_argnames=('rowvar',))\ndef corrcoef(x, y=None, rowvar=True):\n _check_arraylike(\"corrcoef\", x)\n c = cov(x, y, rowvar)\n if len(shape(c)) == 0:\n # scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise\n return divide(c, c)\n d = diag(c)\n stddev = sqrt(real(d))\n c = divide(c, stddev[:,None])\n c = divide(c, stddev[None,:])\n\n real_part = clip(real(c), -1, 1)\n if iscomplexobj(c):\n complex_part = clip(imag(c), -1, 1)\n c = lax.complex(real_part, complex_part)\n else:\n c = real_part\n return c\n\n\n@_wraps(np.quantile, skip_params=['out', 'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'interpolation',\n 'keepdims'))\ndef quantile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"quantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.quantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, False)\n\n@_wraps(np.nanquantile, skip_params=['out', 
'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'interpolation',\n 'keepdims'))\ndef nanquantile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"nanquantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.nanquantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, True)\n\ndef _quantile(a, q, axis, interpolation, keepdims, squash_nans):\n if interpolation not in [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]:\n raise ValueError(\"interpolation can only be 'linear', 'lower', 'higher', \"\n \"'midpoint', or 'nearest'\")\n a, q = _promote_dtypes_inexact(a, q)\n if issubdtype(a.dtype, np.complexfloating):\n raise ValueError(\"quantile does not support complex input, as the operation is poorly defined.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n elif isinstance(axis, tuple):\n raise NotImplementedError(\"Tuple values for axis are not implemented\")\n else:\n axis = _canonicalize_axis(axis, ndim(a))\n\n q_shape = shape(q)\n q_ndim = ndim(q)\n if q_ndim > 1:\n raise ValueError(\"q must be have rank <= 1, got shape {}\".format(shape(q)))\n\n a_shape = shape(a)\n\n if squash_nans:\n a = where(isnan(a), nan, a) # Ensure nans are positive so they sort to the end.\n a = lax.sort(a, dimension=axis)\n counts = sum(logical_not(isnan(a)), axis=axis, dtype=q.dtype,\n keepdims=keepdims)\n shape_after_reduction = counts.shape\n q = lax.expand_dims(\n q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim)))\n counts = lax.expand_dims(counts, tuple(range(q_ndim)))\n q = lax.mul(q, lax.sub(counts, _constant_like(q, 1)))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.max(_constant_like(low, 0), lax.min(low, counts - 1))\n high = lax.max(_constant_like(high, 0), lax.min(high, counts - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n out_shape = q_shape + shape_after_reduction\n index = [lax.broadcasted_iota(int64, out_shape, dim + q_ndim)\n for dim in range(len(shape_after_reduction))]\n if keepdims:\n index[axis] = low\n else:\n index.insert(axis, low)\n low_value = a[tuple(index)]\n index[axis] = high\n high_value = a[tuple(index)]\n else:\n a = where(any(isnan(a), axis=axis, keepdims=True), nan, a)\n a = lax.sort(a, dimension=axis)\n n = a_shape[axis]\n q = lax.mul(q, _constant_like(q, n - 1))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))\n high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n\n slice_sizes = list(a_shape)\n slice_sizes[axis] = 1\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(range(\n q_ndim,\n len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),\n collapsed_slice_dims=() if keepdims else (axis,),\n start_index_map=(axis,))\n low_value = lax.gather(a, low[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n high_value = lax.gather(a, high[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n if q_ndim == 1:\n low_weight = 
lax.broadcast_in_dim(low_weight, low_value.shape,\n broadcast_dimensions=(0,))\n high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,\n broadcast_dimensions=(0,))\n\n if interpolation == \"linear\":\n result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),\n lax.mul(high_value.astype(q.dtype), high_weight))\n elif interpolation == \"lower\":\n result = low_value\n elif interpolation == \"higher\":\n result = high_value\n elif interpolation == \"nearest\":\n pred = lax.le(high_weight, _constant_like(high_weight, 0.5))\n result = lax.select(pred, low_value, high_value)\n elif interpolation == \"midpoint\":\n result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))\n else:\n raise ValueError(f\"interpolation={interpolation!r} not recognized\")\n\n return lax.convert_element_type(result, a.dtype)\n\n\n@partial(vectorize, excluded={0, 2})\ndef _searchsorted(a, v, side):\n if len(a) == 0:\n return 0\n op = operator.le if side == 'left' else operator.lt\n\n def body_fun(i, state):\n low, high = state\n mid = (low + high) // 2\n go_left = op(v, a[mid])\n return (where(go_left, low, mid), where(go_left, mid, high))\n\n n_levels = int(np.ceil(np.log2(len(a) + 1)))\n return lax.fori_loop(0, n_levels, body_fun, (0, len(a)))[1]\n\n\n@_wraps(np.searchsorted, skip_params=['sorter'])\n@partial(jit, static_argnames=('side', 'sorter'))\ndef searchsorted(a, v, side='left', sorter=None):\n _check_arraylike(\"searchsorted\", a, v)\n if side not in ['left', 'right']:\n raise ValueError(f\"{side!r} is an invalid value for keyword 'side'\")\n if sorter is not None:\n raise NotImplementedError(\"sorter is not implemented\")\n if ndim(a) != 1:\n raise ValueError(\"a should be 1-dimensional\")\n return _searchsorted(a, v, side)\n\n\n@_wraps(np.digitize)\n@partial(jit, static_argnames=('right',))\ndef digitize(x, bins, right=False):\n _check_arraylike(\"digitize\", x, bins)\n right = core.concrete_or_error(bool, right, \"right argument of jnp.digitize()\")\n if ndim(bins) != 1:\n raise ValueError(f\"digitize: bins must be a 1-dimensional array; got bins={bins}\")\n if len(bins) == 0:\n return zeros(x, dtype=dtypes.canonicalize_dtype(int_))\n side = 'right' if not right else 'left'\n return where(\n bins[-1] >= bins[0],\n searchsorted(bins, x, side=side),\n len(bins) - searchsorted(bins[::-1], x, side=side)\n )\n\n_PIECEWISE_DOC = \"\"\"\\\nUnlike `np.piecewise`, :py:func:`jax.numpy.piecewise` requires functions in\n`funclist` to be traceable by JAX, as it is implemented via :func:`jax.lax.switch`.\nSee the :func:`jax.lax.switch` documentation for more information.\n\"\"\"\n\n@_wraps(np.piecewise, lax_description=_PIECEWISE_DOC)\ndef piecewise(x, condlist, funclist, *args, **kw):\n _check_arraylike(\"piecewise\", x)\n condlist = array(condlist, dtype=bool_)\n nc, nf = len(condlist), len(funclist)\n if nf == nc + 1:\n funclist = funclist[-1:] + funclist[:-1]\n elif nf == nc:\n funclist = [0] + list(funclist)\n else:\n raise ValueError(f\"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}\")\n consts = {i: c for i, c in enumerate(funclist) if not callable(c)}\n funcs = {i: f for i, f in enumerate(funclist) if callable(f)}\n return _piecewise(x, condlist, consts,\n frozenset(funcs.items()), # dict is not hashable.\n *args, **kw)\n\n@partial(jit, static_argnames=['funcs'])\ndef _piecewise(x, condlist, consts, funcs, *args, **kw):\n funcs = dict(funcs)\n funclist = [consts.get(i, funcs.get(i)) for i in range(len(condlist) + 1)]\n indices = 
argmax(cumsum(concatenate([zeros_like(condlist[:1]), condlist], 0), 0), 0)\n dtype = _dtype(x)\n def _call(f):\n return lambda x: f(x, *args, **kw).astype(dtype)\n def _const(v):\n return lambda x: array(v, dtype=dtype)\n funclist = [_call(f) if callable(f) else _const(f) for f in funclist]\n return vectorize(lax.switch, excluded=(1,))(indices, funclist, x)\n\n\n@_wraps(np.percentile, skip_params=['out', 'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'interpolation',\n 'keepdims'))\ndef percentile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"percentile\", a, q)\n a, q = _promote_dtypes_inexact(a, q)\n q = true_divide(q, 100.0)\n return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.nanpercentile, skip_params=['out', 'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'interpolation',\n 'keepdims'))\ndef nanpercentile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"nanpercentile\", a, q)\n q = true_divide(q, float32(100.0))\n return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.median, skip_params=['out', 'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'keepdims'))\ndef median(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, keepdims=False):\n _check_arraylike(\"median\", a)\n return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input,\n keepdims=keepdims, interpolation='midpoint')\n\n@_wraps(np.nanmedian, skip_params=['out', 'overwrite_input'])\n@partial(jit, static_argnames=('axis', 'overwrite_input', 'keepdims'))\ndef nanmedian(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, keepdims=False):\n _check_arraylike(\"nanmedian\", a)\n return nanquantile(a, 0.5, axis=axis, out=out,\n overwrite_input=overwrite_input, keepdims=keepdims,\n interpolation='midpoint')\n\n\ndef _astype(arr, dtype):\n if dtype is None:\n dtype = dtypes.canonicalize_dtype(float_)\n lax._check_user_dtype_supported(dtype, \"astype\")\n return lax.convert_element_type(arr, dtype)\n\n\ndef _nbytes(arr):\n return size(arr) * _dtype(arr).itemsize\n\n\ndef _clip(number, min=None, max=None, out=None, *, a_min=None, a_max=None):\n # ndarray.clip has a slightly different API from clip (min -> a_min, max -> a_max)\n # TODO: remove after deprecation window\n if a_min is not None or a_max is not None:\n warnings.warn('`a_min` and `a_max` keyword arguments to ndarray.clip are deprecated '\n 'in favor of `min` and `max` for compatibility with numpy. 
'\n 'They will be removed in JAX 0.22.2', FutureWarning)\n if min is None and a_min is not None:\n min = a_min\n if max is None and a_max is not None:\n max = a_max\n return clip(number, a_min=min, a_max=max, out=out)\n\n\ndef _view(arr, dtype=None, type=None):\n lax._check_user_dtype_supported(dtype, \"view\")\n if type is not None:\n raise NotImplementedError(\"`type` argument of array.view()\")\n if dtype is None:\n return arr\n arr_dtype = _dtype(arr)\n if arr_dtype == dtype:\n return arr\n # bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type.\n # We work around this by casting bool to uint8.\n if arr_dtype == bool_:\n arr = arr.astype(uint8)\n nbits_in = 8 * arr_dtype.itemsize\n nbits_out = 8 * np.dtype(dtype).itemsize\n if nbits_in == nbits_out:\n if dtype == bool_:\n return lax.bitcast_convert_type(arr, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr, dtype)\n if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0:\n raise ValueError(\"When changing to a larger dtype, its size must be a divisor \"\n \"of the total size in bytes of the last axis of the array.\")\n byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64}\n if nbits_in not in byte_dtypes:\n raise NotImplementedError(f\"arr.view() for arr.dtype={arr_dtype}\")\n if nbits_out not in byte_dtypes:\n raise NotImplementedError(f\"arr.view(dtype) for dtype={dtype}\")\n dt_in = byte_dtypes[nbits_in]\n dt_out = byte_dtypes[nbits_out]\n arr_bytes = lax.bitcast_convert_type(arr, dt_in)\n if nbits_in < nbits_out:\n arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out)\n shifts = expand_dims(arange(0, nbits_out, nbits_in, dtype=dt_out), tuple(range(arr_bytes.ndim - 1)))\n arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out)\n else:\n shifts = lax.expand_dims(arange(0, nbits_in, nbits_out, dtype=dt_in), tuple(range(arr_bytes.ndim)))\n arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out)\n arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,))\n if dtype == bool_:\n return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr_bytes, dtype)\n\n### track unimplemented functions\n\n_NOT_IMPLEMENTED_DESC = \"\"\"\n*** This function is not yet implemented by jax.numpy, and will raise NotImplementedError ***\n\"\"\"\n\ndef _not_implemented(fun):\n @_wraps(fun, update_doc=False, lax_description=_NOT_IMPLEMENTED_DESC)\n def wrapped(*args, **kwargs):\n msg = \"Numpy function {} not yet implemented\"\n raise NotImplementedError(msg.format(fun))\n return wrapped\n\n\n### add method and operator overloads to arraylike classes\n\n# We add operator overloads to DeviceArray and ShapedArray. These method and\n# operator overloads mainly just forward calls to the corresponding lax_numpy\n# functions, which can themselves handle instances from any of these classes.\n\n_scalar_types = (int, float, complex, np.generic)\n_accepted_binop_types = (int, float, complex, np.generic, np.ndarray, ndarray)\n\ndef _defer_to_unrecognized_arg(binary_op):\n # Ensure that other array types have the chance to override arithmetic.\n def deferring_binary_op(self, other):\n if not isinstance(other, _accepted_binop_types):\n return NotImplemented\n return binary_op(self, other)\n return deferring_binary_op\n\ndef _swap_args(f):\n return lambda x, y: f(y, x)\n\ndef _unimplemented_setitem(self, i, x):\n msg = (\"'{}' object does not support item assignment. JAX arrays are \"\n \"immutable. 
Instead of ``x[idx] = y``, use ``x = x.at[idx].set(y)`` \"\n \"or another .at[] method: \"\n \"https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html\")\n raise TypeError(msg.format(type(self)))\n\ndef _operator_round(number, ndigits=None):\n out = round(number, decimals=ndigits or 0)\n # If `ndigits` is None, for a builtin float round(7.5) returns an integer.\n return out.astype(int) if ndigits is None else out\n\n_operators = {\n \"getitem\": _rewriting_take,\n \"setitem\": _unimplemented_setitem,\n \"neg\": negative,\n \"pos\": positive,\n \"eq\": _defer_to_unrecognized_arg(equal),\n \"ne\": _defer_to_unrecognized_arg(not_equal),\n \"lt\": _defer_to_unrecognized_arg(less),\n \"le\": _defer_to_unrecognized_arg(less_equal),\n \"gt\": _defer_to_unrecognized_arg(greater),\n \"ge\": _defer_to_unrecognized_arg(greater_equal),\n \"abs\": abs,\n \"add\": _defer_to_unrecognized_arg(add),\n \"radd\": _defer_to_unrecognized_arg(add),\n \"sub\": _defer_to_unrecognized_arg(subtract),\n \"rsub\": _defer_to_unrecognized_arg(_swap_args(subtract)),\n \"mul\": _defer_to_unrecognized_arg(multiply),\n \"rmul\": _defer_to_unrecognized_arg(multiply),\n \"div\": _defer_to_unrecognized_arg(divide),\n \"rdiv\": _defer_to_unrecognized_arg(_swap_args(divide)),\n \"truediv\": _defer_to_unrecognized_arg(true_divide),\n \"rtruediv\": _defer_to_unrecognized_arg(_swap_args(true_divide)),\n \"floordiv\": _defer_to_unrecognized_arg(floor_divide),\n \"rfloordiv\": _defer_to_unrecognized_arg(_swap_args(floor_divide)),\n \"divmod\": _defer_to_unrecognized_arg(divmod),\n \"rdivmod\": _defer_to_unrecognized_arg(_swap_args(divmod)),\n \"mod\": _defer_to_unrecognized_arg(mod),\n \"rmod\": _defer_to_unrecognized_arg(_swap_args(mod)),\n \"pow\": _defer_to_unrecognized_arg(power),\n \"rpow\": _defer_to_unrecognized_arg(_swap_args(power)),\n \"matmul\": _defer_to_unrecognized_arg(matmul),\n \"rmatmul\": _defer_to_unrecognized_arg(_swap_args(matmul)),\n \"and\": _defer_to_unrecognized_arg(bitwise_and),\n \"rand\": _defer_to_unrecognized_arg(bitwise_and),\n \"or\": _defer_to_unrecognized_arg(bitwise_or),\n \"ror\": _defer_to_unrecognized_arg(bitwise_or),\n \"xor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"rxor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"invert\": bitwise_not,\n \"lshift\": _defer_to_unrecognized_arg(left_shift),\n \"rshift\": _defer_to_unrecognized_arg(right_shift),\n \"rlshift\": _defer_to_unrecognized_arg(_swap_args(left_shift)),\n \"rrshift\": _defer_to_unrecognized_arg(_swap_args(right_shift)),\n \"round\": _operator_round,\n}\n\n# These numpy.ndarray methods are just refs to an equivalent numpy function\n_nondiff_methods = [\"all\", \"any\", \"argmax\", \"argmin\", \"argpartition\", \"argsort\",\n \"nonzero\", \"searchsorted\", \"round\"]\n_diff_methods = [\"choose\", \"conj\", \"conjugate\", \"cumprod\", \"cumsum\",\n \"diagonal\", \"dot\", \"max\", \"mean\", \"min\", \"prod\", \"ptp\",\n \"ravel\", \"repeat\", \"sort\", \"squeeze\", \"std\", \"sum\",\n \"swapaxes\", \"take\", \"tile\", \"trace\", \"var\"]\n\n# These methods are mentioned explicitly by nondiff_methods, so we create\n# _not_implemented implementations of them here rather than in __init__.py.\n# TODO(phawkins): implement these.\nargpartition = _not_implemented(np.argpartition)\n_NOT_IMPLEMENTED = ['argpartition']\n\n\n# Experimental support for NumPy's module dispatch with NEP-37.\n# Currently requires https://github.com/seberg/numpy-dispatch\n_JAX_ARRAY_TYPES = (device_array.DeviceArray, 
core.Tracer)\n_HANDLED_ARRAY_TYPES = _JAX_ARRAY_TYPES + (np.ndarray,)\n\ndef __array_module__(self, types):\n if builtins.all(issubclass(t, _HANDLED_ARRAY_TYPES) for t in types):\n return jax.numpy\n else:\n return NotImplemented\n\n\ndef _compress_method(a, condition, axis=None, out=None):\n return compress(condition, a, axis, out)\n\n\n@partial(jit, static_argnums=(1,2,3))\ndef _multi_slice(arr,\n start_indices: Tuple[Tuple[int, ...]],\n limit_indices: Tuple[Tuple[int, ...]],\n removed_dims: Tuple[Tuple[int, ...]]):\n \"\"\"Extracts multiple slices from `arr`.\n\n This is used to shard DeviceArray arguments to pmap. It's implemented as a\n DeviceArray method here to avoid circular imports.\n \"\"\"\n results = []\n for starts, limits, removed in safe_zip(start_indices, limit_indices, removed_dims):\n sliced = lax.slice(arr, starts, limits)\n if removed:\n sliced = lax.squeeze(sliced, removed)\n results.append(sliced)\n return results\n\n# The next two functions are related to iter(device_array), implemented here to\n# avoid circular imports.\n@jit\ndef _unstack(x):\n return [lax.index_in_dim(x, i, keepdims=False) for i in range(x.shape[0])]\nsetattr(device_array.DeviceArray, \"_unstack\", _unstack)\ndef _chunk_iter(x, size):\n if size > x.shape[0]:\n yield x\n else:\n num_chunks, tail = divmod(x.shape[0], size)\n for i in range(num_chunks):\n yield lax.dynamic_slice_in_dim(x, i * size, size)\n if tail:\n yield lax.dynamic_slice_in_dim(x, num_chunks * size, tail)\nsetattr(device_array.DeviceArray, \"_chunk_iter\", _chunk_iter)\n\n# Syntactic sugar for scatter operations.\nclass _IndexUpdateHelper:\n # Note: this docstring will appear as the docstring for the `at` property.\n \"\"\"Helper property for index update functionality.\n\n The ``at`` property provides a functionally pure equivalent of in-place\n array modificatons.\n\n In particular:\n\n ============================== ================================\n Alternate syntax Equivalent In-place expression\n ============================== ================================\n ``x = x.at[idx].set(y)`` ``x[idx] = y``\n ``x = x.at[idx].add(y)`` ``x[idx] += y``\n ``x = x.at[idx].multiply(y)`` ``x[idx] *= y``\n ``x = x.at[idx].divide(y)`` ``x[idx] /= y``\n ``x = x.at[idx].power(y)`` ``x[idx] **= y``\n ``x = x.at[idx].min(y)`` ``x[idx] = minimum(x[idx], y)``\n ``x = x.at[idx].max(y)`` ``x[idx] = maximum(x[idx], y)``\n ``x = x.at[idx].get()`` ``x = x[idx]``\n ============================== ================================\n\n None of the ``x.at`` expressions modify the original ``x``; instead they return\n a modified copy of ``x``. However, inside a :py:func:`~jax.jit` compiled function,\n expressions like :code:`x = x.at[idx].set(y)` are guaranteed to be applied in-place.\n\n Unlike NumPy in-place operations such as :code:`x[idx] += y`, if multiple\n indices refer to the same location, all updates will be applied (NumPy would\n only apply the last update, rather than applying all updates.) The order\n in which conflicting updates are applied is implementation-defined and may be\n nondeterministic (e.g., due to concurrency on some hardware platforms).\n\n By default, JAX assumes that all indices are in-bounds. There is experimental\n support for giving more precise semantics to out-of-bounds indexed accesses,\n via the ``mode`` parameter (see below).\n\n Arguments\n ---------\n mode : str\n Specify out-of-bound indexing mode. 
Options are:\n\n - ``\"promise_in_bounds\"``: (default) The user promises that indices are in bounds.\n No additional checking will be performed. In practice, this means that\n out-of-bounds indices in ``get()`` will be clipped, and out-of-bounds indices\n in ``set()``, ``add()``, etc. will be dropped.\n - ``\"clip\"``: clamp out of bounds indices into valid range.\n - ``\"drop\"``: ignore out-of-bound indices.\n - ``\"fill\"``: alias for ``\"drop\"``. For `get()`, the optional ``fill_value``\n argument specifies the value that will be returned.\n\n indices_are_sorted : bool\n If True, the implementation will assume that the indices passed to ``at[]``\n are sorted in ascending order, which can lead to more efficient execution\n on some backends.\n unique_indices : bool\n If True, the implementation will assume that the indices passed to ``at[]``\n are unique, which can result in more efficient execution on some backends.\n fill_value : Any\n Only applies to the ``get()`` method: the fill value to return for out-of-bounds\n slices when `mode` is ``'fill'``. Ignored otherwise. Defaults to ``NaN`` for\n inexact types, the largest negative value for signed types, the largest positive\n value for unsigned types, and ``True`` for booleans.\n\n Examples\n --------\n >>> x = jnp.arange(5.0)\n >>> x\n DeviceArray([0., 1., 2., 3., 4.], dtype=float32)\n >>> x.at[2].add(10)\n DeviceArray([ 0., 1., 12., 3., 4.], dtype=float32)\n >>> x.at[10].add(10) # out-of-bounds indices are ignored\n DeviceArray([0., 1., 2., 3., 4.], dtype=float32)\n >>> x.at[20].add(10, mode='clip')\n DeviceArray([ 0., 1., 2., 3., 14.], dtype=float32)\n >>> x.at[2].get()\n DeviceArray(2., dtype=float32)\n >>> x.at[20].get() # out-of-bounds indices clipped\n DeviceArray(4., dtype=float32)\n >>> x.at[20].get(mode='fill') # out-of-bounds indices filled with NaN\n DeviceArray(nan, dtype=float32)\n >>> x.at[20].get(mode='fill', fill_value=-1) # custom fill value\n DeviceArray(-1., dtype=float32)\n \"\"\"\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, index):\n return _IndexUpdateRef(self.array, index)\n\n def __repr__(self):\n return f\"_IndexUpdateHelper({repr(self.array)})\"\nndarray.at.__doc__ = _IndexUpdateHelper.__doc__\n\n_power_fn = power\n_divide_fn = divide\n\nclass _IndexUpdateRef:\n \"\"\"Helper object to call indexed update functions for an (advanced) index.\n\n This object references a source array and a specific indexer into that array.\n Methods on this object return copies of the source array that have been\n modified at the positions specified by the indexer.\n \"\"\"\n __slots__ = (\"array\", \"index\")\n\n def __init__(self, array, index):\n self.array = array\n self.index = index\n\n def __repr__(self):\n return f\"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})\"\n\n def get(self, indices_are_sorted=False, unique_indices=False,\n mode=None, fill_value=None):\n \"\"\"Equivalent to ``x[idx]``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexing <numpy.doc.indexing>` ``x[idx]``. 
This function differs from\n the usual array indexing syntax in that it allows additional keyword\n arguments ``indices_are_sorted`` and ``unique_indices`` to be passed.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return _rewriting_take(self.array, self.index,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode,\n fill_value=fill_value)\n\n def set(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] = y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values, lax.scatter,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n def add(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] += y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_add,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n def multiply(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] *= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices,\n mode=mode)\n mul = multiply\n\n def divide(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] /= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] /= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return _divide_fn(\n self.array,\n scatter._scatter_update(ones_like(self.array), self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode))\n\n def power(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] **= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] **= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return _power_fn(\n self.array,\n scatter._scatter_update(ones_like(self.array), self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode))\n\n def min(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] = minimum(x[idx], y)``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>`\n ``x[idx] = minimum(x[idx], y)``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_min,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n def max(self, values, indices_are_sorted=False, unique_indices=False,\n mode=None):\n \"\"\"Pure equivalent of ``x[idx] = maximum(x[idx], y)``.\n\n 
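For example, ``jnp.zeros(3).at[1].max(5.0)`` returns an array equal to ``[0., 5., 0.]``.\n\n    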
Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>`\n ``x[idx] = maximum(x[idx], y)``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_max,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n\ndef _set_shaped_array_attributes(shaped_array):\n # Set up operator, method, and property forwarding on Tracer instances\n # containing\n # ShapedArray avals by following the forwarding conventions for Tracer.\n # Forward operators using a single-underscore-prefix naming convention:\n for operator_name, function in _operators.items():\n setattr(shaped_array, \"_{}\".format(operator_name), staticmethod(function))\n # Forward methods and properties using core.{aval_method, aval_property}:\n for method_name in _nondiff_methods + _diff_methods:\n setattr(shaped_array, method_name, core.aval_method(globals()[method_name]))\n setattr(shaped_array, \"reshape\", core.aval_method(_reshape))\n setattr(shaped_array, \"transpose\", core.aval_method(_transpose))\n setattr(shaped_array, \"flatten\", core.aval_method(ravel))\n setattr(shaped_array, \"T\", core.aval_property(transpose))\n setattr(shaped_array, \"real\", core.aval_property(real))\n setattr(shaped_array, \"imag\", core.aval_property(imag))\n setattr(shaped_array, \"astype\", core.aval_method(_astype))\n setattr(shaped_array, \"view\", core.aval_method(_view))\n setattr(shaped_array, \"nbytes\", core.aval_property(_nbytes))\n setattr(shaped_array, \"clip\", core.aval_method(_clip))\n\n setattr(shaped_array, \"_array_module\", staticmethod(__array_module__))\n setattr(shaped_array, \"broadcast\", core.aval_method(lax.broadcast))\n setattr(shaped_array, \"broadcast_in_dim\", core.aval_method(lax.broadcast_in_dim))\n setattr(shaped_array, \"split\", core.aval_method(split))\n setattr(shaped_array, \"compress\", _compress_method)\n setattr(shaped_array, \"at\", core.aval_property(_IndexUpdateHelper))\n setattr(shaped_array, \"item\", core.aval_method(device_array.DeviceArray.item))\n\n_set_shaped_array_attributes(ShapedArray)\n\n\ndef _set_device_array_base_attributes(device_array):\n # Forward operators, methods, and properties on DeviceArray to lax_numpy\n # functions (with no Tracers involved; this forwarding is direct)\n for operator_name, function in _operators.items():\n setattr(device_array, \"__{}__\".format(operator_name), function)\n for method_name in _nondiff_methods + _diff_methods:\n setattr(device_array, method_name, globals()[method_name])\n setattr(device_array, \"reshape\", _reshape)\n setattr(device_array, \"transpose\", _transpose)\n setattr(device_array, \"flatten\", ravel)\n setattr(device_array, \"T\", property(transpose))\n setattr(device_array, \"real\", property(real))\n setattr(device_array, \"imag\", property(imag))\n setattr(device_array, \"astype\", _astype)\n setattr(device_array, \"view\", _view)\n setattr(device_array, \"nbytes\", property(_nbytes))\n setattr(device_array, \"clip\", _clip)\n\n_set_device_array_base_attributes(device_array.DeviceArray)\n\n\ndef _set_device_array_attributes(device_array):\n setattr(device_array, \"__array_module__\", __array_module__)\n # Extra methods that are handy\n setattr(device_array, \"broadcast\", lax.broadcast)\n setattr(device_array, \"broadcast_in_dim\", lax.broadcast_in_dim)\n setattr(device_array, \"split\", split)\n setattr(device_array, \"compress\", _compress_method)\n setattr(device_array, 
\"_multi_slice\", _multi_slice)\n setattr(device_array, \"at\", property(_IndexUpdateHelper))\n\nfor t in device_array.device_array_types:\n _set_device_array_attributes(t)\n_set_device_array_attributes(pxla._ShardedDeviceArray)\n_set_device_array_attributes(pxla.pmap_lib.ShardedDeviceArray)\n", "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental module transforms JAX functions to be executed by TensorFlow.\"\"\"\nfrom functools import partial\nimport contextlib\nimport os\nimport re\nimport threading\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport jax\nfrom jax import lax\nfrom jax._src import ad_util\nfrom jax._src import api_util\nfrom jax import config\nfrom jax import core, custom_derivatives\nfrom jax import linear_util as lu\nfrom jax import random, tree_util\nfrom jax import numpy as jnp\nfrom jax._src import ad_checkpoint\nfrom jax._src import api\nfrom jax._src import dispatch\nfrom jax._src import dtypes\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax._src.lax import lax as lax_internal\nfrom jax._src.lax import linalg as lax_linalg\nfrom jax._src.lax import slicing as lax_slicing\nfrom jax._src import source_info_util\nfrom jax._src import util\nimport jax._src.prng\nimport jax._src.random\nfrom jax.experimental import maps\nfrom jax.experimental import pjit\nfrom jax.interpreters import ad\nfrom jax.interpreters import partial_eval\nfrom jax.interpreters import pxla\nfrom jax.interpreters import sharded_jit\nfrom jax.interpreters import xla\nfrom jax._src.lib import xla_client\n\nfrom jax.experimental.jax2tf import shape_poly\nfrom jax.experimental.jax2tf import impl_no_xla\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\n# These don't have public equivalents.\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]\nfrom tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]\nfrom tensorflow.core.framework import attr_value_pb2 # type: ignore[import]\nfrom tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]\nfrom tensorflow.python.framework import ops as tf_ops # type: ignore[import]\n# pylint: enable=g-direct-tensorflow-import\n\nPolyShape = shape_poly.PolyShape\n\n# A temporary internal flag, to enable the wrapping of jax.jit functions\n# with tf.function(jit_compile=True). See #7389. This change has triggered a\n# number of failures in TF. We keep this until we are confident that it does\n# not create problems.\n# TODO(b/207464757): figure out why this change breaks test\n_WRAP_JAX_JIT_WITH_TF_FUNCTION = False\n\n# The scope name need to be a valid TensorFlow name. 
See\n# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731\n_VALID_SCOPE_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\/>-]*$\")\n_INVALID_SCOPE_CHAR = re.compile(\"[^A-Za-z0-9_.\\\\/>-]\")\n\nmap = util.safe_map\nzip = util.safe_zip\n\n\ndef _sanitize_scope_name(name):\n scope_name = _INVALID_SCOPE_CHAR.sub(\"_\", name)\n if not _VALID_SCOPE_REGEX.match(scope_name):\n scope_name = \".{}\".format(scope_name)\n return scope_name\n\n\n# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,\n# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)\nTfVal = Any\nDType = Any\nPrecisionType = int # Enum xla_data.PrecisionConfig.Precision\n\ndef _is_tfval(v: TfVal) -> bool:\n if isinstance(v, (tf.Tensor, tf.Variable)):\n return True\n try:\n # Include all convertible types, even if not supported on accelerators.\n with tf.device(\"CPU\"):\n tf.constant(v)\n return True\n except:\n return False\n\n\n# The implementation rules for primitives. The rule will be called with the\n# arguments (TfVal) and must return TfVal (or a sequence thereof,\n# if primitive.multiple_results). The vast majority of primitives do not need\n# to worry about core.unit inputs or results. The exception are primarily the\n# control-flow primitives.\ntf_impl: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# Some primitive implementation rules need the abstract values of arguments\n# and the results. This is the case for the primitives implemented using\n# _convert_jax_impl and those that need to adjust the shape of the outputs\n# due to missing TF shape inference rules for TFXLA ops. The rules for these\n# primitives should be added to `tf_impl_with_avals`.\n# The abstract value are passed to the implementation as two special kwargs\n# `_in_avals` (a tuple of core.ShapedArray) and `_out_aval` (a\n# core.ShapedArray, or a tuple thereof when primitive.multiple_results).\ntf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# XLA is not linked in all environments when converting a primitive. If this is\n# the case, we first search for implementation rules for primitives in the\n# following map. These implementations are workarounds, making use of TF ops\n# that do work when XLA is not linked in.\ntf_impl_no_xla = impl_no_xla.tf_impl_no_xla\n\n# In order to ensure that JAX picks up the proper user-frame for source\n# locations we will register the TensorFlow source path as an internal\n# path with source_info_util. The typical stack when a JAX primitive\n# conversion happens is:\n# jax2tf.process_primitive (top of stack)\n# jax tracing machinery ...\n# tf.custom_gradient machinery ...\n# jax2tf.converted_fun\n# tf function machinery ...\n# user code invokes the converted function on TF tensors\n#\n# We need to skip over not only JAX internal frames, but TF internal frames\n# also.\n# We register the TensorFlow source path lazily\n_has_registered_tf_source_path = False\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.name_stack = \"\"\n # XLA is not linked in all environments; when converting a primitive, if this\n # variable is disabled, we try harder to use only standard TF ops if they are\n # applicable to the concrete use case; if the resulting conversion path ends up\n # requiring a TFXLA operation, an exception is thrown instead.\n self.enable_xla = True\n\n # Keep track if we are inside a call_tf. 
In that context we disable the\n # safety check that we are not inside JAX transformations.\n self.inside_call_tf = False\n\n # Maps dimension variables to TF expressions\n self.shape_env: Sequence[Tuple[str, TfVal]] = ()\n\n # Whether to actually include XLA op metadata in the generated TF ops\n self.include_xla_op_metadata = True\n\n # A cache for the tf.convert_to_tensor for constants. We try to preserve\n # sharing for constants, to enable tf.Graph to take advantage of it.\n # See https://github.com/google/jax/issues/7992.\n self.constant_cache = None # None means that we don't use a cache. We\n # may be outside a conversion scope.\n\n\n_thread_local_state = _ThreadLocalState()\n\ndef _get_current_name_stack():\n return _thread_local_state.name_stack\n\[email protected]\ndef inside_call_tf():\n # Set the inside_call_tf flag for a context.\n prev = _thread_local_state.inside_call_tf\n _thread_local_state.inside_call_tf = True\n try:\n yield\n finally:\n _thread_local_state.inside_call_tf = prev\n\n@partial(api_util.api_hook, tag=\"jax2tf_convert\")\ndef convert(fun: Callable,\n *,\n polymorphic_shapes=None,\n with_gradient=True,\n enable_xla=True\n ) -> Callable:\n \"\"\"Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change.\n\n It should be `None` (all arguments are monomorphic), a single PolyShape\n or string (applies to all arguments), or a tuple/list of the same length\n as the function arguments. For each argument the shape specification\n should be `None` (monomorphic argument), or a Python object with the\n same pytree structure as the argument.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string \"_\" denoting a monomorphic dimension\n whose size is given by the actual argument. 
As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of \"_\"\n placeholders.\n\n For convenience, a shape specification can also be given as a string\n representation, e.g.: \"batch, ...\", \"batch, height, width, _\", possibly\n with surrounding parentheses: \"(batch, ...)\".\n\n The conversion fails if it cannot ensure that the it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set (default), add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. This means that reverse-mode\n TensorFlow AD is supported for the output TensorFlow function, and the\n value of the gradient will be JAX-accurate.\n enable_xla: if set (default), the converter will use the simplest conversion\n and use XLA TF ops when necessary. These ops are known to create issues\n for the TFLite and TFjs converters. For those cases, unset this parameter\n so the converter tries harder to use non-XLA TF ops to convert the\n function and aborts if this is not possible.\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs, and uses\n only TensorFlow ops.\n \"\"\"\n api._check_callable(fun)\n fun_name = getattr(fun, \"__name__\", \"unknown\")\n name_stack = util.extend_name_stack(util.wrap_name(fun_name, \"jax2tf\"))\n def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:\n # TODO: is there a better way to check if we are inside a transformation?\n if not core.trace_state_clean() and not _thread_local_state.inside_call_tf:\n # It is Ok to nest convert when we are inside a call_tf\n raise ValueError(\"convert must be used outside all JAX transformations.\" +\n f\"Trace state: {core.thread_local_state.trace_state.trace_stack}\")\n\n # We support kwargs by wrapping the function to take only positional arguments.\n # This is in part because jax.vjp does not support kwargs.\n nr_positional_args = len(args)\n kw_names = kwargs.keys()\n args = tuple(args) + tuple(kwargs[kw] for kw in kw_names)\n\n def fun_no_kwargs(*args_and_kwargs):\n assert len(args_and_kwargs) == nr_positional_args + len(kw_names)\n args = args_and_kwargs[:nr_positional_args]\n kwargs = {kw: args_and_kwargs[nr_positional_args + i]\n for i, kw in enumerate(kw_names)}\n return fun(*args, **kwargs)\n\n def check_arg(a):\n if not _is_tfval(a):\n msg = (f\"Argument {a} of type {type(a)} of jax2tf.convert(f) should \"\n \"be NumPy array, scalar, tf.Variable, or tf.Tensor\")\n raise TypeError(msg)\n\n tree_util.tree_map(check_arg, args)\n\n args_flat, in_tree = tree_util.tree_flatten((args, {}))\n # May need to cast the arguments to have the type assumed by JAX\n args_and_dtypes_flat = tuple(map(_tfval_to_tensor_jax_dtype, args_flat))\n args_flat, arg_dtypes_flat = util.unzip2(args_and_dtypes_flat)\n # Name input tensors; do this after we have cast the arguments\n def _apply_name(a: TfVal, suffix) -> TfVal:\n return tf.identity(a, f\"jax2tf_arg_{suffix}\")\n args_flat = tuple(_apply_name(a, i) for i, a in enumerate(args_flat))\n\n if polymorphic_shapes is None:\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args)\n 
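# Note: a single specification (None, a PolyShape, or a str) is applied to\n    # every argument; an explicit sequence must match the number of positional\n    # arguments and is padded with None for the keyword arguments.\n    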
elif isinstance(polymorphic_shapes, (PolyShape, str)):\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args) # type: ignore\n else:\n if not isinstance(polymorphic_shapes, Sequence) or len(polymorphic_shapes) != len(args) - len(kw_names):\n msg = (\"polymorphic_shapes must be a sequence with the same length as the positional argument list \"\n f\"({len(args)}). Got polymorphic_shapes={repr(polymorphic_shapes)}.\")\n raise TypeError(msg)\n polymorphic_shapes_ = tuple(polymorphic_shapes) + (None,) * len(kw_names)\n\n # Expand the polymorphic_shapes to match the argument pytree\n polymorphic_shapes_flat = tuple(api_util.flatten_axes(\"jax2tf.convert polymorphic_shapes\",\n in_tree.children()[0],\n polymorphic_shapes_))\n\n def fix_tf1_shape(arg: TfVal) -> Sequence[Optional[int]]:\n tf_arg_shape = np.shape(arg)\n return tuple(d.value if isinstance(d, tf.compat.v1.Dimension) else d for d in tf_arg_shape)\n args_shapes_flat = tuple(fix_tf1_shape(a) for a in args_flat)\n\n # Construct the abstract values for the flat arguments, possibly based on\n # the input shapes and the polymorphic_shapes if given. May create new shape\n # variables. May cast the args_flat to JAX types, using JAX's interpretation\n # of types of constants.\n args_avals_flat = shape_poly.args_avals(\n args_shapes_flat, arg_dtypes_flat, polymorphic_shapes_flat)\n\n dim_vars, get_dim_values = shape_poly.prepare_dim_var_env(args_avals_flat)\n dim_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(get_dim_values),\n args_flat, args_avals_flat, \"\"))\n shape_env = zip(dim_vars, dim_values)\n\n # This function may take pytrees of TfVals. We can only set\n # tf.custom_gradient on functions that take a flat argument list.\n f = lu.wrap_init(fun_no_kwargs)\n # out_tree_thunk() will be the output tree, after running _interpret_fun.\n flat_fun, out_tree_thunk = api_util.flatten_fun(f, in_tree)\n # out_tree_thunk will be ready after _interpret_fun below.\n\n # Prepare the grad_fn for tf.custom_gradient.\n def converted_grad_fn(*out_cts_flat: TfVal,\n _out_cts_avals: Sequence[core.ShapedArray],\n variables=None):\n if variables:\n raise ValueError(\n \"Unexpected variables used in forward pass. \"\n \"This should not happen for first-order differentiation. \"\n f\"variables={variables}\")\n\n out_tree = out_tree_thunk()\n if polymorphic_shapes is None:\n vjp_polymorphic_shapes = None\n else:\n args_flat_polymorphic_shapes = polymorphic_shapes_flat\n out_cts_flat_polymorphic_shapes = tuple(str(out_aval.shape) # Note: may be polynomials, not just DimVar\n for out_aval in _out_cts_avals) # type: ignore\n vjp_polymorphic_shapes = [\n args_flat_polymorphic_shapes, out_cts_flat_polymorphic_shapes\n ]\n\n def fun_vjp_jax(args_flat_jax, out_cts_flat_jax):\n # One may think that we can get the pullback while we are converting\n # the main function in the first place. That is problematic, because the\n # pullback may contain captured tracers from the conversion of the\n # main function. Those tracers will confuse the conversion of the\n # pullback. So, we construct the vjp anew and we convert it separately.\n args_jax, kwargs_jax = tree_util.tree_unflatten(in_tree, args_flat_jax)\n assert not kwargs_jax\n _, pullback_jax = jax.vjp(fun_no_kwargs, *args_jax)\n\n def fix_out_ct(out_ct_jax, out_ct_aval: core.ShapedArray):\n # If the primal function has outputs of integer or bool types, and if we are\n # under a tf.function context, then TF will pass None in _out_cts_flat\n # in place of these values. 
We should change these to float0 or\n # else JAX gets unhappy. See issue #6975.\n if out_ct_jax is not None:\n return out_ct_jax\n assert core.primal_dtype_to_tangent_dtype(out_ct_aval.dtype) == dtypes.float0, f\"out_ct={out_ct_jax}\"\n # Note that out_ct_aval.shape contains dimension variable from the\n # primal function scope. It is Ok to use them here because we\n # use the same shape variables for the VJP function.\n return jnp.zeros(out_ct_aval.shape, dtype=_tf_np_dtype_for_float0)\n\n out_cts_fixed_flat = tuple(map(fix_out_ct, out_cts_flat_jax, _out_cts_avals))\n\n out_cts_fixed = tree_util.tree_unflatten(out_tree, out_cts_fixed_flat)\n in_cts_jax = pullback_jax(out_cts_fixed)\n\n in_cts_flat_jax, in_cts_tree = tree_util.tree_flatten(in_cts_jax)\n def fix_in_ct(in_ct, arg_aval: core.ShapedArray):\n if np.issubdtype(arg_aval.dtype, np.inexact):\n return in_ct\n else:\n assert in_ct.dtype == dtypes.float0\n return jnp.zeros(arg_aval.shape, _tf_np_dtype_for_float0)\n\n in_cts_fixed_flat_jax = tuple(map(fix_in_ct, in_cts_flat_jax, args_avals_flat))\n return in_cts_fixed_flat_jax\n\n # TODO: enable higher-order gradients\n with tf.name_scope(\"jax2tf_vjp\"):\n in_cts_flat = convert(\n fun_vjp_jax,\n with_gradient=False,\n polymorphic_shapes=vjp_polymorphic_shapes)(args_flat, out_cts_flat)\n in_cts, kwin_cts = tree_util.tree_unflatten(in_tree, in_cts_flat)\n assert not kwin_cts\n return in_cts\n\n try:\n assert not _thread_local_state.shape_env, f\"Unexpected shape environment {_thread_local_state.shape_env}\"\n\n prev_enable_xla = _thread_local_state.enable_xla\n _thread_local_state.enable_xla = enable_xla\n\n prev_include_xla_op_metadata = _thread_local_state.include_xla_op_metadata\n _thread_local_state.include_xla_op_metadata = False\n\n _thread_local_state.shape_env = shape_env\n global _has_registered_tf_source_path\n if not _has_registered_tf_source_path:\n source_info_util.register_exclusion(os.path.dirname(tf.__file__))\n _has_registered_tf_source_path = True\n\n if with_gradient:\n\n @tf.custom_gradient\n def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack,\n fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n return (tuple(outs),\n partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))\n\n out_flat = converted_fun_flat_with_custom_gradient(*args_flat)\n else:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack, fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n message = (\"The jax2tf-converted function does not support gradients. \"\n \"Use `with_gradient` parameter to enable gradients\")\n # We use PreventGradient, which is propagated through a SavedModel.\n out_flat = [\n tf.raw_ops.PreventGradient(input=o, message=message)\n for o in outs\n ]\n finally:\n _thread_local_state.shape_env = ()\n _thread_local_state.enable_xla = prev_enable_xla\n _thread_local_state.include_xla_op_metadata = prev_include_xla_op_metadata\n\n out_flat = [tf.identity(x, \"jax2tf_out\") for x in out_flat]\n out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)\n return out\n\n return converted_fun\n\n\ndef dtype_of_val(val: TfVal) -> DType:\n \"\"\"Computes the TensorFlow dtype using JAX's typing rules.\n\n If the value is a tf.Tensor, it starts with its dtype. If the value is a\n constant it uses JAX to infer its dtype. 
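For example, with the default ``JAX_ENABLE_X64=False`` the Python constant\n  ``1.0`` is inferred as a 32-bit float and ``1`` as a 32-bit integer. 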
The resulting dtype follows the\n JAX type inference rules, and depends on the value of the\n JAX_ENABLE_X64 flag.\n\n See README.md for how 64-bit values are treated.\n \"\"\"\n tval, _ = _tfval_to_tensor_jax_dtype(val)\n return tval.dtype\n\n# Internals\n\[email protected]\ndef _extended_name_stack(extra_name_stack: Optional[str]):\n prev_name_stack = _thread_local_state.name_stack\n if extra_name_stack:\n if not prev_name_stack:\n _thread_local_state.name_stack = extra_name_stack\n else:\n _thread_local_state.name_stack = util.extend_name_stack(\n _thread_local_state.name_stack, extra_name_stack)\n try:\n yield\n finally:\n _thread_local_state.name_stack = prev_name_stack\n\n\ndef _interpret_fun(\n fun: lu.WrappedFun, in_vals: Sequence[TfVal],\n in_avals: Sequence[core.ShapedArray],\n extra_name_stack: Optional[str],\n fresh_constant_cache: bool = False\n) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n with core.new_base_main(TensorFlowTrace) as main: # type: ignore\n fun = _interpret_subtrace(fun, main, in_avals)\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(fun, in_vals,\n fresh_constant_cache=fresh_constant_cache)\n\n del main\n\n return tuple(out_vals)\n\ndef _call_wrapped_with_new_constant_cache(fun: lu.WrappedFun,\n in_vals: Sequence[TfVal],\n fresh_constant_cache: bool = False\n ) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n try:\n prev_constant_cache = _thread_local_state.constant_cache\n prev_constant_cache_keys = set(prev_constant_cache.keys()) if prev_constant_cache is not None else set()\n # Start a new cache, so that we don't share constants across tf.function\n # boundaries.\n if fresh_constant_cache:\n _thread_local_state.constant_cache = {}\n\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n fun.call_wrapped(*in_vals)\n finally:\n if prev_constant_cache is not None and not fresh_constant_cache:\n newly_added_keys = set(prev_constant_cache.keys()) - prev_constant_cache_keys\n # Delete the newly added keys\n for k in newly_added_keys:\n del prev_constant_cache[k]\n _thread_local_state.constant_cache = prev_constant_cache\n return out_vals\n\ndef _convert_jax_impl(jax_impl: Callable, *,\n multiple_results=True,\n extra_name_stack: Optional[str] = None) -> Callable:\n \"\"\"Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. 
This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n extra_name_stack: additional element to add to the name stack for the\n converted ops.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n \"\"\"\n\n def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> Sequence[TfVal]:\n\n # We wrap the jax_impl under _interpret_fun to abstract the TF values\n # from jax_impl and turn them into JAX abstract values.\n def jax_impl_jax_args(*jax_args):\n jax_results = jax_impl(*jax_args, **kwargs)\n return jax_results if multiple_results else [jax_results]\n\n tf_results_with_avals = _interpret_fun(\n lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals,\n extra_name_stack)\n tf_results, _ = util.unzip2(tf_results_with_avals)\n return tf_results if multiple_results else tf_results[0]\n\n return wrapped\n\n\[email protected]\ndef _interpret_subtrace(main: core.MainTrace,\n in_avals: Sequence[core.ShapedArray],\n *in_vals: TfVal):\n trace = TensorFlowTrace(main, core.cur_sublevel())\n in_tracers = tuple(\n TensorFlowTracer(trace, val, aval)\n for val, aval in zip(in_vals, in_avals))\n # The outs may be core.unit, see comment in TensorFlowTrace.pure.\n outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]\n out_tracers: Iterable[TensorFlowTracer] = (\n map(trace.full_raise, outs)) # type: ignore\n out_vals_with_avals: Sequence[Tuple[TfVal, core.ShapedArray]] = (\n tuple((t.val, t.aval) for t in out_tracers))\n yield out_vals_with_avals\n\n\ndef _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal,\n extra_name_stack: Optional[str]) -> Sequence[TfVal]:\n \"\"\"Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n \"\"\"\n fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals, extra_name_stack)\n return tuple(v for v, _ in out_with_avals)\n\n\ndef _aval_to_tf_shape(aval: core.ShapedArray) -> Tuple[Optional[int], ...]:\n \"\"\"Generate a TF shape, possibly containing None for polymorphic dimensions.\"\"\"\n return tuple(map(lambda d: None if shape_poly.is_poly_dim(d) else d,\n aval.shape)) # type: ignore[attr-defined]\n\n# In the TF world, we represent float0 as zeros of this type.\n_tf_np_dtype_for_float0 = np.int32\n\ndef _to_tf_dtype(jax_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n if jax_dtype == dtypes.float0:\n jax_dtype = _tf_np_dtype_for_float0\n return tf.dtypes.as_dtype(jax_dtype)\n\n\ndef _to_jax_dtype(tf_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n return dtypes.canonicalize_dtype(tf_dtype.as_numpy_dtype)\n\n\ndef _tfval_to_tensor_jax_dtype(val: TfVal,\n jax_dtype: Optional[DType] = None,\n memoize_constants=False) -> Tuple[TfVal, DType]:\n \"\"\"Converts a scalar, ndarray, or tf.Tensor to a tf.Tensor with proper type.\n\n If `jax_dtype` is missing, uses JAX typing rules.\n See README.md for details regarding 64-bit values.\n\n Args:\n val: a scalar, ndarray, tf.Tensor, or tf.Variable\n jax_dtype: an optional dtype to use. If missing, uses JAX type inference\n rules for constants.\n memoize_constants: whether to memoize TF constants. 
We can't do this\n everywhere, we may be outside of a conversion scope.\n\n Returns:\n a tuple with a tf.Tensor with the type as needed by JAX, and the JAX type.\n \"\"\"\n if isinstance(val, (tf.Tensor, tf.Variable)):\n jax_dtype = jax_dtype or _to_jax_dtype(val.dtype) # Give JAX a chance to pick the type\n conversion_dtype = _to_tf_dtype(jax_dtype)\n if conversion_dtype != val.dtype:\n return tf.cast(val, conversion_dtype), jax_dtype\n else:\n return val, jax_dtype\n else: # A constant\n jax_dtype = jax_dtype or xla.abstractify(val).dtype\n # TODO(document): We assume that the value of a constant does not\n # change through the scope of the function. But it may be an ndarray, ...\n # JAX has the same problem when generating HLO.\n const_key = (id(val), jax_dtype)\n # Since we use id(val) as a cache key, we have to make sure that we keep\n # the previous `val` alive. Otherwise, for an ndarray, it can get garbage\n # collected and reused for a different value, which would create correctness\n # issues. We keep the `val` alive by storing in the cache the pair\n # `(val, tf_val)`.\n do_memoize = (memoize_constants and np.shape(val) and _thread_local_state.constant_cache is not None)\n if do_memoize:\n _, tf_val = _thread_local_state.constant_cache.get(const_key, (None, None))\n else:\n tf_val = None\n if tf_val is None:\n conversion_dtype = _to_tf_dtype(jax_dtype)\n # The float0 type is not known to TF.\n if jax_dtype == dtypes.float0:\n val = np.zeros(np.shape(val), conversion_dtype.as_numpy_dtype)\n tf_val = tf.convert_to_tensor(val, dtype=conversion_dtype)\n if do_memoize:\n _thread_local_state.constant_cache[const_key] = (val, tf_val)\n return tf_val, jax_dtype\n\n\ndef _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:\n assert all(map(lambda x: x is not None, shape)), (\n f\"Argument shape should be a valid JAX shape but got {shape}\")\n dim_vars, dim_values = util.unzip2(_thread_local_state.shape_env)\n eval_shape, dim_avals = shape_poly.get_shape_evaluator(dim_vars, shape)\n shape_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(eval_shape),\n dim_values, dim_avals, \"\")) # type: ignore\n return shape_values\n\n\n# TODO(b/26854495): pylint doesn't understand slots and inheritance.\n# pylint: disable=assigning-non-slot\n\n\nclass TensorFlowTracer(core.Tracer):\n \"\"\"Tracer class that boxes a TF value and a JAX abstract value.\n\n In addition to the TF value we carry the JAX abstract value because there are\n two cases when it cannot be recovered from the value: (a) when the abstract\n value is core.abstract_unit, in which case the value is tf.nan; (b) when we\n are converting with polymorphic shapes, in which case the shape of the value\n may have dimensions set to `None`, which the JAX abstract value may contain\n more precise information.\n\n When the value has a partially-known shape, the dimensions marked as `None`\n must correspond to non-constant dimensions in the abstract value.\n\n See README.md for details.\n \"\"\"\n # val: TfVal\n # _aval: core.ShapedArray\n __slots__ = [\"val\", \"_aval\"]\n\n def __init__(self, trace: \"TensorFlowTrace\", val: TfVal,\n aval: core.AbstractValue):\n self._trace = trace\n self._aval = aval\n if aval is core.abstract_unit:\n self.val = val\n return\n\n if isinstance(val, (tf.Tensor, tf.Variable)):\n val_shape = val.shape\n\n if config.jax_enable_checks:\n assert len(self._aval.shape) == len(val_shape), f\"_aval.shape={self._aval.shape} different rank than val_shape={val_shape}\"\n # To compare types, we must handle 
float0 in JAX and x64 in TF\n if self._aval.dtype == dtypes.float0:\n assert _to_tf_dtype(self._aval.dtype) == val.dtype, f\"expected {self._aval.dtype} == {val.dtype}\"\n else:\n assert self._aval.dtype == _to_jax_dtype(val.dtype), f\"expected {self._aval.dtype} == {val.dtype}\"\n\n for aval_dim, val_dim in zip(self._aval.shape, val_shape): # type: ignore[attr-defined]\n if val_dim is None:\n assert shape_poly.is_poly_dim(aval_dim), f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n elif not shape_poly.is_poly_dim(aval_dim):\n assert aval_dim == val_dim, f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n else:\n # We have a TF value with known shape, and the abstract shape is a shape variable.\n try:\n aval_int = int(_eval_shape([aval_dim])) # type: ignore\n except (TypeError, KeyError):\n continue\n assert aval_int == val_dim, f\"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}.\" # type: ignore\n\n self.val = _tfval_to_tensor_jax_dtype(val,\n self._aval.dtype,\n memoize_constants=True)[0] # type: ignore[attr-defined]\n\n @property\n def aval(self):\n return self._aval\n\n def full_lower(self):\n return self\n\n\nclass TensorFlowTrace(core.Trace):\n \"\"\"Trace class that underlies the jax2tf transformation.\n\n We are going to ensure that jax2tf.convert is never nested inside other\n transformations. This is sufficient for intended use cases (converting\n fully-transformed JAX code). It also simplifies our job because we do not have\n to handle situations where we apply primitives on a mix of TF values and\n JAX tracers from an outer transformation. E.g., for addition both the TF\n values\n and the JAX tracers have an override and they get confused if they see values\n from the other world.\n\n Hence a TFT trace does not interact with non-TFT traces at lower-level. For\n higher-order control-flow primitives we invoke recursively\n _interpret_fun on the body of the conditional, which will create a nested TFT.\n\n We do want to allow transformations nested inside a TensorFlowTrace (TFT), but\n those will introduce their own MainTrace, and any operations involving those\n will be done on those traces, i.e., not a concern for TFT.\n \"\"\"\n def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:\n \"\"\"Lifts a non-Tracer into the TensorFlowTracer.\n\n This function may be called by way of trace.full_raise.\n\n The value may be a core.unit. During JAX transformations we sometimes\n produce a Jaxpr that has arguments of abstract value core.abstract_unit\n and results equal to core.unit. These are arguments and results that are\n not used in the computation.\n\n In TF world, we represent core.unit as NaN. This is safe, as these values\n should never be used.\n \"\"\"\n if val is core.unit:\n return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),\n core.abstract_unit)\n else:\n tf_val, jax_dtype = _tfval_to_tensor_jax_dtype(val, memoize_constants=True)\n return TensorFlowTracer(\n self, val, core.ShapedArray(tf_val.shape, jax_dtype,\n weak_type=dtypes.is_weakly_typed(val)))\n\n def lift(self, val: core.Tracer) -> TensorFlowTracer:\n # This would be called when we need to raise a tracer from a lower-level\n # main into the TensorFlowTrace. 
Since the TensorFlowTrace is never nested\n # inside another transform, there are no lower-level main traces.\n assert False\n\n def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:\n # This is called when we need to raise a tracer from the same main,\n # but a lower sublevel. This could come from a nested jit.\n return TensorFlowTracer(self, val.val, val._aval)\n\n def process_primitive(self, primitive: core.Primitive,\n tracers: Sequence[TensorFlowTracer],\n params) -> TensorFlowTracer:\n impl, impl_needs_avals = self.get_primitive_impl(primitive)\n args_avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n # This is a bit conservative, doing abstract_eval even in op-by-op execution\n # but we needed it for, e.g., shape_polymorphism where only JAX's\n # abstract evaluation rules can properly track polymorphic shapes.\n # Unfortunately under op-by-op execution this is a rare occasion where we\n # need abstract evaluation.\n out_aval = primitive.abstract_eval(*args_avals, **params)\n args_tf: Sequence[TfVal] = [t.val for t in tracers]\n def invoke_impl() -> TfVal:\n if impl_needs_avals:\n return impl(\n *args_tf,\n _in_avals=args_avals, # type: ignore\n _out_aval=out_aval,\n **params)\n else:\n return impl(*args_tf, **params)\n\n if _thread_local_state.include_xla_op_metadata:\n op_metadata = xla.make_op_metadata(primitive, params,\n name_stack=_get_current_name_stack(),\n source_info=source_info_util.current())\n op_metadata_proto = xla_data_pb2.OpMetadata(\n op_type=op_metadata.op_type,\n op_name=op_metadata.op_name,\n source_file=op_metadata.source_file,\n source_line=op_metadata.source_line\n )\n with tf_ops.get_default_graph()._attr_scope(\n {\"_XlaOpMetadata\": attr_value_pb2.AttrValue(\n s=op_metadata_proto.SerializeToString())}):\n val_out = invoke_impl()\n else:\n val_out = invoke_impl()\n\n if primitive.multiple_results:\n out = [\n TensorFlowTracer(self, v, a)\n for v, a in zip(val_out, out_aval)\n ] # type: ignore\n else:\n out = TensorFlowTracer(self, val_out, out_aval) # type: ignore\n\n # Check that the impl rule returned a value of expected shape and dtype\n # TODO: adapt this to match polymorphic shapes\n if config.jax_enable_checks:\n if primitive.multiple_results:\n for o, expected_aval in zip(out, out_aval): # type: ignore\n assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (\n f\"{primitive}: out.aval = {o.aval}; expected {expected_aval}\")\n else:\n assert out.aval == out_aval, ( # type: ignore\n f\"{primitive}: out.aval = {out.aval}; expected {out_aval}\"\n ) # type: ignore\n return out # type: ignore\n\n def process_call(self, call_primitive: core.Primitive, fun: lu.WrappedFun,\n tracers: Sequence[TensorFlowTracer], params):\n assert call_primitive.multiple_results\n vals: Sequence[TfVal] = [t.val for t in tracers]\n avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n interpreted_fun = _interpret_subtrace(fun, self.main, avals)\n extra_name_stack = None\n if call_primitive == core.named_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"named\")\n elif call_primitive == xla.xla_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"jit\")\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n if call_primitive == core.named_call_p:\n with tf.name_scope(_sanitize_scope_name(params[\"name\"])):\n vals_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n interpreted_fun.call_wrapped(*vals)\n elif call_primitive == sharded_jit.sharded_call_p:\n vals_out = 
_sharded_call(interpreted_fun, vals, **params)\n elif call_primitive == xla.xla_call_p:\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n # Make a nested tf.function(jit_compile=True)\n store_tf_res_avals = None\n def f_tf(*tf_args):\n nonlocal store_tf_res_avals\n tf_res_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(interpreted_fun, tf_args,\n fresh_constant_cache=False)\n tf_res_vals, tf_res_avals = util.unzip2(tf_res_out)\n store_tf_res_avals = tf_res_avals\n return tf_res_vals\n tf_vals_out = tf.function(f_tf, autograph=False, jit_compile=True)(*vals)\n vals_out = zip(tf_vals_out, store_tf_res_avals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n return [TensorFlowTracer(self, v, a) for v, a in vals_out]\n\n def post_process_call(self, call_primitive: core.Primitive,\n out_tracers: Sequence[TensorFlowTracer], params):\n # We encountered a call primitive, e.g., remat_call_p, whose result\n # (out_tracers) include TensorFlowTracer that were not passed through\n # its arguments (captured from the environment).\n vals = tuple(t.val for t in out_tracers)\n main = self.main\n\n def todo(vals: Sequence[TfVal]):\n # TODO: is name_stack correct?\n trace = TensorFlowTrace(main, core.cur_sublevel())\n return [\n TensorFlowTracer(trace, v, out_tracer.aval)\n for v, out_tracer in zip(vals, out_tracers)\n ]\n\n return vals, todo\n\n def process_map(self, map_primitive, f, tracers, params):\n raise NotImplementedError(\"process_map\")\n\n def post_process_map(self, map_primitive, out_tracers, params):\n raise NotImplementedError(\"post_process_map\")\n\n def process_custom_jvp_call(self, prim, fun, jvp, tracers):\n # Drop the custom differentiation rule and act like a call primitive. This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del jvp # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_jvp_call(self, out_tracers, params):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):\n # Drop the custom differentiation rule and act like a call primitive. 
This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del fwd, bwd, out_trees # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_vjp_call(self, out_tracers, params):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:\n # Returns the primitive implementation and whether the implementation\n # takes abstract values (see definition of tf_impl_with_avals)\n if not _thread_local_state.enable_xla:\n try:\n return tf_impl_no_xla[p], True # Always require avals.\n except KeyError:\n pass\n try:\n return tf_impl[p], False\n except KeyError:\n try:\n return tf_impl_with_avals[p], True\n except KeyError as err:\n msg = \"TensorFlow interpretation rule for '{}' not implemented\"\n raise NotImplementedError(msg.format(p)) from err\n\ndef _unexpected_primitive(p: core.Primitive, *args, **kwargs):\n assert False, f\"Encountered unexpected primitive {p}\"\n\n\n# Call primitives are inlined\nfor unexpected in [core.call_p, core.named_call_p, xla.xla_call_p,\n partial_eval.remat_call_p, sharded_jit.sharded_call_p,\n maps.xmap_p]:\n tf_impl[unexpected] = partial(_unexpected_primitive, unexpected)\n\n# Primitives that are not yet implemented must be explicitly declared here.\ntf_not_yet_impl = [\n \"clz\",\n \"igamma_grad_a\",\n \"random_gamma_grad\",\n \"reduce_precision\",\n \"schur\",\n \"name\",\n\n # Not high priority?\n \"after_all\",\n \"all_to_all\",\n \"create_token\",\n \"infeed\",\n \"linear_call\",\n \"outfeed\",\n \"pmax_p\",\n \"pmin\",\n \"ppermute\",\n \"psum\",\n \"pmax\",\n \"pgather\",\n \"reduce_scatter\",\n \"axis_index\",\n \"pdot\",\n \"all_gather\",\n \"lu_pivots_to_permutation\",\n \"xla_pmap\",\n]\n\ntf_impl[ad_util.stop_gradient_p] = tf.stop_gradient\ntf_impl[ad_util.zeros_like_p] = tf.zeros_like\n\n\ndef _add(x: TfVal, y: TfVal) -> TfVal:\n return tf.raw_ops.AddV2(x=x, y=y)\n\n\ntf_impl[ad_util.add_jaxvals_p] = _add\ntf_impl[dispatch.device_put_p] = lambda x, device=None: x\n\ndef _neg(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[x.dtype]\n x_signed = tf.cast(x, signed_dtype)\n res_signed = tf.math.negative(x_signed)\n return tf.cast(res_signed, x.dtype)\n else:\n return tf.math.negative(x)\n\ntf_impl[lax.neg_p] = _neg\n\n\ndef _sign(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n # TF and XLA do not support tf.math.sign for unsigned types.\n return tf.where(\n tf.math.equal(x, 0), tf.constant(0, dtype=x.dtype),\n tf.constant(1, dtype=x.dtype))\n else:\n return tf.math.sign(x)\n\n\ntf_impl[lax.sign_p] = _sign\ntf_impl[lax.floor_p] = tf.math.floor\ntf_impl[lax.ceil_p] = tf.math.ceil\n\n\ndef _round(operand, *, rounding_method,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:\n # JAX uses a single HLO op Round here\n sign = _sign(operand)\n operand *= sign\n floor = tf.math.floor(operand)\n operand -= floor\n cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))\n return sign * (\n tf.where(cond, tf.constant(np.array(1), operand.dtype),\n tf.math.round(operand)) + floor)\n else: # rounding_method is RoundingMethod.TO_NEAREST_EVEN\n rounding_fun = _convert_jax_impl(\n lax_internal._round_to_nearest_even, multiple_results=False)\n return rounding_fun(operand, _in_avals=_in_avals, 
_out_aval=_out_aval)\n\ntf_impl_with_avals[lax.round_p] = _round\ntf_impl[lax.nextafter_p] = tf.math.nextafter\n\n\ndef _population_count(x):\n orig_dtype = x.dtype\n return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)\n\n\ntf_impl[lax.population_count_p] = _population_count\ntf_impl[lax.is_finite_p] = tf.math.is_finite\n\n\ndef _abs(x: TfVal) -> TfVal:\n # TF and XLA do not support tf.math.abs for unsigned types.\n return tf.math.abs(x) if not x.dtype.is_unsigned else x\n\n\ntf_impl[lax.abs_p] = _abs\ntf_impl[lax.pow_p] = tf.math.pow\n\n\ndef _integer_pow(x, *, y: int, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follows the implementation in lax._integer_pow_translation_rule\n if y == 0:\n return tf.broadcast_to(\n tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))\n is_reciprocal = y < 0\n if is_reciprocal:\n y = -y\n acc = None\n while y > 0:\n if y & 1:\n acc = x if acc is None else tf.math.multiply(acc, x)\n y >>= 1\n if y > 0:\n x = tf.math.multiply(x, x)\n return tf.math.reciprocal(acc) if is_reciprocal else acc\n\n\ntf_impl_with_avals[lax.integer_pow_p] = _integer_pow\ntf_impl[lax.exp_p] = tf.math.exp\ntf_impl[lax.expm1_p] = tf.math.expm1\ntf_impl[lax.log_p] = tf.math.log\ntf_impl[lax.log1p_p] = tf.math.log1p\ntf_impl[lax.tan_p] = tf.math.tan\ntf_impl[lax.tanh_p] = tf.math.tanh\ntf_impl[lax.sin_p] = tf.math.sin\ntf_impl[lax.sinh_p] = tf.math.sinh\ntf_impl[lax.cos_p] = tf.math.cos\ntf_impl[lax.cosh_p] = tf.math.cosh\ntf_impl_with_avals[lax.acos_p] = _convert_jax_impl(\n lax_internal.acos_translation_rule, multiple_results=False)\ntf_impl_with_avals[lax.asin_p] = _convert_jax_impl(\n lax_internal.asin_translation_rule, multiple_results=False)\ntf_impl_with_avals[lax.atan_p] = _convert_jax_impl(\n lax_internal.atan_translation_rule, multiple_results=False)\n\ndef _atan2(y, x, **kwargs):\n if x.dtype.is_complex or y.dtype.is_complex:\n complex_component_dtype = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(y.dtype)\n zero = tf.constant(0, complex_component_dtype)\n one = tf.constant(1, complex_component_dtype)\n i = tf.complex(zero, one)\n return -i * tf.math.log((x + i * y)/tf.math.sqrt(x * x + y * y))\n else:\n return tf.math.atan2(y, x)\n\n\ntf_impl[lax.atan2_p] = _atan2\ntf_impl[lax.acosh_p] = tf.math.acosh\ntf_impl[lax.atanh_p] = tf.math.atanh\ntf_impl[lax.asinh_p] = tf.math.asinh\n\ntf_impl[lax.sqrt_p] = tf.math.sqrt\ntf_impl[lax.rsqrt_p] = tf.math.rsqrt\n\ndef _cbrt(x):\n return tf.math.sign(x) * tf.math.pow(tf.math.abs(x), 1/3)\n\ntf_impl[lax.cbrt_p] = _cbrt\n\ntf_impl[lax.lgamma_p] = tf.math.lgamma\ntf_impl[lax.digamma_p] = tf.math.digamma\ntf_impl[lax.igamma_p] = tf.math.igamma\ntf_impl[lax.igammac_p] = tf.math.igammac\ntf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc\ntf_impl[lax.erf_p] = tf.math.erf\ntf_impl[lax.erfc_p] = tf.math.erfc\ntf_impl[lax.erf_inv_p] = tf.math.erfinv\ntf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e\ntf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e\n\ntf_impl[lax.complex_p] = tf.complex\n\n\ndef _conj(x, **kwargs):\n # The only dtypes that are allowed are: float32, float64, complex64, and\n # complex128.\n if x.dtype == tf.float32:\n return tf.cast(x, tf.complex64)\n elif x.dtype == tf.float64:\n return tf.cast(x, tf.complex128)\n else:\n return tf.math.conj(x)\n\n\ntf_impl[lax.conj_p] = _conj\ntf_impl[lax.real_p] = tf.math.real\ntf_impl[lax.imag_p] = tf.math.imag\n\ntf_impl[lax.add_p] = _add\ntf_impl[lax.sub_p] = tf.math.subtract\ntf_impl[lax.mul_p] = 
tf.math.multiply\n\n\ndef _iota(*, dtype, shape, dimension):\n dtype = _to_tf_dtype(dtype)\n # Some dtypes are unsupported, like uint32, so we just fall back to int32.\n # TODO(mattjj, necula): improve tf.range dtype handling\n shape_tf = _eval_shape(shape)\n vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)\n vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]\n return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)\n\n\ntf_impl[lax.iota_p] = _iota\n\n\ndef _div(lhs, rhs):\n if lhs.dtype.is_integer:\n quotient = tf.math.floordiv(lhs, rhs)\n select = tf.math.logical_and(\n tf.not_equal(_sign(lhs), _sign(rhs)),\n tf.not_equal(tf.math.floormod(lhs, rhs), 0))\n return tf.where(select, quotient + 1, quotient)\n else:\n return tf.math.truediv(lhs, rhs)\n\n\ndef _rem(lhs, rhs):\n return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))\n\n\ntf_impl[lax.div_p] = _div\ntf_impl[lax.rem_p] = _rem\n\n\ndef _minmax(x: TfVal, y: TfVal, *, is_min: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,) -> TfVal:\n # For complex numbers use lexicographic ordering, like JAX\n if dtypes.issubdtype(x.dtype.as_numpy_dtype, np.complexfloating):\n return _convert_jax_impl(\n partial(lax_internal._minmax_complex_lowering,\n lax_cmp_pick_x=lax.lt if is_min else lax.gt),\n multiple_results=False)(x, y, _in_avals=_in_avals, _out_aval=_out_aval)\n elif x.dtype.as_numpy_dtype == np.bool_:\n return (tf.math.logical_and if is_min else tf.math.logical_or)(x, y)\n else:\n return (tf.math.minimum if is_min else tf.math.maximum)(x, y)\n\ndef _minmax_scalar(x: TfVal, y: TfVal, *, is_min: bool) -> TfVal:\n # For reducers we will need min/max for scalars only. In that case we\n # can construct the AbstractValues outselves, even in the presence of\n # shape polymorphism.\n assert len(x.shape) == 0 and len(y.shape) == 0, f\"x: {x.shape}, y: {y.shape}\"\n aval = core.ShapedArray((), _to_jax_dtype(x.dtype))\n return _minmax(x, y, is_min=is_min,\n _in_avals=[aval, aval], _out_aval=aval)\n\ntf_impl_with_avals[lax.max_p] = partial(_minmax, is_min=False)\ntf_impl_with_avals[lax.min_p] = partial(_minmax, is_min=True)\n\n# Map from TF signed types to TF unsigned types.\n_SIGNED_TO_UNSIGNED_TABLE = {\n tf.int8: tf.uint8,\n tf.int16: tf.uint16,\n tf.int32: tf.uint32,\n tf.int64: tf.uint64,\n}\n\n# Map from TF unsigned types to TF signed types.\n_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}\n\n\n# Note: Bitwise operations only yield identical results on unsigned integers!\n# pylint: disable=protected-access\ndef _shift_right_arithmetic_raw(x, y):\n if x.dtype.is_unsigned:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]\n x = tf.cast(x, signed_dtype)\n y = tf.cast(y, signed_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n else:\n return tf.bitwise.right_shift(x, y)\n\n\ndef _shift_right_arithmetic(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. 
We implement the XLA\n # semantics to return the shift by the max value (x_bits - 1).\n # TODO: it is likely better to add XlaOps for shifts\n x_bits = 8 * x.dtype.size\n clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)\n return _shift_right_arithmetic_raw(x, clamp_y)\n\n\ntf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic\n\n\ndef _shift_right_logical_raw(x, y):\n if x.dtype.is_unsigned:\n return tf.bitwise.right_shift(x, y)\n else:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]\n x = tf.cast(x, unsigned_dtype)\n y = tf.cast(y, unsigned_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n\n\ndef _shift_right_logical(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_right_logical_p] = _shift_right_logical\n\n\ndef _shift_left(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_left_p] = _shift_left\n\n\ndef _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:\n # Return the TF expression for when y is within bounds (0 <= y < |x|)\n x_bits = 8 * x.dtype.size\n # TF does not have comparisons for uint16 and uint32 (despite what the\n # documentation says)\n y_comp = tf.cast(\n y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y\n y_lt_x_bits = tf.math.less(y_comp, x_bits)\n y_ge_0 = tf.math.greater_equal(y_comp, 0)\n return tf.logical_and(y_lt_x_bits, y_ge_0)\n\n\ndef _not(x):\n \"\"\"Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexpected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply casted to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n \"\"\"\n if x.dtype == tf.bool:\n return tf.logical_not(x)\n else:\n return tf.bitwise.invert(x)\n\n\ntf_impl[lax.not_p] = _not\n\n\ndef bool_to_int8(f, argnums: Sequence[int]):\n \"\"\"Computes functions with some bool args and bool results using int8.\n\n This is needed because some TF ops do not work for bool args, e.g.,\n inequalities, min/max.\n\n Args:\n f: a TF callable to wrap. It will be called with non-boolean arguments.\n argnums: the positional arguments that may be booleans.\n\n Returns: a TF callable that can take a mix of boolean positional arguments\n (in the positions specified by `argnums`) and some non-boolean positional\n arguments. If there are no boolean arguments, just calls `f`. 
Otherwise,\n casts the boolean arguments to `int8`, calls `f`, then casts the result to\n `bool`.\n \"\"\"\n argnums = tf.nest.flatten(argnums)\n\n def wrapper(*args: TfVal, **kwargs):\n argnum_types = {args[i].dtype for i in argnums}\n if tf.bool not in argnum_types:\n return f(*args, **kwargs)\n else:\n # All argnums should be boolean\n assert len(argnum_types) == 1, argnum_types\n args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)\n for i, a in enumerate(args)]\n if \"_in_avals\" in kwargs:\n\n def cast_aval(aval):\n assert aval.dtype == np.bool_\n return core.ShapedArray(aval.shape, np.int8)\n\n _in_avals_cast = [\n cast_aval(aval) if i in argnums else aval\n for i, aval in enumerate(kwargs[\"_in_avals\"])\n ]\n _out_aval_cast = tf.nest.map_structure(cast_aval, kwargs[\"_out_aval\"])\n kwargs = dict(\n kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)\n out = f(*args_cast, **kwargs)\n return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)\n\n return wrapper\n\n\ntf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))\ntf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))\ntf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))\n\ntf_impl[lax.eq_p] = tf.math.equal\ntf_impl[lax.ne_p] = tf.math.not_equal\n\ntf_impl[lax.ge_p] = bool_to_int8(tf.math.greater_equal, argnums=(0, 1))\ntf_impl[lax.gt_p] = bool_to_int8(tf.math.greater, argnums=(0, 1))\ntf_impl[lax.le_p] = bool_to_int8(tf.math.less_equal, argnums=(0, 1))\ntf_impl[lax.lt_p] = bool_to_int8(tf.math.less, argnums=(0, 1))\n\ntf_impl[lax.linalg.cholesky_p] = tf.linalg.cholesky\n\n\ndef _convert_element_type(operand, *, new_dtype, weak_type=False):\n old_dtype = operand.dtype.as_numpy_dtype\n if (dtypes.issubdtype(old_dtype, np.complexfloating) and\n not dtypes.issubdtype(new_dtype, np.complexfloating)):\n operand = tf.math.real(operand)\n if (dtypes.issubdtype(old_dtype, np.floating) and\n not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(\n new_dtype, np.complexfloating) or new_dtype == np.bool_)):\n sign = _sign(operand)\n operand = sign * tf.math.floor(sign * operand)\n return tf.dtypes.cast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.convert_element_type_p] = _convert_element_type\n\n\ndef _bitcast_convert_type(operand, new_dtype):\n if operand.dtype == new_dtype:\n return operand\n return tf.bitcast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type\n\n\ndef _clamp(minval, operand, maxval, *, _in_avals, _out_aval):\n # The below permits mirroring the behavior of JAX when maxval < minval\n op_shape_tf_val = _eval_shape(_in_avals[1].shape)\n maxval = tf.broadcast_to(maxval, op_shape_tf_val)\n minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)\n return tf.clip_by_value(operand, minval, maxval)\n\n\ntf_impl_with_avals[lax.clamp_p] = _clamp\n\n\ndef _concatenate(*operands, dimension):\n return tf.concat(operands, axis=dimension)\n\n\ntf_impl[lax.concatenate_p] = _concatenate\n\n\ndef _conv_general_dimension_numbers_proto(dimension_numbers):\n \"\"\"Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.\"\"\"\n assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n proto = xla_data_pb2.ConvolutionDimensionNumbers()\n proto.input_batch_dimension = lhs_spec[0]\n proto.input_feature_dimension = lhs_spec[1]\n proto.output_batch_dimension = out_spec[0]\n proto.output_feature_dimension = 
out_spec[1]\n proto.kernel_output_feature_dimension = rhs_spec[0]\n proto.kernel_input_feature_dimension = rhs_spec[1]\n proto.input_spatial_dimensions.extend(lhs_spec[2:])\n proto.kernel_spatial_dimensions.extend(rhs_spec[2:])\n proto.output_spatial_dimensions.extend(out_spec[2:])\n return proto\n\n\ndef _precision_config_proto(precision: Optional[Tuple[PrecisionType,\n PrecisionType]]):\n \"\"\"Convert an integer to an XLA.PrecisionConfig.\"\"\"\n if precision is None:\n return None\n\n proto = xla_data_pb2.PrecisionConfig()\n proto.operand_precision.append(int(precision[0]))\n proto.operand_precision.append(int(precision[1]))\n return proto\n\n\ndef _conv_general_dilated(lhs, rhs, *,\n window_strides, padding, lhs_dilation,\n rhs_dilation,\n dimension_numbers: lax.ConvDimensionNumbers,\n feature_group_count: int,\n batch_group_count: int,\n lhs_shape: Sequence[int],\n rhs_shape: Sequence[int],\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.conv_general_dilated_p using XlaConv.\"\"\"\n out_tf_shape = _aval_to_tf_shape(_out_aval)\n dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)\n precision_config_proto = _precision_config_proto(precision)\n\n def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):\n out = tfxla.conv(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n dnums_proto,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision_config=precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n # TODO: implement shape inference for XlaConv\n out.set_shape(out_tf_shape)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n # Follow the lowering for complex convolutions from\n # lax._conv_general_dilated_translation. 
We can use the same conversion on all\n # platforms because on XLA:TPU the compiler does the same as a rewrite.\n preferred_float_et: Optional[Any]\n if np.issubdtype(_in_avals[0].dtype, np.complexfloating):\n if preferred_element_type is not None:\n # Convert complex dtype to types used for real and imaginary parts\n assert np.issubdtype(preferred_element_type, np.complexfloating)\n preferred_float_et = (\n np.float64 if preferred_element_type == np.complex128 else np.float32)\n else:\n preferred_float_et = None\n lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)\n rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)\n k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)\n k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),\n preferred_float_et)\n k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)\n return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))\n else:\n return gen_conv(lhs, rhs, preferred_element_type)\n\n\ntf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated\n\n\ndef _dot_general(lhs, rhs, *, dimension_numbers,\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.dot_general_p in terms of tf.linalg.einsum.\"\"\"\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n dnums_proto = xla_data_pb2.DotDimensionNumbers()\n dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)\n dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)\n dnums_proto.lhs_batch_dimensions.extend(lhs_batch)\n dnums_proto.rhs_batch_dimensions.extend(rhs_batch)\n precision_config_proto = _precision_config_proto(precision)\n res = tfxla.dot_general(\n lhs,\n rhs,\n dnums_proto,\n precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dot_general_p] = _dot_general\n\n\ndef _broadcast_in_dim(operand, *, shape, broadcast_dimensions,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # for i in range(len(operand.shape)):\n # result.shape[bcast_dims[i]] <- operand.shape[i]\n # bcast_dims must be strictly increasing.\n # len(bcast_dims) == len(operand.shape)\n op_shape = _in_avals[0].shape\n add_1s_shape = [1] * len(shape)\n for i, broadcast_dim_i in enumerate(broadcast_dimensions):\n add_1s_shape[broadcast_dim_i] = op_shape[i]\n with_1s = tf.reshape(operand, _eval_shape(add_1s_shape))\n return tf.broadcast_to(with_1s, _eval_shape(shape))\n\n\ntf_impl_with_avals[lax.broadcast_in_dim_p] = _broadcast_in_dim\n\n\ndef _reshape(operand, *, new_sizes, dimensions):\n if dimensions is None:\n dimensions = tf.range(tf.rank(operand))\n new_sizes_tf = _eval_shape(new_sizes)\n return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)\n\n\ntf_impl[lax.reshape_p] = _reshape\n\n\ndef _squeeze(operand, *, dimensions, _in_avals, _out_aval):\n op_shape = _in_avals[0].shape\n new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)\n new_shape_tf = _eval_shape(new_shape)\n return tf.reshape(operand, new_shape_tf)\n\n\ntf_impl_with_avals[lax.squeeze_p] = _squeeze\n\n\ndef _pad(operand, padding_value, *, padding_config,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n low, high, interior = util.unzip3(padding_config)\n out = 
tfxla.pad(operand, padding_value, low, high, interior)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.pad_p] = _pad\n\n\ndef _rev(operand, *, dimensions):\n return tf.reverse(operand, dimensions)\n\n\ntf_impl[lax.rev_p] = _rev\n\ntf_impl[lax.select_p] = tf.where\n\n\ndef _transpose(operand, *, permutation):\n return tf.transpose(operand, perm=permutation)\n\n\ntf_impl[lax.transpose_p] = _transpose\n\naxes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)\n\n# reduce_sum and reduce_prod are not supported for bool\ntf_impl[lax.reduce_sum_p] = axes_to_axis(tf.reduce_sum)\ntf_impl[lax.reduce_prod_p] = axes_to_axis(tf.reduce_prod)\ntf_impl[lax.reduce_max_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_max), argnums=[0]))\ntf_impl[lax.reduce_min_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_min), argnums=[0]))\ntf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)\ntf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)\n\n\ndef _argminmax(is_min: bool, operand: TfVal, axes: Sequence[int],\n index_dtype: DType,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follow the JAX implementation, using a XlaReduce with a custom comparator\n if is_min:\n extra_name_stack = \"argmin\"\n value_comparator = lax.lt\n get_identity = lax_internal._get_min_identity\n else:\n extra_name_stack = \"argmax\"\n value_comparator = lax.gt\n get_identity = lax_internal._get_max_identity\n\n res = _convert_jax_impl(\n partial(lax_internal._compute_argminmax, value_comparator, get_identity),\n multiple_results=False,\n extra_name_stack=extra_name_stack)(\n operand,\n index_dtype=index_dtype,\n axes=axes,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[lax.argmin_p] = partial(_argminmax, True)\ntf_impl_with_avals[lax.argmax_p] = partial(_argminmax, False)\n\n\n_add_fn = tf.function(_add, autograph=False)\n_ge_fn = tf.function(tf.math.greater_equal, autograph=False)\n\n\ndef _select_and_gather_add(\n tangents: TfVal, operand: TfVal, select_prim: core.Primitive,\n window_dimensions: Sequence[int], window_strides: Sequence[int],\n base_dilation: Sequence[int], window_dilation: Sequence[int],\n padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Note: this function follows the pattern in\n # jax.lax._select_and_gather_add_translation.\n dtype = operand.dtype\n nbits = dtypes.finfo(dtype.as_numpy_dtype).bits\n\n # Specializing the function for 64 bits. 
Only up to 32 bits are supported on TPU,\n # we thus intend to let the code throw a different exception on this platform.\n max_bits = 64\n\n assert nbits <= max_bits\n double_word_reduction = nbits * 2 <= max_bits\n\n const = lambda dtype, x: tf.constant(np.array(x), dtype)\n\n if double_word_reduction:\n word_dtype = lax_internal._UINT_DTYPES[nbits]\n double_word_dtype = lax_internal._UINT_DTYPES[nbits * 2]\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = _bitcast_convert_type(a, word_dtype)\n b = _bitcast_convert_type(b, word_dtype)\n a = _convert_element_type(a, new_dtype=double_word_dtype)\n b = _convert_element_type(b, new_dtype=double_word_dtype)\n a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))\n return tf.bitwise.bitwise_or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(t):\n assert t.dtype == double_word_dtype\n st = _shift_right_logical(t, const(double_word_dtype, nbits))\n return _bitcast_convert_type(\n _convert_element_type(st, new_dtype=word_dtype), dtype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return _bitcast_convert_type(\n _convert_element_type(t, new_dtype=word_dtype), dtype)\n\n else:\n raise NotImplementedError(\n f\"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.\"\n )\n\n assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim\n\n def reducer(x, y):\n which = tf_impl[select_prim]\n return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)\n\n init = -np.inf if select_prim is lax.ge_p else np.inf\n init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))\n\n out = _specialized_reduce_window(\n reducer,\n init_identity,\n pack(operand, tangents),\n window_dimensions=window_dimensions,\n window_strides=window_strides,\n padding=padding,\n base_dilation=base_dilation,\n window_dilation=window_dilation,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n return snd(out)\n\n\ntf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add\n\n\ndef _get_shape_from_tensor_or_array(x):\n if isinstance(x.shape, tf.TensorShape):\n return tuple(x.shape.as_list())\n return tuple(x.shape)\n\n\ndef _common_reduce_window(operand, init_val, reducer, window_dimensions,\n window_strides, padding, base_dilation,\n window_dilation, _in_avals, _out_aval):\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n reducer_fn = tf.function(\n reducer, autograph=False).get_concrete_function(o_spec, o_spec)\n\n if not isinstance(init_val, (tf.Tensor, tf.Variable)):\n init_val = tf.constant(init_val, operand.dtype)\n out = tfxla.reduce_window(\n operand,\n init_val,\n reducer_fn,\n window_dimensions,\n window_strides,\n base_dilations=base_dilation,\n window_dilations=window_dilation,\n padding=padding)\n # TODO: implement shape inference for XlaReduceWindow\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ndef _reduce_window(*args, jaxpr, consts, window_dimensions,\n window_strides, padding, base_dilation, window_dilation,\n _in_avals, _out_aval):\n \"\"\"TensorFlow implementation of reduce_window.\n\n Args:\n operands: N dimensional arrays containing elements of type T\n init_values: starting values of the reduction\n jaxpr: the jaxpr corresponding to the reduction function\n consts: the constants associated with jaxpr.\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs 
of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n\n Returns:\n The reduced operand.\n \"\"\"\n assert len(consts) == 0, \"Reduction computation cannot have constants\"\n operands, init_values = util.split_list(args, [len(args) // 2])\n\n if len(operands) != 1:\n raise NotImplementedError(\"jax2tf does not support variadic reduce_window\")\n\n def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n return (_common_reduce_window(operands[0], init_values[0], reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval[0]),)\n\n\n\ndef _specialized_reduce_window(reducer,\n identity,\n operand,\n *,\n window_dimensions,\n window_strides,\n padding,\n base_dilation,\n window_dilation,\n _in_avals,\n _out_aval,\n name=None):\n \"\"\"Wraps the TensorFlow reduce window operation based on a reducer and an\n\n identity function defining the initial value of the reduction depending on\n the dtype of the operand.\n\n Args:\n reducer: reduction function of type TfVal -> TfVal -> TfVal\n identity: function that takes a TensorFlow dtype as a parameter and returns\n the starting value of the reduction.\n operand: N dimensional array containing elements of type T\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n name: the name of the specialized reduce window primitive for which this\n conversion function is called. 
This information may help to choose a\n different conversion path (optional)\n\n Returns:\n The reduced operand.\n \"\"\"\n return _common_reduce_window(operand, identity(operand.dtype), reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval)\n\n\ndef _get_max_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(-np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).min\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined max identity\")\n return False\n\n\ndef _get_min_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).max\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined min identity\")\n return True\n\n\n# pylint: disable=protected-access\ntf_impl_with_avals[lax.reduce_window_sum_p] = (\n partial(_specialized_reduce_window, _add, lambda x: 0,\n name=\"reduce_window_sum\"))\ntf_impl_with_avals[lax.reduce_window_min_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=True),\n _get_min_identity,\n name=\"reduce_window_min\"))\ntf_impl_with_avals[lax.reduce_window_max_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=False),\n _get_max_identity,\n name=\"reduce_window_max\"))\ntf_impl_with_avals[lax.reduce_window_p] = _reduce_window\n# pylint: enable=protected-access\n\ndef _reduce(*operands: TfVal,\n computation: Callable,\n jaxpr: core.Jaxpr,\n consts: Sequence[Any],\n dimensions: Sequence[int],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> Sequence[TfVal]:\n del computation\n assert not consts\n assert len(operands) % 2 == 0\n # operands: op1, op2, ..., init_val1, init_val2, ...\n # reducer takes op1[i], op2[i], ..., init_val1, init_val2, ...\n nr_operands = len(operands) // 2\n init_vals = operands[nr_operands:]\n operands = operands[0:nr_operands]\n\n reducer_arg_spec = tuple([tf.TensorSpec((), op.dtype) for op in init_vals] * 2)\n\n def reducer_computation(*args: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res = _interpret_jaxpr(closed_jaxpr, *args, extra_name_stack=None)\n return res\n\n xla_reducer_computation = (\n tf.function(reducer_computation,\n autograph=False).get_concrete_function(*reducer_arg_spec))\n\n outs = tfxla.variadic_reduce(operands, init_vals,\n dimensions_to_reduce=dimensions,\n reducer=xla_reducer_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n outs = tuple(tf.stop_gradient(out) for out in outs) # See #7839\n return outs\n\ntf_impl_with_avals[lax.reduce_p] = _reduce\n\n\n# We use lax._cumred_tpu_translation_rule to convert cummax,\n# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is\n# O(n^2) on other backends. 
This may be implemented using associative_scan\n# instead to favor different backends.\ntf_impl_with_avals[lax.cummin_p] = _convert_jax_impl(\n partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_min),\n multiple_results=False,\n extra_name_stack=\"cummin\")\ntf_impl_with_avals[lax.cummax_p] = _convert_jax_impl(\n partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_max),\n multiple_results=False,\n extra_name_stack=\"cummin\")\n# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for\n# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes\n# will fail when running in compiled mode, but are otherwise compatible with\n# the operation. A non-XLA path can thus be defined for all dtypes, though the\n# tests will crash.\ntf_impl_with_avals[lax.cumsum_p] = _convert_jax_impl(\n partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_sum),\n multiple_results=False,\n extra_name_stack=\"cumsum\")\ntf_impl_with_avals[lax.cumprod_p] = _convert_jax_impl(\n partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_prod),\n multiple_results=False,\n extra_name_stack=\"cumprod\")\n\n\ndef _select_and_scatter(operand, source, init_value, select_jaxpr,\n select_consts, scatter_jaxpr, scatter_consts,\n window_dimensions, window_strides, padding):\n raise NotImplementedError(\"TODO: jax2tf can not convert _select_and_scatter\")\n\n\ntf_impl[lax.select_and_scatter_p] = _select_and_scatter\n\n\n@partial(bool_to_int8, argnums=(0, 1))\ndef _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,\n window_strides, padding, _in_avals, _out_aval):\n init_value = tf.zeros((), operand.dtype)\n select_fn = (\n tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(\n init_value, init_value))\n scatter_fn = _add_fn.get_concrete_function(init_value, init_value)\n out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,\n padding, source, init_value, select_fn,\n scatter_fn)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add\n\n\ndef _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):\n res = _convert_jax_impl(\n partial(jax._src.prng._threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True, extra_name_stack=\"threefry\")(\n *args, _in_avals=_in_avals, _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[jax._src.prng.threefry2x32_p] = _threefry2x32_jax_impl\n\n# Use the vmap implementation, otherwise on TPU the performance is really bad\n# With use_vmap=True on, we get about the same performance for JAX and jax2tf.\ntf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(\n partial(jax._src.random._gamma_impl, use_vmap=True),\n multiple_results=False, extra_name_stack=\"random_gamma\")\n\n\ndef _rng_bit_generator(key: TfVal, *, shape, dtype, algorithm) -> Sequence[TfVal]:\n shape_tf = _eval_shape(shape)\n # JAX uses XLA algorithm enums; tfxla uses tf.random.Algorithm\n if algorithm == lax.RandomAlgorithm.RNG_THREE_FRY:\n algorithm_tf = tf.random.Algorithm.THREEFRY\n elif algorithm == lax.RandomAlgorithm.RNG_PHILOX:\n algorithm_tf = tf.random.Algorithm.PHILOX\n elif algorithm == lax.RandomAlgorithm.RNG_DEFAULT:\n algorithm_tf = tf.random.Algorithm.AUTO_SELECT\n else:\n assert False\n outs = tfxla.rng_bit_generator(algorithm_tf.value, key, 
shape_tf,\n dtype=_to_tf_dtype(dtype))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n outs = tuple(tf.stop_gradient(out) for out in outs) # See #7839\n return outs\n\n\ntf_impl[lax.rng_bit_generator_p] = _rng_bit_generator\n\n\ndef _rng_uniform(minval: TfVal, maxval: TfVal, *, shape) -> TfVal:\n shape_tf = _eval_shape(shape)\n return tf.random.uniform(shape_tf, minval=minval, maxval=maxval, dtype=minval.dtype)\n\ntf_impl[lax.rng_uniform_p] = _rng_uniform\n\n\ndef _gather_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.GatherDimensionNumbers()\n proto.offset_dims.extend(dimension_numbers.offset_dims)\n proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)\n proto.start_index_map.extend(dimension_numbers.start_index_map)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\n@partial(bool_to_int8, argnums=[0])\ndef _gather(operand, start_indices, *, dimension_numbers, slice_sizes: core.Shape,\n indices_are_sorted, unique_indices, mode, fill_value,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Tensorflow implementation of gather.\"\"\"\n if mode == lax.GatherScatterMode.FILL_OR_DROP:\n gather_fill_fn = _convert_jax_impl(lax_slicing._gather_fill,\n multiple_results=False)\n return gather_fill_fn(\n operand, start_indices, dimension_numbers=dimension_numbers,\n slice_sizes=slice_sizes, unique_indices=unique_indices,\n indices_are_sorted=indices_are_sorted, fill_value=fill_value,\n output_shape=_out_aval.shape, _in_avals=_in_avals, _out_aval=_out_aval)\n\n proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)\n slice_sizes_tf = _eval_shape(slice_sizes)\n out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf,\n indices_are_sorted)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.gather_p] = _gather\n\n\ndef _slice(operand, start_indices, limit_indices, strides, _in_avals,\n _out_aval):\n if strides is None:\n strides = [1] * len(start_indices)\n slices = tuple(\n map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),\n _eval_shape(strides)))\n out = operand[slices]\n # TODO(b/184503314): improve shape inference for __getitem__\n # E.g., operand.shape=(b, 5, 3), start_indices=(0, 1, 1), limit_indices=(b, 5, 3), strides=(1, 2, 1)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.slice_p] = _slice\n\n\ndef _dynamic_slice(operand, *start_indices, slice_sizes: core.Shape,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n start_indices = tf.stack(start_indices)\n slice_sizes_tf = _eval_shape(slice_sizes)\n\n res = tfxla.dynamic_slice(operand, start_indices, size_indices=slice_sizes_tf)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice\n\n\ndef _dynamic_update_slice(operand, update, *start_indices,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n out = tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.dynamic_update_slice_p] = _dynamic_update_slice\n\n\ndef _scatter_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.ScatterDimensionNumbers()\n 
proto.update_window_dims.extend(dimension_numbers.update_window_dims)\n proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)\n proto.scatter_dims_to_operand_dims.extend(\n dimension_numbers.scatter_dims_to_operand_dims)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\ndef _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,\n dimension_numbers, indices_are_sorted, unique_indices, mode,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n del unique_indices\n\n if mode == lax.GatherScatterMode.CLIP:\n clip_fn = _convert_jax_impl(lax_slicing._clamp_scatter_indices,\n multiple_results=False)\n scatter_indices = clip_fn(\n operand, scatter_indices, updates, dnums=dimension_numbers,\n _in_avals=_in_avals, _out_aval=_in_avals[1])\n\n assert len(update_consts) == 0, \"Update computation cannot have constants\"\n\n proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)\n\n def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n xla_update_computation = (\n tf.function(update_computation,\n autograph=False).get_concrete_function(o_spec, o_spec))\n out = tfxla.scatter(\n operand,\n scatter_indices,\n updates,\n xla_update_computation,\n proto,\n indices_are_sorted=indices_are_sorted)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.scatter_p] = _scatter\ntf_impl_with_avals[lax.scatter_min_p] = _scatter\ntf_impl_with_avals[lax.scatter_max_p] = _scatter\ntf_impl_with_avals[lax.scatter_mul_p] = _scatter\ntf_impl_with_avals[lax.scatter_add_p] = _scatter\n\n\ndef _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],\n linear: Sequence[bool]) -> Sequence[TfVal]:\n del linear\n # tf.cond needs lambdas with no arguments.\n branches_tf = [\n partial(_interpret_jaxpr, jaxpr, *operands,\n # Same name stack as the XLA translation of cond_p\n extra_name_stack=f\"branch_{i}_fun\")\n for jaxpr in branches\n for i, jaxpr in enumerate(branches)\n ]\n return tf.switch_case(index, branches_tf)\n\n\ntf_impl[lax.cond_p] = _cond\n\n\ndef _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,\n body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]\n # The conditional is not a scalar, this must be a batched while\n return _batched_cond_while(\n *args,\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr,\n body_nconsts=body_nconsts,\n body_jaxpr=body_jaxpr)\n\n # The conditional must return a single value to TF\n def cond_tf_func(*args: TfVal) -> TfVal:\n pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args,\n # Same name stack as the XLA translation of while_p\n extra_name_stack=\"while/cond\")\n return pred\n\n body_tf_func = partial(_interpret_jaxpr, body_jaxpr, *body_consts,\n extra_name_stack=\"while/body\")\n return tf.while_loop(cond_tf_func, body_tf_func, init_carry)\n\n\ndef _batched_cond_while(*args: TfVal, cond_nconsts: int,\n cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,\n body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n \"\"\"Interprets a while_loop with a batched condition.\n\n A batched 
while has a conditional that returns a tensor of booleans, and\n a body that returns a list of tensors whose leading dimensions match those\n of the conditional tensor.\n\n We need to turn it into a while with scalar boolean conditional. We will\n expand the loop carry to include a prefix with the current tensor boolean\n condition. We prepend to the loop the first calculation of the tensor boolean\n condition. The loop condition will use a \"reduce_any\" to calculate a scalar\n boolean from the tensor boolean condition. The end of the loop body will\n compute the new carry using a \"tf.where\", and we compute the new tensor\n boolean condition.\n \"\"\"\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n # Initial computation of batched condition\n init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry,\n extra_name_stack=\"while/body_pred\")\n assert init_pred_b is not core.unit\n\n def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:\n pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))\n return pred\n\n def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:\n new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,\n *carry,\n extra_name_stack=\"while/body\")\n # We repeat those carries for which the loop termination condition is false\n def select_one_carry(new_c: TfVal, c: TfVal, c_aval: core.ShapedArray) -> TfVal:\n pred_b_bcast = _broadcast_in_dim(\n pred_b,\n shape=c_aval.shape, # a JAX shape\n broadcast_dimensions=list(range(len(pred_b.shape))),\n _in_avals=cond_jaxpr.out_avals,\n _out_aval=core.ShapedArray(c_aval.shape, np.bool_))\n return tf.where(pred_b_bcast, new_c, c)\n\n selected_carry: Sequence[TfVal] = list(map(select_one_carry, new_carry, carry, body_jaxpr.out_avals))\n next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry,\n extra_name_stack=\"body_pred\")\n return (next_pred_b, *selected_carry)\n\n _, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,\n (init_pred_b, *init_carry))\n return res_carry\n\n\ntf_impl[lax.while_p] = _while\n\n# We use the scan impl rule to rewrite in terms of while.\ntf_impl_with_avals[lax.scan_p] = _convert_jax_impl(\n lax_control_flow._scan_impl,\n extra_name_stack=\"scan\")\n\ntf_impl_with_avals[ad_checkpoint.remat_p] = \\\n _convert_jax_impl(partial(lax_control_flow._remat_translation_rule,\n # TODO: jax2tf cannot discriminate by platform\n platform=\"tpu\"),\n multiple_results=True,\n extra_name_stack=\"checkpoint\")\n\ndef _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:\n # Some types originally incompatible with tf.math.top_k can be promoted\n # to a compatible type without loss of precision.\n def promote_tf_dtype(tf_dtype):\n if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:\n return tf.uint32\n if tf_dtype in [tf.int8, tf.int16]:\n return tf.int32\n if tf_dtype is tf.float16:\n return tf.float32\n return None\n\n conversion_dtype = promote_tf_dtype(operand.dtype)\n if conversion_dtype:\n values, indices = tf.math.top_k(\n tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)\n return tf.dtypes.cast(values, operand.dtype), indices\n else:\n return tf.math.top_k(operand, k=k, sorted=True)\n\n\ntf_impl[lax.top_k_p] = _top_k\n\n\ndef _sort(*operands: TfVal, dimension: int, is_stable: bool,\n num_keys: int) -> Tuple[TfVal, ...]:\n assert 1 <= num_keys <= len(operands)\n assert 0 <= dimension < len(\n operands[0].shape\n ), f\"Invalid {dimension} for ndim 
{len(operands[0].shape)}\"\n\n comparator_spec: List[tf.TensorSpec] = []\n comparator_jax_in_avals: List[core.ShapedArray] = []\n for op in operands:\n o_spec = tf.TensorSpec((), dtype=op.dtype)\n comparator_spec.extend([o_spec, o_spec])\n o_aval = core.ShapedArray((), _to_jax_dtype(op.dtype))\n comparator_jax_in_avals.extend([o_aval, o_aval])\n\n # Use the same comparator that JAX uses when compiling to XLA, to get the\n # proper NaN/Inf total order, and the lexicographic ordering.\n # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]\n # corresponding to two scalars from operand[k].\n def lexicographic_comparator(*tf_args: TfVal) -> TfVal:\n return _convert_jax_impl(\n lax_internal._sort_lt_comparator, multiple_results=False)(\n *tf_args,\n _in_avals=comparator_jax_in_avals,\n _out_aval=core.ShapedArray((), np.bool_),\n num_keys=num_keys)\n\n xla_comparator_computation = (\n tf.function(lexicographic_comparator,\n autograph=False).get_concrete_function(*comparator_spec))\n results = tfxla.variadic_sort(\n operands,\n dimension=dimension,\n is_stable=is_stable,\n comparator=xla_comparator_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n results = tuple(tf.stop_gradient(out) for out in results) # See #7839\n return results\n\n\ntf_impl[lax.sort_p] = _sort\n\n\ndef _fft(x, fft_type, fft_lengths):\n FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))\n if fft_type == IRFFT:\n expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)\n else:\n expected_lengths = x.shape[-len(fft_lengths):]\n if expected_lengths != fft_lengths:\n raise NotImplementedError(\n f\"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of \"\n f\"array with shape={x.shape}.\")\n tf_funcs = {\n FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],\n IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],\n RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],\n IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]\n }\n return tf_funcs[fft_type][len(fft_lengths) - 1](x)\n\n\ntf_impl[lax.fft_p] = _fft\n\n\ndef _qr(operand, full_matrices):\n return tf.linalg.qr(operand, full_matrices=full_matrices)\n\n\ntf_impl[lax.linalg.qr_p] = _qr\n\n\ndef _svd(operand, full_matrices, compute_uv):\n result = tf.linalg.svd(operand, full_matrices, compute_uv)\n if not compute_uv:\n return result,\n s, u, v = result\n return s, u, tf.linalg.adjoint(v)\n\n\ntf_impl[lax.linalg.svd_p] = _svd\n\n\ndef _eig(operand: TfVal, compute_left_eigenvectors: bool,\n compute_right_eigenvectors: bool):\n if compute_left_eigenvectors and compute_right_eigenvectors:\n # TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to\n # sort the left eigenvectors in the right order. 
The jax.numpy.linalg API\n # suggests to me that left eigenvectors are anyway seldom used, so I\n # think it is acceptable to leave as unimplemented for now.\n msg = (\"Conversion of eig is not implemented when both \"\n \"compute_left_eigenvectors and compute_right_eigenvectors are set \"\n \"to True.\")\n raise NotImplementedError(msg)\n elif not (compute_left_eigenvectors or compute_right_eigenvectors):\n return tuple([tf.linalg.eigvals(operand)])\n elif compute_right_eigenvectors:\n return tuple(tf.linalg.eig(operand))\n else: # compute_left_eigenvectors == True\n wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))\n wHH = tf.math.conj(wH)\n return tuple([wHH, vl])\n\n\ntf_impl[lax.linalg.eig_p] = _eig\n\n\ndef _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):\n if operand.shape[-1] == 0:\n v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))\n else:\n if not lower:\n operand = tf.linalg.adjoint(operand)\n w, v = tf.linalg.eigh(operand)\n cast_type = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(operand.dtype)\n if cast_type is not None:\n w = tf.cast(w, cast_type)\n return v, w\n\n\ntf_impl_with_avals[lax.linalg.eigh_p] = _eigh\n\n\ndef _lu(operand: TfVal, _in_avals, _out_aval):\n return _convert_jax_impl(lax_linalg._lu_python, extra_name_stack=\"lu\")(\n operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.lu_p] = _lu\n\n\ndef _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,\n transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if unit_diagonal:\n a_aval, _ = _in_avals\n a_shape = _eval_shape(a_aval.shape)\n a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))\n if not left_side:\n rank = len(a.shape)\n transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]\n a = tf.transpose(a, transpose_dimensions)\n b = tf.transpose(b, transpose_dimensions)\n lower = not lower\n # adjoint == transpose for real dtypes, so special care need only be taken\n # for complex types.\n if a.dtype in [tf.complex64, tf.complex128]:\n if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):\n a = tf.math.conj(a)\n result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)\n if not left_side:\n result = tf.transpose(result, transpose_dimensions)\n return result\n\n\ntf_impl_with_avals[lax.linalg.triangular_solve_p] = _triangular_solve\n\n\ndef _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):\n return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl,\n extra_name_stack=\"linear_solve\")(\n *args,\n const_lengths=const_lengths,\n jaxprs=jaxprs,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linear_solve_p] = _linear_solve\n\ndef _tridiagonal_solve(*args: TfVal, _in_avals, _out_aval, **params):\n return _convert_jax_impl(lax_linalg._tridiagonal_solve_jax,\n multiple_results=False,\n extra_name_stack=\"tridiagonal_solve\")(\n *args,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.tridiagonal_solve_p] = _tridiagonal_solve\n\ndef _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n jvp_jaxpr_thunk: Callable,\n num_consts: int) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_jvp\")\n\n\ntf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = 
_custom_jvp_call_jaxpr\n\n\ndef _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n **_) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_vjp\")\n\n\ntf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr\n\n\ndef _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:\n raise TypeError(\"can't apply forward-mode autodiff (jvp) to a custom_vjp \"\n \"function.\")\n\n\ntf_impl[ad.custom_lin_p] = _custom_lin\n\n\ndef split_to_logical_devices(tensor: TfVal,\n partition_dimensions: pxla.PartitionsOrReplicated):\n \"\"\"Like TPUMPStrategy.experimental_split_to_logical_devices.\n\n For jax2tf purposes we want to avoid needing to thread the `strategy` object\n through the generated computation. It seems that the original function needs\n the strategy object only for error checking, which we assume is done upstream\n by JAX.\n\n Args:\n tensor: Input tensor to annotate.\n partition_dimensions: A list of integers, with one integer per tensor\n dimension, specifying in how many parts the dimension should be split. The\n product of integers must equal the number of devices per replica.\n use_sharding_op: whether to use a sharding op, or not.\n\n Returns:\n an annotated tensor.\n \"\"\"\n # TODO: this is only for sharded_jit. Either remove, or implement in terms\n # of _shard_values.\n if partition_dimensions is None:\n return xla_sharding.replicate(tensor, use_sharding_op=True)\n num_partition_splits = np.prod(partition_dimensions)\n tile_assignment = np.arange(num_partition_splits).reshape(\n partition_dimensions)\n return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)\n\n\ndef _shard_value(mesh: maps.Mesh,\n val: TfVal,\n aval: core.ShapedArray,\n axis_resources: pjit.ParsedPartitionSpec) -> TfVal:\n \"\"\"Apply sharding to a TfVal.\"\"\"\n sharding_proto: xla_client.OpSharding = pjit.get_aval_sharding_proto(\n aval, axis_resources, mesh)\n # To use xla_sharding.py, we must have a xla_data_pb2.OpSharding.\n xla_sharding_proto: xla_data_pb2.OpSharding = (\n xla_data_pb2.OpSharding(\n type=int(sharding_proto.type),\n tile_assignment_dimensions=sharding_proto.tile_assignment_dimensions,\n tile_assignment_devices=sharding_proto.tile_assignment_devices,\n replicate_on_last_tile_dim=sharding_proto.replicate_on_last_tile_dim))\n return xla_sharding.Sharding(proto=xla_sharding_proto).apply_to_tensor(\n val, use_sharding_op=True)\n\n\ndef _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],\n in_parts: Sequence[pxla.PartitionsOrReplicated],\n out_parts_thunk,\n **_) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n sharded_vals = map(split_to_logical_devices, vals, in_parts)\n vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel\n out_parts_flat = out_parts_thunk()\n assert len(out_parts_flat) == len(\n vals_out), f\"expected {len(out_parts_flat)} == {len(vals_out)}\"\n sharded_vals_out = [\n (split_to_logical_devices(val, val_part), val_aval)\n for (val, val_aval), val_part in zip(vals_out, out_parts_flat)\n ]\n return sharded_vals_out\n\n\ndef _sharded_jit_sharding_constraint(arg: TfVal, *,\n partitions: pxla.PartitionsOrReplicated,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n del _in_avals, _out_aval\n return split_to_logical_devices(arg, partitions)\n\n\ntf_impl_with_avals[sharded_jit.sharding_constraint_p] = _sharded_jit_sharding_constraint\n\n\ndef _pjit(*args: TfVal,\n jaxpr: core.ClosedJaxpr,\n 
in_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n out_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n resource_env: maps.ResourceEnv,\n donated_invars,\n name: str,\n in_positional_semantics,\n out_positional_semantics,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> TfVal:\n del donated_invars\n if resource_env.physical_mesh.is_multi_process:\n raise NotImplementedError(\"jax2tf translation for pjit over multi-process \"\n \"meshes is not supported yet\")\n # TODO: add `name` to the name stack\n shard_value_for_mesh = partial(_shard_value, resource_env.physical_mesh)\n # Apply sharding annotation to the arguments\n sharded_args: Sequence[TfVal] = tuple(\n map(shard_value_for_mesh, args, _in_avals, in_axis_resources))\n results = _interpret_jaxpr(jaxpr, *sharded_args,\n extra_name_stack=util.wrap_name(name, \"pjit\"))\n sharded_results: Sequence[TfVal] = tuple(\n map(shard_value_for_mesh, results, _out_aval, out_axis_resources))\n return tuple(sharded_results)\n\n\ntf_impl_with_avals[pjit.pjit_p] = _pjit\n\n\ndef _pjit_sharding_constraint(arg: TfVal, *,\n axis_resources: pjit.ParsedPartitionSpec,\n resource_env: maps.ResourceEnv,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> TfVal:\n return _shard_value(resource_env.physical_mesh, arg, _in_avals[0], axis_resources)\n\n\ntf_impl_with_avals[pjit.sharding_constraint_p] = _pjit_sharding_constraint\n\ndef _dimension_size_jax2tf(op: TfVal, *, dimension):\n return tf.shape(op)[dimension]\n\ntf_impl[shape_poly.dimension_size_p] = _dimension_size_jax2tf\n\ndef _dim_as_value_jax2tf(dim: shape_poly.DimSize):\n dim_tf, = _eval_shape((dim,))\n return dim_tf\n\ntf_impl[shape_poly.dim_as_value_p] = _dim_as_value_jax2tf\n\ndef _register_checkpoint_pytrees():\n \"\"\"Registers TF custom container types as pytrees.\"\"\"\n m = tf.Module()\n # The types here are automagically changed by TensorFlow's checkpointing\n # infrastructure.\n m.a = (tf.Module(), tf.Module())\n m.b = [tf.Module(), tf.Module()]\n m.c = {\"a\": tf.Module()}\n tuple_wrapper = type(m.a)\n list_wrapper = type(m.b)\n dict_wrapper = type(m.c)\n\n # TF AutoTrackable swaps container types out for wrappers.\n assert tuple_wrapper is not tuple\n assert list_wrapper is not list\n assert dict_wrapper is not dict\n\n jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:\n (tuple(xs), None), lambda _, xs: tuple(xs))\n\n jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),\n lambda _, xs: list(xs))\n\n jax.tree_util.register_pytree_node(\n dict_wrapper,\n lambda s: (tuple(s.values()), tuple(s.keys())),\n lambda k, xs: dict(zip(k, xs)))\n\n\n_register_checkpoint_pytrees()\n" ]
[ [ "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "numpy.concatenate", "numpy.any", "numpy.where", "numpy.arange", "numpy.uint8", "numpy.ceil", "numpy.diff", "numpy.ravel", "numpy.load", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.ndim", "numpy.int64", "numpy.cumprod", "numpy.iscomplex", "numpy.array", "numpy.sum", "numpy.ones", "numpy.sign", "numpy.shape", "numpy.broadcast_to", "numpy.isscalar", "numpy.empty" ], [ "tensorflow.convert_to_tensor", "tensorflow.math.floormod", "tensorflow.raw_ops.PreventGradient", "tensorflow.compiler.tf2xla.python.xla.dot_general", "tensorflow.math.greater_equal", "tensorflow.linalg.eigh", "tensorflow.dtypes.cast", "tensorflow.compiler.tf2xla.python.xla.variadic_reduce", "tensorflow.math.imag", "tensorflow.math.conj", "tensorflow.math.truediv", "tensorflow.switch_case", "tensorflow.compiler.tf2xla.python.xla.conv", "tensorflow.linalg.qr", "tensorflow.zeros_like", "tensorflow.function", "tensorflow.compiler.tf2xla.python.xla.dynamic_slice", "numpy.array", "tensorflow.compiler.xla.xla_data_pb2.PrecisionConfig", "tensorflow.linalg.eigvals", "tensorflow.ones", "numpy.shape", "tensorflow.concat", "tensorflow.math.sign", "tensorflow.stack", "tensorflow.compiler.xla.xla_data_pb2.ConvolutionDimensionNumbers", "tensorflow.where", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.replicate", "tensorflow.linalg.svd", "tensorflow.bitwise.invert", "tensorflow.math.real", "tensorflow.compiler.xla.xla_data_pb2.DotDimensionNumbers", "tensorflow.compiler.xla.xla_data_pb2.GatherDimensionNumbers", "tensorflow.compiler.tf2xla.python.xla.variadic_sort", "tensorflow.random.uniform", "tensorflow.identity", "tensorflow.math.floordiv", "tensorflow.dtypes.as_dtype", "tensorflow.math.subtract", "tensorflow.broadcast_to", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.Sharding", "tensorflow.compiler.xla.xla_data_pb2.OpMetadata", "tensorflow.zeros", "tensorflow.compiler.tf2xla.python.xla.gather", "tensorflow.compiler.tf2xla.python.xla.scatter", "numpy.issubdtype", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.tile", "tensorflow.cast", "tensorflow.math.equal", "tensorflow.raw_ops.PopulationCount", "tensorflow.rank", "tensorflow.compiler.xla.xla_data_pb2.ScatterDimensionNumbers", "tensorflow.while_loop", "tensorflow.stop_gradient", "tensorflow.complex", "tensorflow.logical_not", "tensorflow.reverse", "tensorflow.bitwise.left_shift", "tensorflow.linalg.eig", "tensorflow.shape", "tensorflow.compiler.tf2xla.python.xla.select_and_scatter", "tensorflow.bitwise.bitwise_or", "tensorflow.math.floor", "tensorflow.math.round", "tensorflow.raw_ops.AddV2", "tensorflow.clip_by_value", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reshape", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.Module", "tensorflow.compiler.tf2xla.python.xla.reduce_window", "tensorflow.nest.map_structure", "tensorflow.logical_and", "tensorflow.math.abs", "tensorflow.device", "tensorflow.math.reciprocal", "tensorflow.nest.flatten", "tensorflow.bitwise.right_shift", "numpy.arange", "tensorflow.name_scope", "tensorflow.linalg.triangular_solve", "tensorflow.math.atan2", "tensorflow.math.multiply", "tensorflow.linalg.adjoint", "tensorflow.math.less", "tensorflow.compiler.tf2xla.python.xla.pad", "tensorflow.math.sqrt", "tensorflow.math.negative", "numpy.prod", "tensorflow.math.top_k", "tensorflow.TensorSpec" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
Zeinab-Haroon/detectron2
[ "6a56c9cadaf392697c4bdef00325e415d07a459f" ]
[ "detectron2/evaluation/cityscapes_evaluation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport glob\nimport logging\nimport numpy as np\nimport os\nimport tempfile\nfrom collections import OrderedDict\nimport torch\nfrom PIL import Image\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.utils import comm\nfrom detectron2.utils.file_io import PathManager\n\nfrom .evaluator import DatasetEvaluator\n\n\nclass CityscapesEvaluator(DatasetEvaluator):\n \"\"\"\n Base class for evaluation using cityscapes API.\n \"\"\"\n\n def __init__(self, dataset_name):\n \"\"\"\n Args:\n dataset_name (str): the name of the dataset.\n It must have the following metadata associated with it:\n \"thing_classes\", \"gt_dir\".\n \"\"\"\n self._metadata = MetadataCatalog.get(dataset_name)\n self._cpu_device = torch.device(\"cpu\")\n self._logger = logging.getLogger(__name__)\n\n def reset(self):\n self._working_dir = tempfile.TemporaryDirectory(prefix=\"cityscapes_eval_\")\n self._temp_dir = self._working_dir.name\n # All workers will write to the same results directory\n # TODO this does not work in distributed training\n self._temp_dir = comm.all_gather(self._temp_dir)[0]\n if self._temp_dir != self._working_dir.name:\n self._working_dir.cleanup()\n self._logger.info(\n \"Writing cityscapes results to temporary directory {} ...\".format(self._temp_dir)\n )\n\n\nclass CityscapesInstanceEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate instance segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import name2label\n\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_txt = os.path.join(self._temp_dir, basename + \"_pred.txt\")\n\n if \"instances\" in output:\n output = output[\"instances\"].to(self._cpu_device)\n num_instances = len(output)\n with open(pred_txt, \"w\") as fout:\n for i in range(num_instances):\n pred_class = output.pred_classes[i]\n classes = self._metadata.thing_classes[pred_class]\n class_id = name2label[classes].id\n score = output.scores[i]\n mask = output.pred_masks[i].numpy().astype(\"uint8\")\n png_filename = os.path.join(\n self._temp_dir, basename + \"_{}_{}.png\".format(i, classes)\n )\n\n Image.fromarray(mask * 255).save(png_filename)\n fout.write(\n \"{} {} {}\\n\".format(os.path.basename(png_filename), class_id, score)\n )\n else:\n # Cityscapes requires a prediction file for every ground truth image.\n with open(pred_txt, \"w\") as fout:\n pass\n\n def evaluate(self):\n \"\"\"\n Returns:\n dict: has a key \"segm\", whose value is a dict of \"AP\" and \"AP50\".\n \"\"\"\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, \"gtInstances.json\")\n\n # These lines are adopted from\n # 
https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n \n print('==================== ** ======================')\n # wanted = 'lindau'\n wanted = 'munster'\n # wanted = 'frankfurt'\n # groundTruthImgList = glob.glob(os.path.join(gt_dir, \"*\", \"*_gtFine_instanceIds.png\"))\n groundTruthImgList = glob.glob(os.path.join(gt_dir, wanted, \"*_gtFine_instanceIds.png\"))\n\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )[\"averages\"]\n\n ret = OrderedDict()\n print('ret[\"segm\"]:', ret[\"segm\"])\n\n self._working_dir.cleanup()\n return ret\n\n\nclass CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + \"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\"))\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. 
Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"IoU\": 100.0 * results[\"averageScoreClasses\"],\n \"iIoU\": 100.0 * results[\"averageScoreInstClasses\"],\n \"IoU_sup\": 100.0 * results[\"averageScoreCategories\"],\n \"iIoU_sup\": 100.0 * results[\"averageScoreInstCategories\"],\n }\n self._working_dir.cleanup()\n return ret\n" ]
[ [ "torch.device", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kalyanvasudev/ptv_temp
[ "85c2282bed9aa4eadc3454bd7e8f2a8e8c4b4ec6" ]
[ "pytorchvideo/models/x3d.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nfrom typing import Callable, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom fvcore.nn.squeeze_excitation import SqueezeExcitation\nfrom pytorchvideo.layers.convolutions import Conv2plus1d\nfrom pytorchvideo.layers.swish import Swish\nfrom pytorchvideo.layers.utils import round_repeats, round_width, set_attributes\nfrom pytorchvideo.models.head import ResNetBasicHead\nfrom pytorchvideo.models.net import Net\nfrom pytorchvideo.models.resnet import BottleneckBlock, ResBlock, ResStage\nfrom pytorchvideo.models.stem import ResNetBasicStem\n\n\ndef create_x3d_stem(\n *,\n # Conv configs.\n in_channels: int,\n out_channels: int,\n conv_kernel_size: Tuple[int] = (5, 3, 3),\n conv_stride: Tuple[int] = (1, 2, 2),\n conv_padding: Tuple[int] = (2, 1, 1),\n # BN configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n # Activation configs.\n activation: Callable = nn.ReLU,\n) -> nn.Module:\n \"\"\"\n Creates the stem layer for X3D. It performs spatial Conv, temporal Conv, BN, and Relu.\n\n ::\n\n Conv_xy\n ↓\n Conv_t\n ↓\n Normalization\n ↓\n Activation\n\n Args:\n in_channels (int): input channel size of the convolution.\n out_channels (int): output channel size of the convolution.\n conv_kernel_size (tuple): convolutional kernel size(s).\n conv_stride (tuple): convolutional stride size(s).\n conv_padding (tuple): convolutional padding size(s).\n\n norm (callable): a callable that constructs normalization layer, options\n include nn.BatchNorm3d, None (not performing normalization).\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n\n activation (callable): a callable that constructs activation layer, options\n include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing\n activation).\n\n Returns:\n (nn.Module): X3D stem layer.\n \"\"\"\n conv_xy_module = nn.Conv3d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=(1, conv_kernel_size[1], conv_kernel_size[2]),\n stride=(1, conv_stride[1], conv_stride[2]),\n padding=(0, conv_padding[1], conv_padding[2]),\n bias=False,\n )\n conv_t_module = nn.Conv3d(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=(conv_kernel_size[0], 1, 1),\n stride=(conv_stride[0], 1, 1),\n padding=(conv_padding[0], 0, 0),\n bias=False,\n groups=out_channels,\n )\n stacked_conv_module = Conv2plus1d(\n conv_t=conv_xy_module,\n norm=None,\n activation=None,\n conv_xy=conv_t_module,\n )\n\n norm_module = (\n None\n if norm is None\n else norm(num_features=out_channels, eps=norm_eps, momentum=norm_momentum)\n )\n activation_module = None if activation is None else activation()\n\n return ResNetBasicStem(\n conv=stacked_conv_module,\n norm=norm_module,\n activation=activation_module,\n pool=None,\n )\n\n\ndef create_x3d_bottleneck_block(\n *,\n # Convolution configs.\n dim_in: int,\n dim_inner: int,\n dim_out: int,\n conv_kernel_size: Tuple[int] = (3, 3, 3),\n conv_stride: Tuple[int] = (1, 2, 2),\n # Norm configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n se_ratio: float = 0.0625,\n # Activation configs.\n activation: Callable = nn.ReLU,\n inner_act: Callable = Swish,\n) -> nn.Module:\n \"\"\"\n Bottleneck block for X3D: a sequence of Conv, Normalization with optional SE block,\n and Activations repeated in the following order:\n\n ::\n\n Conv3d (conv_a)\n ↓\n Normalization (norm_a)\n ↓\n Activation 
(act_a)\n ↓\n Conv3d (conv_b)\n ↓\n Normalization (norm_b)\n ↓\n Squeeze-and-Excitation\n ↓\n Activation (act_b)\n ↓\n Conv3d (conv_c)\n ↓\n Normalization (norm_c)\n\n Args:\n dim_in (int): input channel size to the bottleneck block.\n dim_inner (int): intermediate channel size of the bottleneck.\n dim_out (int): output channel size of the bottleneck.\n conv_kernel_size (tuple): convolutional kernel size(s) for conv_b.\n conv_stride (tuple): convolutional stride size(s) for conv_b.\n\n norm (callable): a callable that constructs normalization layer, examples\n include nn.BatchNorm3d, None (not performing normalization).\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n se_ratio (float): if > 0, apply SE to the 3x3x3 conv, with the SE\n channel dimensionality being se_ratio times the 3x3x3 conv dim.\n\n activation (callable): a callable that constructs activation layer, examples\n include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing\n activation).\n inner_act (callable): whether use Swish activation for act_b or not.\n\n Returns:\n (nn.Module): X3D bottleneck block.\n \"\"\"\n # 1x1x1 Conv\n conv_a = nn.Conv3d(\n in_channels=dim_in, out_channels=dim_inner, kernel_size=(1, 1, 1), bias=False\n )\n norm_a = (\n None\n if norm is None\n else norm(num_features=dim_inner, eps=norm_eps, momentum=norm_momentum)\n )\n act_a = None if activation is None else activation()\n\n # 3x3x3 Conv\n conv_b = nn.Conv3d(\n in_channels=dim_inner,\n out_channels=dim_inner,\n kernel_size=conv_kernel_size,\n stride=conv_stride,\n padding=[size // 2 for size in conv_kernel_size],\n bias=False,\n groups=dim_inner,\n dilation=(1, 1, 1),\n )\n se = (\n SqueezeExcitation(\n num_channels=dim_inner,\n num_channels_reduced=round_width(dim_inner, se_ratio),\n is_3d=True,\n )\n if se_ratio > 0.0\n else nn.Identity()\n )\n norm_b = nn.Sequential(\n (\n nn.Identity()\n if norm is None\n else norm(num_features=dim_inner, eps=norm_eps, momentum=norm_momentum)\n ),\n se,\n )\n act_b = None if inner_act is None else inner_act()\n\n # 1x1x1 Conv\n conv_c = nn.Conv3d(\n in_channels=dim_inner, out_channels=dim_out, kernel_size=(1, 1, 1), bias=False\n )\n norm_c = (\n None\n if norm is None\n else norm(num_features=dim_out, eps=norm_eps, momentum=norm_momentum)\n )\n\n return BottleneckBlock(\n conv_a=conv_a,\n norm_a=norm_a,\n act_a=act_a,\n conv_b=conv_b,\n norm_b=norm_b,\n act_b=act_b,\n conv_c=conv_c,\n norm_c=norm_c,\n )\n\n\ndef create_x3d_res_block(\n *,\n # Bottleneck Block configs.\n dim_in: int,\n dim_inner: int,\n dim_out: int,\n bottleneck: Callable = create_x3d_bottleneck_block,\n use_shortcut: bool = True,\n # Conv configs.\n conv_kernel_size: Tuple[int] = (3, 3, 3),\n conv_stride: Tuple[int] = (1, 2, 2),\n # Norm configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n se_ratio: float = 0.0625,\n # Activation configs.\n activation: Callable = nn.ReLU,\n inner_act: Callable = Swish,\n) -> nn.Module:\n \"\"\"\n Residual block for X3D. Performs a summation between an identity shortcut in branch1 and a\n main block in branch2. 
When the input and output dimensions are different, a\n convolution followed by a normalization will be performed.\n\n ::\n\n Input\n |-------+\n ↓ |\n Block |\n ↓ |\n Summation ←-+\n ↓\n Activation\n\n Args:\n dim_in (int): input channel size to the bottleneck block.\n dim_inner (int): intermediate channel size of the bottleneck.\n dim_out (int): output channel size of the bottleneck.\n bottleneck (callable): a callable for create_x3d_bottleneck_block.\n\n conv_kernel_size (tuple): convolutional kernel size(s) for conv_b.\n conv_stride (tuple): convolutional stride size(s) for conv_b.\n\n norm (callable): a callable that constructs normalization layer, examples\n include nn.BatchNorm3d, None (not performing normalization).\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n se_ratio (float): if > 0, apply SE to the 3x3x3 conv, with the SE\n channel dimensionality being se_ratio times the 3x3x3 conv dim.\n\n activation (callable): a callable that constructs activation layer, examples\n include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing\n activation).\n inner_act (callable): whether use Swish activation for act_b or not.\n\n Returns:\n (nn.Module): X3D block layer.\n \"\"\"\n\n norm_model = None\n if norm is not None and dim_in != dim_out:\n norm_model = norm(num_features=dim_out)\n\n return ResBlock(\n branch1_conv=nn.Conv3d(\n dim_in,\n dim_out,\n kernel_size=(1, 1, 1),\n stride=conv_stride,\n bias=False,\n )\n if (dim_in != dim_out or np.prod(conv_stride) > 1) and use_shortcut\n else None,\n branch1_norm=norm_model if dim_in != dim_out and use_shortcut else None,\n branch2=bottleneck(\n dim_in=dim_in,\n dim_inner=dim_inner,\n dim_out=dim_out,\n conv_kernel_size=conv_kernel_size,\n conv_stride=conv_stride,\n norm=norm,\n norm_eps=norm_eps,\n norm_momentum=norm_momentum,\n se_ratio=se_ratio,\n activation=activation,\n inner_act=inner_act,\n ),\n activation=None if activation is None else activation(),\n branch_fusion=lambda x, y: x + y,\n )\n\n\ndef create_x3d_res_stage(\n *,\n # Stage configs.\n depth: int,\n # Bottleneck Block configs.\n dim_in: int,\n dim_inner: int,\n dim_out: int,\n bottleneck: Callable = create_x3d_bottleneck_block,\n # Conv configs.\n conv_kernel_size: Tuple[int] = (3, 3, 3),\n conv_stride: Tuple[int] = (1, 2, 2),\n # Norm configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n se_ratio: float = 0.0625,\n # Activation configs.\n activation: Callable = nn.ReLU,\n inner_act: Callable = Swish,\n) -> nn.Module:\n \"\"\"\n Create Residual Stage, which composes sequential blocks that make up X3D.\n\n ::\n\n Input\n ↓\n ResBlock\n ↓\n .\n .\n .\n ↓\n ResBlock\n\n Args:\n\n depth (init): number of blocks to create.\n\n dim_in (int): input channel size to the bottleneck block.\n dim_inner (int): intermediate channel size of the bottleneck.\n dim_out (int): output channel size of the bottleneck.\n bottleneck (callable): a callable for create_x3d_bottleneck_block.\n\n conv_kernel_size (tuple): convolutional kernel size(s) for conv_b.\n conv_stride (tuple): convolutional stride size(s) for conv_b.\n\n norm (callable): a callable that constructs normalization layer, examples\n include nn.BatchNorm3d, None (not performing normalization).\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n se_ratio (float): if > 0, apply SE to the 3x3x3 conv, with the SE\n channel dimensionality being se_ratio times the 3x3x3 conv dim.\n\n activation 
(callable): a callable that constructs activation layer, examples\n include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing\n activation).\n inner_act (callable): whether use Swish activation for act_b or not.\n\n Returns:\n (nn.Module): X3D stage layer.\n \"\"\"\n res_blocks = []\n for idx in range(depth):\n block = create_x3d_res_block(\n dim_in=dim_in if idx == 0 else dim_out,\n dim_inner=dim_inner,\n dim_out=dim_out,\n bottleneck=bottleneck,\n conv_kernel_size=conv_kernel_size,\n conv_stride=conv_stride if idx == 0 else (1, 1, 1),\n norm=norm,\n norm_eps=norm_eps,\n norm_momentum=norm_momentum,\n se_ratio=(se_ratio if (idx + 1) % 2 else 0.0),\n activation=activation,\n inner_act=inner_act,\n )\n res_blocks.append(block)\n\n return ResStage(res_blocks=nn.ModuleList(res_blocks))\n\n\ndef create_x3d_head(\n *,\n # Projection configs.\n dim_in: int,\n dim_inner: int,\n dim_out: int,\n num_classes: int,\n # Pooling configs.\n pool_act: Callable = nn.ReLU,\n pool_kernel_size: Tuple[int] = (13, 5, 5),\n # BN configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n bn_lin5_on=False,\n # Dropout configs.\n dropout_rate: float = 0.5,\n # Activation configs.\n activation: Callable = nn.Softmax,\n # Output configs.\n output_with_global_average: bool = True,\n) -> nn.Module:\n \"\"\"\n Creates X3D head. This layer performs an projected pooling operation followed\n by an dropout, a fully-connected projection, an activation layer and a global\n spatiotemporal averaging.\n\n ::\n\n ProjectedPool\n ↓\n Dropout\n ↓\n Projection\n ↓\n Activation\n ↓\n Averaging\n\n Args:\n dim_in (int): input channel size of the X3D head.\n dim_inner (int): intermediate channel size of the X3D head.\n dim_out (int): output channel size of the X3D head.\n num_classes (int): the number of classes for the video dataset.\n\n pool_act (callable): a callable that constructs resnet pool activation\n layer such as nn.ReLU.\n pool_kernel_size (tuple): pooling kernel size(s) when not using adaptive\n pooling.\n\n norm (callable): a callable that constructs normalization layer, examples\n include nn.BatchNorm3d, None (not performing normalization).\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n bn_lin5_on (bool): if True, perform normalization on the features\n before the classifier.\n\n dropout_rate (float): dropout rate.\n\n activation (callable): a callable that constructs resnet head activation\n layer, examples include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not\n applying activation).\n\n output_with_global_average (bool): if True, perform global averaging on temporal\n and spatial dimensions and reshape output to batch_size x out_features.\n\n Returns:\n (nn.Module): X3D head layer.\n \"\"\"\n pre_conv_module = nn.Conv3d(\n in_channels=dim_in, out_channels=dim_inner, kernel_size=(1, 1, 1), bias=False\n )\n\n pre_norm_module = norm(num_features=dim_inner, eps=norm_eps, momentum=norm_momentum)\n pre_act_module = None if pool_act is None else pool_act()\n\n if pool_kernel_size is None:\n pool_module = nn.AdaptiveAvgPool3d((1, 1, 1))\n else:\n pool_module = nn.AvgPool3d(pool_kernel_size, stride=1)\n\n post_conv_module = nn.Conv3d(\n in_channels=dim_inner, out_channels=dim_out, kernel_size=(1, 1, 1), bias=False\n )\n\n if bn_lin5_on:\n post_norm_module = norm(\n num_features=dim_out, eps=norm_eps, momentum=norm_momentum\n )\n else:\n post_norm_module = None\n post_act_module = None if pool_act is None else pool_act()\n\n 
projected_pool_module = ProjectedPool(\n pre_conv=pre_conv_module,\n pre_norm=pre_norm_module,\n pre_act=pre_act_module,\n pool=pool_module,\n post_conv=post_conv_module,\n post_norm=post_norm_module,\n post_act=post_act_module,\n )\n\n if activation is None:\n activation_module = None\n elif activation == nn.Softmax:\n activation_module = activation(dim=1)\n elif activation == nn.Sigmoid:\n activation_module = activation()\n else:\n raise NotImplementedError(\n \"{} is not supported as an activation\" \"function.\".format(activation)\n )\n\n if output_with_global_average:\n output_pool = nn.AdaptiveAvgPool3d(1)\n else:\n output_pool = None\n\n return ResNetBasicHead(\n proj=nn.Linear(dim_out, num_classes, bias=True),\n activation=activation_module,\n pool=projected_pool_module,\n dropout=nn.Dropout(dropout_rate) if dropout_rate > 0 else None,\n output_pool=output_pool,\n )\n\n\ndef create_x3d(\n *,\n # Input clip configs.\n input_channel: int = 3,\n input_clip_length: int = 13,\n input_crop_size: int = 160,\n # Model configs.\n model_num_class: int = 400,\n dropout_rate: float = 0.5,\n width_factor: float = 2.0,\n depth_factor: float = 2.2,\n # Normalization configs.\n norm: Callable = nn.BatchNorm3d,\n norm_eps: float = 1e-5,\n norm_momentum: float = 0.1,\n # Activation configs.\n activation: Callable = nn.ReLU,\n # Stem configs.\n stem_dim_in: int = 12,\n stem_conv_kernel_size: Tuple[int] = (5, 3, 3),\n stem_conv_stride: Tuple[int] = (1, 2, 2),\n # Stage configs.\n stage_conv_kernel_size: Tuple[Tuple[int]] = (\n (3, 3, 3),\n (3, 3, 3),\n (3, 3, 3),\n (3, 3, 3),\n ),\n stage_spatial_stride: Tuple[int] = (2, 2, 2, 2),\n stage_temporal_stride: Tuple[int] = (1, 1, 1, 1),\n bottleneck: Callable = create_x3d_bottleneck_block,\n bottleneck_factor: float = 2.25,\n se_ratio: float = 0.0625,\n inner_act: Callable = Swish,\n # Head configs.\n head_dim_out: int = 2048,\n head_pool_act: Callable = nn.ReLU,\n head_bn_lin5_on: bool = False,\n head_activation: Callable = nn.Softmax,\n head_output_with_global_average: bool = True,\n) -> nn.Module:\n \"\"\"\n X3D model builder. It builds a X3D network backbone, which is a ResNet.\n\n Christoph Feichtenhofer.\n \"X3D: Expanding Architectures for Efficient Video Recognition.\"\n https://arxiv.org/abs/2004.04730\n\n ::\n\n Input\n ↓\n Stem\n ↓\n Stage 1\n ↓\n .\n .\n .\n ↓\n Stage N\n ↓\n Head\n\n Args:\n input_channel (int): number of channels for the input video clip.\n input_clip_length (int): length of the input video clip. Value for\n different models: X3D-XS: 4; X3D-S: 13; X3D-M: 16; X3D-L: 16.\n input_crop_size (int): spatial resolution of the input video clip.\n Value for different models: X3D-XS: 160; X3D-S: 160; X3D-M: 224;\n X3D-L: 312.\n\n model_num_class (int): the number of classes for the video dataset.\n dropout_rate (float): dropout rate.\n width_factor (float): width expansion factor.\n depth_factor (float): depth expansion factor. 
Value for different\n models: X3D-XS: 2.2; X3D-S: 2.2; X3D-M: 2.2; X3D-L: 5.0.\n\n norm (callable): a callable that constructs normalization layer.\n norm_eps (float): normalization epsilon.\n norm_momentum (float): normalization momentum.\n\n activation (callable): a callable that constructs activation layer.\n\n stem_dim_in (int): input channel size for stem before expansion.\n stem_conv_kernel_size (tuple): convolutional kernel size(s) of stem.\n stem_conv_stride (tuple): convolutional stride size(s) of stem.\n\n stage_conv_kernel_size (tuple): convolutional kernel size(s) for conv_b.\n stage_spatial_stride (tuple): the spatial stride for each stage.\n stage_temporal_stride (tuple): the temporal stride for each stage.\n bottleneck_factor (float): bottleneck expansion factor for the 3x3x3 conv.\n se_ratio (float): if > 0, apply SE to the 3x3x3 conv, with the SE\n channel dimensionality being se_ratio times the 3x3x3 conv dim.\n inner_act (callable): whether use Swish activation for act_b or not.\n\n head_dim_out (int): output channel size of the X3D head.\n head_pool_act (callable): a callable that constructs resnet pool activation\n layer such as nn.ReLU.\n head_bn_lin5_on (bool): if True, perform normalization on the features\n before the classifier.\n head_activation (callable): a callable that constructs activation layer.\n head_output_with_global_average (bool): if True, perform global averaging on\n the head output.\n\n Returns:\n (nn.Module): the X3D network.\n \"\"\"\n blocks = []\n # Create stem for X3D.\n stem_dim_out = round_width(stem_dim_in, width_factor)\n stem = create_x3d_stem(\n in_channels=input_channel,\n out_channels=stem_dim_out,\n conv_kernel_size=stem_conv_kernel_size,\n conv_stride=stem_conv_stride,\n conv_padding=[size // 2 for size in stem_conv_kernel_size],\n norm=norm,\n norm_eps=norm_eps,\n norm_momentum=norm_momentum,\n activation=activation,\n )\n blocks.append(stem)\n\n # Compute the depth and dimension for each stage\n stage_depths = [1, 2, 5, 3]\n exp_stage = 2.0\n stage_dim1 = stem_dim_in\n stage_dim2 = round_width(stage_dim1, exp_stage, divisor=8)\n stage_dim3 = round_width(stage_dim2, exp_stage, divisor=8)\n stage_dim4 = round_width(stage_dim3, exp_stage, divisor=8)\n stage_dims = [stage_dim1, stage_dim2, stage_dim3, stage_dim4]\n\n dim_in = stem_dim_out\n # Create each stage for X3D.\n for idx in range(len(stage_depths)):\n dim_out = round_width(stage_dims[idx], width_factor)\n dim_inner = int(bottleneck_factor * dim_out)\n depth = round_repeats(stage_depths[idx], depth_factor)\n\n stage_conv_stride = (\n stage_temporal_stride[idx],\n stage_spatial_stride[idx],\n stage_spatial_stride[idx],\n )\n\n stage = create_x3d_res_stage(\n depth=depth,\n dim_in=dim_in,\n dim_inner=dim_inner,\n dim_out=dim_out,\n bottleneck=bottleneck,\n conv_kernel_size=stage_conv_kernel_size[idx],\n conv_stride=stage_conv_stride,\n norm=norm,\n norm_eps=norm_eps,\n norm_momentum=norm_momentum,\n se_ratio=se_ratio,\n activation=activation,\n inner_act=inner_act,\n )\n blocks.append(stage)\n dim_in = dim_out\n\n # Create head for X3D.\n total_spatial_stride = stem_conv_stride[1] * np.prod(stage_spatial_stride)\n total_temporal_stride = stem_conv_stride[0] * np.prod(stage_temporal_stride)\n\n assert (\n input_clip_length >= total_temporal_stride\n ), \"Clip length doesn't match temporal stride!\"\n assert (\n input_crop_size >= total_spatial_stride\n ), \"Crop size doesn't match spatial stride!\"\n\n head_pool_kernel_size = (\n input_clip_length // total_temporal_stride,\n 
input_crop_size // total_spatial_stride,\n input_crop_size // total_spatial_stride,\n )\n\n head = create_x3d_head(\n dim_in=dim_out,\n dim_inner=dim_inner,\n dim_out=head_dim_out,\n num_classes=model_num_class,\n pool_act=head_pool_act,\n pool_kernel_size=head_pool_kernel_size,\n norm=norm,\n norm_eps=norm_eps,\n norm_momentum=norm_momentum,\n bn_lin5_on=head_bn_lin5_on,\n dropout_rate=dropout_rate,\n activation=head_activation,\n output_with_global_average=head_output_with_global_average,\n )\n blocks.append(head)\n return Net(blocks=nn.ModuleList(blocks))\n\n\nclass ProjectedPool(nn.Module):\n \"\"\"\n A pooling module augmented with Conv, Normalization and Activation both\n before and after pooling for the head layer of X3D.\n\n ::\n\n Conv3d (pre_conv)\n ↓\n Normalization (pre_norm)\n ↓\n Activation (pre_act)\n ↓\n Pool3d\n ↓\n Conv3d (post_conv)\n ↓\n Normalization (post_norm)\n ↓\n Activation (post_act)\n \"\"\"\n\n def __init__(\n self,\n *,\n pre_conv: nn.Module = None,\n pre_norm: nn.Module = None,\n pre_act: nn.Module = None,\n pool: nn.Module = None,\n post_conv: nn.Module = None,\n post_norm: nn.Module = None,\n post_act: nn.Module = None,\n ) -> None:\n \"\"\"\n Args:\n pre_conv (torch.nn.modules): convolutional module.\n pre_norm (torch.nn.modules): normalization module.\n pre_act (torch.nn.modules): activation module.\n pool (torch.nn.modules): pooling module.\n post_conv (torch.nn.modules): convolutional module.\n post_norm (torch.nn.modules): normalization module.\n post_act (torch.nn.modules): activation module.\n \"\"\"\n super().__init__()\n set_attributes(self, locals())\n assert self.pre_conv is not None\n assert self.pool is not None\n assert self.post_conv is not None\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.pre_conv(x)\n\n if self.pre_norm is not None:\n x = self.pre_norm(x)\n if self.pre_act is not None:\n x = self.pre_act(x)\n\n x = self.pool(x)\n x = self.post_conv(x)\n\n if self.post_norm is not None:\n x = self.post_norm(x)\n if self.post_act is not None:\n x = self.post_act(x)\n return x\n" ]
[ [ "torch.nn.AvgPool3d", "torch.nn.Dropout", "torch.nn.ModuleList", "torch.nn.AdaptiveAvgPool3d", "torch.nn.Linear", "torch.nn.Conv3d", "torch.nn.Identity", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pmehta08/MulensModel
[ "261738c445a8d116d09c90e65f6e847cfc8a7ad8", "261738c445a8d116d09c90e65f6e847cfc8a7ad8" ]
[ "examples/run_time_tests/compare.py", "data/interpolate_elliptic_integral_1_2.py" ]
[ "import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\nsys.path.append(\"/usr/custom/pyLIMA-1.0.0\")\nfrom pyLIMA import event, telescopes, microlmodels\nimport MulensModel as mm\n\n\n# Common settings:\nt_0 = 2456900.\nu_0 = 0.1\nt_E = 150.\npi_E_N = 1.\npi_E_E = 2.\nra_deg = 270.\ndec_deg = -30.\ntime = np.linspace(t_0-80., t_0+80., 1000)\nd_time = 2450000.\n\n# MulensModel calculations and plot:\ncoords = SkyCoord(ra_deg, dec_deg, unit=u.deg)\nparams = {'t_0': t_0, 'u_0': u_0, 't_E': t_E,\n 'pi_E_N': pi_E_N, 'pi_E_E': pi_E_E}\nmm_model = mm.Model(params, coords=coords)\nmm_mag = mm_model.magnification(time)\nplt.plot(time-d_time, mm_mag, 'r.', label='MulensModel')\n\n# Read VBBL output and plot it:\nvbbl_data = np.loadtxt(\"fake.out\", unpack=True)\nplt.plot(vbbl_data[0], vbbl_data[1], 'g-.', label='VBBL')\n\n# This are the changes I have to make to make the results as close as possible:\npi_E_E = -pi_E_E\npi_E_N = -pi_E_N\n\n# pyLIMA calculations and plots:\nyour_event = event.Event()\nyour_event.ra = ra_deg\nyour_event.dec = dec_deg\ndata_1 = np.array([time, time*0.+15., time*0.+.01]).T\ntelescope_1 = telescopes.Telescope(light_curve_magnitude=data_1)\nyour_event.telescopes.append(telescope_1)\nyour_event.check_event()\nmodel_1 = microlmodels.create_model(\n 'PSPL', your_event, parallax=['Annual', t_0])\nmodel_1.define_model_parameters()\npyLIMA_parameters = model_1.compute_pyLIMA_parameters(\n [t_0, u_0, t_E, pi_E_N, pi_E_E])\nmodel = model_1.compute_the_microlensing_model(telescope_1, pyLIMA_parameters)\nmag_pyLIMA = model_1.model_magnification(telescope_1, pyLIMA_parameters)\nplt.plot(time-d_time, mag_pyLIMA, 'b.', label='pyLIMA')\n\n# Compare pyLIMA and MM:\nindex = np.argmax(np.abs(mm_mag - mag_pyLIMA))\nprint(\"Largest difference is for: \", index, time[index]-d_time)\nprint(\"pyLIMA:\", mag_pyLIMA[index])\nprint(\"MM:\", mm_mag[index])\n\n# This is the end:\nplt.legend(loc='best')\nplt.xlabel('JD-2450000')\nplt.ylabel('magnification')\nplt.show()\n", "\"\"\"\nCalculates interpolation tables for elliptical integral of\nthe first and second kind.\n\"\"\"\nimport math\nimport numpy as np\nfrom math import sin, cos, sqrt\nfrom scipy import integrate\nfrom scipy.interpolate import interp1d\nfrom scipy.special import ellipk, ellipe\n# These are complete elliptic integrals of the first and the second kind.\n\n\naccuracy = 1.e-6\nn_divide = 10 + 1\nx_start = 2.e-5\nx_stop = 1. 
- 1.e-12\nn_start = 10\n\n# Settings end here.\n\n\ndef get_ellip(x):\n k = []\n e = []\n for x_ in x:\n if x_ not in get_ellip.k:\n get_ellip.k[x_] = ellipk(x_)\n get_ellip.e[x_] = ellipe(x_)\n k.append(get_ellip.k[x_])\n e.append(get_ellip.e[x_])\n return (np.array(k), np.array(e))\nget_ellip.k = dict()\nget_ellip.e = dict()\n\nx = np.logspace(np.log10(x_start), np.log10(x_stop), n_start)\n\niteration = 0\nadd = [None]\nwhile len(add) > 0:\n iteration += 1\n add = []\n (k, e) = get_ellip(x)\n interp_k = interp1d(np.log10(x), k, kind='cubic')\n interp_e = interp1d(np.log10(x), e, kind='cubic')\n for i in range(len(x)-1):\n x_1 = x[i]\n x_2 = x[i+1]\n check = np.logspace(np.log10(x_1), np.log10(x_2), n_divide)[1:-1]\n (check_true_k, check_true_e) = get_ellip(check)\n check_k = []\n check_e = []\n for c in check:\n check_k.append(interp_k(np.log10(c)))\n check_e.append(interp_e(np.log10(c)))\n check_k = np.array(check_k)\n check_e = np.array(check_e)\n relative_diff_k = np.abs(check_k - check_true_k) / check_true_k\n relative_diff_e = np.abs(check_e - check_true_e) / check_true_e\n relative_diff_max = np.amax(\n np.array([relative_diff_k, relative_diff_e]), axis=0)\n index = np.argsort(relative_diff_max)[-1]\n if relative_diff_max[index] < accuracy:\n continue\n add.append(check[index])\n new_x = np.sort(add + x.tolist())\n x = new_x\n\nfor (x_, k_, e_) in zip(x, k, e):\n print(x_, k_, e_)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.abs", "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.ylabel" ], [ "numpy.abs", "scipy.special.ellipe", "scipy.special.ellipk", "numpy.log10", "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.16", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [] } ]
fabriziocosta/EGO
[ "d89e88183cce1ff24dca9333c09fa11597a45c7a" ]
[ "ego/optimization/neighborhood_graph_grammar.py" ]
[ "#!/usr/bin/env python\n\"\"\"Provides scikit interface.\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport random\nfrom ego.decompose import do_decompose\nfrom ego.decomposition.positive_and_negative import decompose_positive, decompose_negative\nfrom ego.decomposition.union import decompose_all_union\nfrom graphlearn.sample import LocalSubstitutionGraphGrammarSample as lsgg\nfrom graphlearn.lsgg_ego import lsgg_ego\nfrom ego.optimization.part_importance_estimator import PartImportanceEstimator\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass NeighborhoodGraphGrammar(object):\n\n def __init__(self,\n core=2,\n context=1,\n count=2,\n filter_max_num_substitutions=None,\n n_neighbors=None,\n perturbation_size=2,\n part_importance_estimator=None,\n objective_func=None):\n self.n_neighbors = n_neighbors\n self.graph_grammar = lsgg(\n radii=list(range(core)),\n thickness=context,\n filter_min_cip=count,\n filter_min_interface=2,\n filter_max_num_substitutions=filter_max_num_substitutions,\n nodelevel_radius_and_thickness=False)\n self.perturbation_size = perturbation_size\n self.part_importance_estimator = part_importance_estimator\n self.objective_func = objective_func\n\n def fit(self, graphs):\n self.graph_grammar.fit(graphs)\n return self\n\n def __repr__(self):\n return str(self.graph_grammar)\n\n def perturb(self, orig_graph):\n graph = nx.Graph(orig_graph)\n for i in range(self.perturbation_size):\n # select a node at random\n ns = [u for u in graph.nodes()]\n root = np.random.choice(ns, size=1)[0]\n # materialize the neighborhood\n graphs_it = self.graph_grammar.root_neighbors(\n graph, [root], n_neighbors=self.n_neighbors)\n graphs = list(graphs_it) + [graph]\n # select a neighbor at random\n ids = list(range(len(graphs)))\n id = np.random.choice(ids, size=1)[0]\n graph = nx.Graph(graphs[id])\n return graph\n\n def _select_nodes_in_prob(self, graph, node_score_dict, edge_score_dict):\n # select root nodes with probability proportional to negative scores\n _eps_ = 1e-4\n s = np.array([node_score_dict[u] + _eps_ for u in graph.nodes()])\n # invert score and take exp\n p = np.exp(-s / np.max(np.absolute(s)))\n # normalize it to make it a prob\n p = p / p.sum()\n ns = [u for u in graph.nodes()]\n # sample up to half of the most negative nodes\n n = int(len(graph) / 2)\n sel = np.random.choice(ns, size=n, p=p, replace=False)\n selected_nodes = list(sel)\n return selected_nodes\n\n def _select_negative_nodes(self, graph, node_score_dict, edge_score_dict):\n selected_nodes = [u for u in graph.nodes() if node_score_dict[u] <= 0]\n return selected_nodes\n\n def _select_best(self, graphs):\n # select graph that maximizes the average objective score\n # (to contrast the tendency to produce large molecules)\n func = lambda g: self.objective_func(g) / float(len(g))\n best_graph = max(graphs, key=func)\n return best_graph\n\n def neighbors(self, graph):\n if self.n_neighbors is None:\n return list(self.graph_grammar.neighbors(graph))\n else:\n return list(self.graph_grammar.neighbors_sample(graph, self.n_neighbors))\n\n def make_neighbors(self, graph, selected_nodes=None, include_original=True):\n if selected_nodes is None or len(selected_nodes) == 0:\n out_graphs = self.make_all_neighbors(graph, include_original)\n else:\n out_graphs = self.make_neighbors_from_selected_nodes(\n graph, selected_nodes, include_original)\n return out_graphs\n\n def make_all_neighbors(self, graph, include_original=True):\n if include_original:\n out_graphs = [graph]\n else:\n out_graphs = 
[]\n out_graphs += self.neighbors(graph)\n return out_graphs\n\n def make_neighbors_from_selected_nodes(self, graph, selected_nodes=None, include_original=True):\n # compute neighborhood, i.e. graphs that are obtained as a single core\n # substitution\n if include_original:\n out_graphs = [graph]\n else:\n out_graphs = []\n for selected_node in selected_nodes:\n graphs_it = self.graph_grammar.root_neighbors(\n graph, [selected_node], n_neighbors=self.n_neighbors)\n out_graphs += list(graphs_it)\n return out_graphs\n\n def make_gradient_neighbors(self, graph):\n if self.part_importance_estimator is None:\n selected_nodes = None\n else:\n res = self.part_importance_estimator.predict(graph)\n node_score_dict, edge_score_dict = res\n\n selected_nodes = self._select_negative_nodes(\n graph, node_score_dict, edge_score_dict)\n\n out_graphs = self.make_neighbors(graph, selected_nodes)\n return out_graphs\n\n def gradient_descent(self, graph):\n out_graphs = self.make_gradient_neighbors(graph)\n best_graph = self._select_best(out_graphs)\n return best_graph\n\n\n# ----------------------------------------------------------\n\nclass NeighborhoodEgoGraphGrammar(object):\n\n def __init__(self,\n decomposition_function=None,\n context=1,\n count=1,\n filter_max_num_substitutions=None,\n n_neighbors=None,\n perturbation_size=0,\n objective_func=None):\n self.n_neighbors = n_neighbors\n self.graph_grammar = lsgg_ego(\n decomposition_function=decomposition_function,\n thickness=context,\n filter_min_cip=count,\n filter_min_interface=2,\n filter_max_num_substitutions=filter_max_num_substitutions,\n nodelevel_radius_and_thickness=False)\n self.perturbation_size = perturbation_size\n self.objective_func = objective_func\n\n def fit(self, graphs):\n self.graph_grammar.fit(graphs)\n return self\n\n def __repr__(self):\n return str(self.graph_grammar)\n\n def neighbors(self, graph, n_neighbors=None):\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n if n_neighbors is None:\n ns = list(self.graph_grammar.neighbors(graph))\n else:\n ns = list(self.graph_grammar.neighbors_sample(graph, n_neighbors))\n return ns\n\n def perturb(self, orig_graph):\n graph = nx.Graph(orig_graph)\n for i in range(self.perturbation_size):\n # select a graph at random\n out_graphs = self.neighbors(graph)\n if len(out_graphs) > 2:\n graph = np.random.choice(out_graphs, size=1)[0]\n return graph\n\n def _select_best(self, graphs):\n # select graph that maximizes the average objective score\n # (to contrast the tendency to produce large molecules)\n func = lambda g: self.objective_func(g) / float(len(g))\n best_graph = max(graphs, key=func)\n return best_graph\n\n def gradient_descent(self, graph):\n out_graphs = self.neighbors(graph)\n best_graph = self._select_best([graph] + out_graphs)\n return best_graph\n\n\n# ----------------------------------------------------------\n\nclass NeighborhoodPartImportanceGraphGrammar(object):\n\n def __init__(self,\n decomposition_function=None,\n context=1,\n count=1,\n filter_max_num_substitutions=None,\n n_neighbors=None,\n fit_at_each_iteration=False,\n frac_nodes_to_select=.5,\n enforce_connected=True,\n domain_graphs=[]):\n self.domain_graphs = domain_graphs\n self.decomposition_function = decomposition_function\n self.n_neighbors = n_neighbors\n self.fit_at_each_iteration = fit_at_each_iteration\n self.part_importance_estimator = PartImportanceEstimator(\n decompose_func=decomposition_function)\n self.graph_grammar = lsgg_ego(\n decomposition_function=decomposition_function,\n 
thickness=context,\n filter_min_cip=count,\n filter_min_interface=2,\n filter_max_num_substitutions=filter_max_num_substitutions,\n nodelevel_radius_and_thickness=False)\n self.frac_nodes_to_select = frac_nodes_to_select\n self.enforce_connected = enforce_connected\n\n def fit_grammar(self, graphs):\n self.graph_grammar.fit(graphs+self.domain_graphs)\n return self\n\n def fit_part_importance_estimator(self, graphs, targets):\n self.part_importance_estimator.fit(graphs, targets)\n return self\n\n def fit(self, graphs, targets):\n if self.fit_at_each_iteration:\n self.fit_grammar(graphs)\n print('%30s: %s' % ('Part Importance Graph Grammar', self))\n return self.fit_part_importance_estimator(graphs, targets)\n\n def __repr__(self):\n return str(self.graph_grammar)\n\n def neighbors(self, graph, n_neighbors=None):\n res = self.part_importance_estimator.predict(graph)\n node_score_dict, edge_score_dict = res\n nodes = list(graph.nodes())\n # select the nodes with lowest score\n selected_nodes = sorted(nodes, key=lambda u: node_score_dict[u])\n selected_nodes = selected_nodes[\n :int(len(selected_nodes) * self.frac_nodes_to_select)]\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n if n_neighbors is None:\n ns = list(self.graph_grammar.neighbors(graph))\n else:\n ns = list(self.graph_grammar.root_neighbors(\n graph, selected_nodes, n_neighbors))\n random.shuffle(ns)\n ns = ns[:n_neighbors]\n if self.enforce_connected:\n ns = [g for g in ns if nx.is_connected(g)]\n return ns\n\n\n# ----------------------------------------------------------\n\nclass NeighborhoodAdaptiveGraphGrammar(object):\n\n def __init__(self,\n base_decomposition_function=None,\n approximate_decomposition_function=None,\n context=1,\n count=1,\n filter_max_num_substitutions=None,\n n_neighbors=None,\n ktop=4,\n enforce_connected=True):\n self.ktop = ktop\n self.base_decomposition_function = base_decomposition_function\n self.approximate_decomposition_function = approximate_decomposition_function\n self.n_neighbors = n_neighbors\n self.part_importance_estimator = PartImportanceEstimator(\n decompose_func=self.base_decomposition_function)\n self.graph_grammar = lsgg_ego(\n decomposition_function=self.base_decomposition_function,\n thickness=context,\n filter_min_cip=count,\n filter_min_interface=2,\n filter_max_num_substitutions=filter_max_num_substitutions,\n nodelevel_radius_and_thickness=False)\n self.enforce_connected = enforce_connected\n\n def fit(self, graphs, targets):\n self.fit_part_importance_estimator(graphs, targets)\n\n pos_dec = do_decompose(decompose_positive(ktop=self.ktop, part_importance_estimator=self.part_importance_estimator),\n compose_function=decompose_all_union)\n neg_dec = do_decompose(decompose_negative(ktop=self.ktop, part_importance_estimator=self.part_importance_estimator),\n compose_function=decompose_all_union)\n frag_dec = do_decompose(pos_dec, neg_dec, compose_function=self.approximate_decomposition_function)\n self.adaptive_decomposition_function = do_decompose(pos_dec, neg_dec, frag_dec)\n self.graph_grammar.set_decomposition(self.adaptive_decomposition_function)\n self.fit_grammar(graphs)\n print('%30s: %s' % ('Adaptive Graph Grammar', self))\n return self\n\n def fit_grammar(self, graphs):\n self.graph_grammar.fit(graphs)\n return self\n\n def fit_part_importance_estimator(self, graphs, targets):\n self.part_importance_estimator.fit(graphs, targets)\n return self\n\n def __repr__(self):\n return str(self.graph_grammar)\n\n def neighbors(self, graph, n_neighbors=None):\n if 
n_neighbors is None:\n n_neighbors = self.n_neighbors\n if n_neighbors is None:\n ns = list(self.graph_grammar.neighbors(graph))\n else:\n ns = list(self.graph_grammar.neighbors_sample(graph, n_neighbors))\n if self.enforce_connected:\n ns = [g for g in ns if nx.is_connected(g)]\n return ns\n" ]
[ [ "numpy.absolute", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OliverT1/gnina_tensorflow
[ "339310c643a85e6df1248d03dbbe4ae78cf59f19", "339310c643a85e6df1248d03dbbe4ae78cf59f19" ]
[ "layers/layer_functions.py", "layers/inverse_convolution.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 11 09:12:15 2020\n\n@author: scantleb\n@brief: Functions for generating keras layers.\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\ndef generate_activation_layers(block_name, activation, append_name_info=True):\n \"\"\"Generate activation layers from strings representing activation layers.\n\n Arguments:\n block_name: name of the block the layer is a part of\n activation: string representing an activation function; can be\n standard keras string to AF names ('relu', 'sigmoid', etc.), or\n one of either 'prelu' (Parameterised ReLU) or 'threlu'\n (Thresholded ReLU)\n append_name_info: add activation function information to name\n\n Returns:\n Generator which produces layers with names containing increasing\n index numbers, with the specified activation function and base name.\n \"\"\"\n name_template = '{0}_{{0}}_{1}'.format(block_name, activation)\n block_name_index = 0\n while True:\n if append_name_info:\n act_name = name_template.format(block_name_index)\n else:\n act_name = block_name\n block_name_index += 1\n if activation == 'prelu':\n yield layers.PReLU(\n name=act_name,\n alpha_initializer=tf.keras.initializers.constant(0.1))\n elif activation == 'threlu':\n yield layers.ThresholdedReLU(theta=1.0, name=act_name)\n else:\n yield layers.Activation(activation, name=act_name)\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 14 16:55:13 2020\n@author: scantleb\n@brief: Layer definition for artifact-free upsample and convolution operation.\n\nThe Conv3DTranspose must be used with great care to avoid checkerboard artifacts\non reconstructed images (for example, the stride must be devisible by the\nkernel size). Using this 'deconvolution' has thus fallen out of favour, being\nreplaced by upsampling (using nearest neighbour interpolation) followed by\na convolution with stride 1.\n\"\"\"\n\nfrom tensorflow.keras import layers\n\n\nclass InverseConvolution3D(layers.Layer):\n \"\"\"Convolution with upsampling.\"\"\"\n\n def __init__(self, filters, kernel_size, scale_factor, **kwargs):\n \"\"\"Constructor for layer.\n\n Arguments:\n filters: number of filters in output of layer\n kernel_size: int or tuple of 3 ints, size of kernel to apply to\n inputs\n scale_factor: factor to multiply spatial dimensions of inputs by\n kwargs: other arguments for the 3D convolution (see documentation\n for layers.Conv3D)\n \"\"\"\n super().__init__()\n self.scale_factor = scale_factor\n self.filters = filters\n self.kernel_size = kernel_size\n\n self.upsample = layers.UpSampling3D(\n self.scale_factor,\n data_format=kwargs.get('data_format', 'channels_first')\n )\n self.conv = layers.Conv3D(self.filters, self.kernel_size, 1, **kwargs)\n\n def __call__(self, inputs):\n \"\"\"Overloaded method; see base class (layers.Layer).\n\n Performs an upsample operation before a convolution.\n \"\"\"\n x = self.upsample(inputs)\n x = self.conv(x)\n return x\n\n def get_config(self):\n \"\"\"Overloaded method; see base class (layers.Layer).\"\"\"\n config = super().get_config()\n config.update(\n {\n 'scale_factor': self.scale_factor,\n 'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'upsample': self.upsample,\n 'conv': self.conv\n }\n )\n return config\n" ]
[ [ "tensorflow.keras.layers.ThresholdedReLU", "tensorflow.keras.initializers.constant", "tensorflow.keras.layers.Activation" ], [ "tensorflow.keras.layers.Conv3D" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
hologerry/magenta
[ "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f", "c08c17a548f97a3f5d294a010c28ea2803718d6f" ]
[ "magenta/models/gansynth/lib/data_helpers.py", "magenta/common/beam_search_test.py", "magenta/music/encoder_decoder.py", "magenta/music/alignment/align_fine_lib.py", "magenta/models/score2perf/modalities_test.py", "magenta/models/score2perf/datagen_beam_test.py", "magenta/models/onsets_frames_transcription/data_test.py", "magenta/music/lead_sheets_lib_test.py", "magenta/models/latent_transfer/common_joint.py" ]
[ "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data utility.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom magenta.models.gansynth.lib import datasets\nfrom magenta.models.gansynth.lib import train_util\nfrom magenta.models.gansynth.lib.specgrams_helper import SpecgramsHelper\nimport tensorflow.compat.v1 as tf # noqa\n\n\n\nclass DataHelper(object):\n \"\"\"A class for querying and converting data.\"\"\"\n\n def __init__(self, config):\n self._config = config\n self._dataset_name = config['dataset_name']\n self.dataset = datasets.registry[self._dataset_name](config)\n self.specgrams_helper = self.make_specgrams_helper()\n\n def _map_fn(self):\n \"\"\"Create a mapping function for the dataset.\"\"\"\n raise NotImplementedError\n\n def make_specgrams_helper(self):\n \"\"\"Create a specgrams helper for the dataset.\"\"\"\n raise NotImplementedError\n\n def data_to_waves(self, data):\n \"\"\"Converts data representation to waveforms.\"\"\"\n raise NotImplementedError\n\n def waves_to_data(self, waves):\n \"\"\"Converts data representation to waveforms.\"\"\"\n raise NotImplementedError\n\n def get_pitch_counts(self):\n \"\"\"Returns a dictionary {pitch value (int): count (int)}.\"\"\"\n return self.dataset.get_pitch_counts()\n\n def provide_one_hot_labels(self, batch_size):\n \"\"\"Returns a batch of one-hot labels.\"\"\"\n with tf.name_scope('inputs'):\n with tf.device('/cpu:0'):\n return self.dataset.provide_one_hot_labels(batch_size=batch_size)\n\n def provide_data(self, batch_size):\n \"\"\"Returns a batch of data and one-hot labels.\"\"\"\n with tf.name_scope('inputs'):\n with tf.device('/cpu:0'):\n dataset = self.dataset.provide_dataset()\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.map(self._map_fn, num_parallel_calls=4)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n iterator = dataset.make_initializable_iterator()\n tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,\n iterator.initializer)\n\n data, one_hot_labels = iterator.get_next()\n data.set_shape([batch_size, None, None, None])\n one_hot_labels.set_shape([batch_size, None])\n return data, one_hot_labels\n\n\nclass DataSTFTHelper(DataHelper):\n \"\"\"A data helper for Linear Spectrograms.\"\"\"\n\n def make_specgrams_helper(self):\n final_resolutions = train_util.make_resolution_schedule(\n **self._config).final_resolutions\n return SpecgramsHelper(\n audio_length=self._config['audio_length'],\n spec_shape=final_resolutions,\n overlap=0.75,\n sample_rate=self._config['sample_rate'],\n mel_downscale=1,\n ifreq=True)\n\n def _map_fn(self, wave, one_hot_label):\n waves = wave[tf.newaxis, :, :]\n data = self.waves_to_data(waves)\n return data[0], one_hot_label\n\n def data_to_waves(self, data):\n return self.specgrams_helper.specgrams_to_waves(data)\n\n def waves_to_data(self, waves):\n return self.specgrams_helper.waves_to_specgrams(waves)\n\n\nclass 
DataWaveHelper(DataSTFTHelper):\n \"\"\"A data helper for raw waveforms.\n\n For compatibility with the spectral network architectues, we add a second\n (redundant) channel and zero-pad along the time axis.\n \"\"\"\n\n def make_specgrams_helper(self):\n return SpecgramsHelper(audio_length=64000,\n spec_shape=(256, 512),\n overlap=0.75,\n sample_rate=self._config['sample_rate'],\n mel_downscale=2)\n\n def data_to_waves(self, data):\n return data[:, 768:-768, 0, :1]\n\n def waves_to_data(self, waves):\n waves = waves[:, :, None, :]\n pad = tf.zeros([tf.shape(waves)[0], 768, 1, 1])\n waves = tf.concat([pad, waves, pad], axis=1)\n return tf.concat([waves, waves], axis=3)\n\n\nclass DataSTFTNoIFreqHelper(DataHelper):\n \"\"\"A data helper for Linear Spectrograms.\"\"\"\n\n def make_specgrams_helper(self):\n final_resolutions = train_util.make_resolution_schedule(\n **self._config).final_resolutions\n return SpecgramsHelper(\n audio_length=self._config['audio_length'],\n spec_shape=final_resolutions,\n overlap=0.75,\n sample_rate=self._config['sample_rate'],\n mel_downscale=1,\n ifreq=False)\n\n def _map_fn(self, wave, one_hot_label):\n waves = wave[tf.newaxis, :, :]\n data = self.waves_to_data(waves)\n return data[0], one_hot_label\n\n def data_to_waves(self, data):\n return self.specgrams_helper.specgrams_to_waves(data)\n\n def waves_to_data(self, waves):\n return self.specgrams_helper.waves_to_specgrams(waves)\n\n\nclass DataMelHelper(DataSTFTHelper):\n \"\"\"A data helper for Mel Spectrograms.\"\"\"\n\n def data_to_waves(self, data):\n return self.specgrams_helper.melspecgrams_to_waves(data)\n\n def waves_to_data(self, waves):\n return self.specgrams_helper.waves_to_melspecgrams(waves)\n\n\nregistry = {\n 'linear': DataSTFTHelper,\n 'phase': DataSTFTNoIFreqHelper,\n 'mel': DataMelHelper,\n 'wave': DataWaveHelper,\n}\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for beam search.\"\"\"\n\nfrom magenta.common import beam_search\nimport tensorflow.compat.v1 as tf # noqa\n\n\n\nclass BeamSearchTest(tf.test.TestCase):\n\n def _generate_step_fn(self, sequences, states, scores):\n # This acts as a binary counter for testing purposes. For scoring, zeros\n # accumulate value exponentially in the state, ones \"cash in\". 
The highest-\n # scoring sequence would be all zeros followed by a single one.\n value = 0\n for i, seq in enumerate(sequences):\n seq.append(value)\n if value == 0:\n states[i] *= 2\n else:\n scores[i] += states[i]\n states[i] = 1\n if (i - 1) % (2 ** len(seq)) == 0:\n value = 1 - value\n return sequences, states, scores\n\n def testNoBranchingSingleStepPerIteration(self):\n sequence, state, score = beam_search(\n initial_sequence=[], initial_state=1,\n generate_step_fn=self._generate_step_fn, num_steps=5, beam_size=1,\n branch_factor=1, steps_per_iteration=1)\n\n # The generator should emit all zeros, as only a single sequence is ever\n # considered so the counter doesn't reach one.\n self.assertEqual(sequence, [0, 0, 0, 0, 0])\n self.assertEqual(state, 32)\n self.assertEqual(score, 0)\n\n def testNoBranchingMultipleStepsPerIteration(self):\n sequence, state, score = beam_search(\n initial_sequence=[], initial_state=1,\n generate_step_fn=self._generate_step_fn, num_steps=5, beam_size=1,\n branch_factor=1, steps_per_iteration=2)\n\n # Like the above case, the counter should never reach one as only a single\n # sequence is ever considered.\n self.assertEqual(sequence, [0, 0, 0, 0, 0])\n self.assertEqual(state, 32)\n self.assertEqual(score, 0)\n\n def testBranchingSingleBeamEntry(self):\n sequence, state, score = beam_search(\n initial_sequence=[], initial_state=1,\n generate_step_fn=self._generate_step_fn, num_steps=5, beam_size=1,\n branch_factor=32, steps_per_iteration=1)\n\n # Here the beam search should greedily choose ones.\n self.assertEqual(sequence, [1, 1, 1, 1, 1])\n self.assertEqual(state, 1)\n self.assertEqual(score, 5)\n\n def testNoBranchingMultipleBeamEntries(self):\n sequence, state, score = beam_search(\n initial_sequence=[], initial_state=1,\n generate_step_fn=self._generate_step_fn, num_steps=5, beam_size=32,\n branch_factor=1, steps_per_iteration=1)\n\n # Here the beam has enough capacity to find the optimal solution without\n # branching.\n self.assertEqual(sequence, [0, 0, 0, 0, 1])\n self.assertEqual(state, 1)\n self.assertEqual(score, 16)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for converting between event sequences and models inputs/outputs.\n\nOneHotEncoding is an abstract class for specifying a one-hot encoding, i.e.\nhow to convert back and forth between an arbitrary event space and integer\nindices between 0 and the number of classes.\n\nEventSequenceEncoderDecoder is an abstract class for translating event\n_sequences_, i.e. how to convert event sequences to input vectors and output\nlabels to be fed into a model, and how to convert from output labels back to\nevents.\n\nUse EventSequenceEncoderDecoder.encode to convert an event sequence to a\ntf.train.SequenceExample of inputs and labels. 
These SequenceExamples are fed\ninto the model during training and evaluation.\n\nDuring generation, use EventSequenceEncoderDecoder.get_inputs_batch to convert a\nlist of event sequences into an inputs batch which can be fed into the model to\npredict what the next event should be for each sequence. Then use\nEventSequenceEncoderDecoder.extend_event_sequences to extend each of those event\nsequences with an event sampled from the softmax output by the model.\n\nOneHotEventSequenceEncoderDecoder is an EventSequenceEncoderDecoder that uses a\nOneHotEncoding of individual events. The input vectors are one-hot encodings of\nthe most recent event. The output labels are one-hot encodings of the next\nevent.\n\nLookbackEventSequenceEncoderDecoder is an EventSequenceEncoderDecoder that also\nuses a OneHotEncoding of individual events. However, its input and output\nencodings also consider whether the event sequence is repeating, and the input\nencoding includes binary counters for timekeeping.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport numbers\n\nfrom magenta.music import constants\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf # noqa\n\n\nDEFAULT_STEPS_PER_BAR = constants.DEFAULT_STEPS_PER_BAR\nDEFAULT_LOOKBACK_DISTANCES = [DEFAULT_STEPS_PER_BAR, DEFAULT_STEPS_PER_BAR * 2]\n\n\nclass OneHotEncoding(object):\n \"\"\"An interface for specifying a one-hot encoding of individual events.\"\"\"\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def num_classes(self):\n \"\"\"The number of distinct event encodings.\n\n Returns:\n An int, the range of ints that can be returned by self.encode_event.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def default_event(self):\n \"\"\"An event value to use as a default.\n\n Returns:\n The default event value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def encode_event(self, event):\n \"\"\"Convert from an event value to an encoding integer.\n\n Args:\n event: An event value to encode.\n\n Returns:\n An integer representing the encoded event, in range [0, self.num_classes).\n \"\"\"\n pass\n\n @abc.abstractmethod\n def decode_event(self, index):\n \"\"\"Convert from an encoding integer to an event value.\n\n Args:\n index: The encoding, an integer in the range [0, self.num_classes).\n\n Returns:\n The decoded event value.\n \"\"\"\n pass\n\n def event_to_num_steps(self, unused_event):\n \"\"\"Returns the number of time steps corresponding to an event value.\n\n This is used for normalization when computing metrics. Subclasses with\n variable step size should override this method.\n\n Args:\n unused_event: An event value for which to return the number of steps.\n\n Returns:\n The number of steps corresponding to the given event value, defaulting to\n one.\n \"\"\"\n return 1\n\n\nclass EventSequenceEncoderDecoder(object):\n \"\"\"An abstract class for translating between events and model data.\n\n When building your dataset, the `encode` method takes in an event sequence\n and returns a SequenceExample of inputs and labels. These SequenceExamples\n are fed into the model during training and evaluation.\n\n During generation, the `get_inputs_batch` method takes in a list of the\n current event sequences and returns an inputs batch which is fed into the\n model to predict what the next event should be for each sequence. 
The\n `extend_event_sequences` method takes in the list of event sequences and the\n softmax returned by the model and extends each sequence by one step by\n sampling from the softmax probabilities. This loop (`get_inputs_batch` ->\n inputs batch is fed through the model to get a softmax ->\n `extend_event_sequences`) is repeated until the generated event sequences\n have reached the desired length.\n\n Properties:\n input_size: The length of the list returned by self.events_to_input.\n num_classes: The range of ints that can be returned by\n self.events_to_label.\n\n The `input_size`, `num_classes`, `events_to_input`, `events_to_label`, and\n `class_index_to_event` method must be overwritten to be specific to your\n model.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def input_size(self):\n \"\"\"The size of the input vector used by this model.\n\n Returns:\n An integer, the length of the list returned by self.events_to_input.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def num_classes(self):\n \"\"\"The range of labels used by this model.\n\n Returns:\n An integer, the range of integers that can be returned by\n self.events_to_label.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def default_event_label(self):\n \"\"\"The class label that represents a default event.\n\n Returns:\n An int, the class label that represents a default event.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def events_to_input(self, events, position):\n \"\"\"Returns the input vector for the event at the given position.\n\n Args:\n events: A list-like sequence of events.\n position: An integer event position in the sequence.\n\n Returns:\n An input vector, a self.input_size length list of floats.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def events_to_label(self, events, position):\n \"\"\"Returns the label for the event at the given position.\n\n Args:\n events: A list-like sequence of events.\n position: An integer event position in the sequence.\n\n Returns:\n A label, an integer in the range [0, self.num_classes).\n \"\"\"\n pass\n\n @abc.abstractmethod\n def class_index_to_event(self, class_index, events):\n \"\"\"Returns the event for the given class index.\n\n This is the reverse process of the self.events_to_label method.\n\n Args:\n class_index: An integer in the range [0, self.num_classes).\n events: A list-like sequence of events.\n\n Returns:\n An event value.\n \"\"\"\n pass\n\n def labels_to_num_steps(self, labels):\n \"\"\"Returns the total number of time steps for a sequence of class labels.\n\n This is used for normalization when computing metrics. 
Subclasses with\n variable step size should override this method.\n\n Args:\n labels: A list-like sequence of integers in the range\n [0, self.num_classes).\n\n Returns:\n The total number of time steps for the label sequence, defaulting to one\n per event.\n \"\"\"\n return len(labels)\n\n def encode(self, events):\n \"\"\"Returns a SequenceExample for the given event sequence.\n\n Args:\n events: A list-like sequence of events.\n\n Returns:\n A tf.train.SequenceExample containing inputs and labels.\n \"\"\"\n inputs = []\n labels = []\n for i in range(len(events) - 1):\n inputs.append(self.events_to_input(events, i))\n labels.append(self.events_to_label(events, i + 1))\n return make_sequence_example(inputs, labels)\n\n def get_inputs_batch(self, event_sequences, full_length=False):\n \"\"\"Returns an inputs batch for the given event sequences.\n\n Args:\n event_sequences: A list of list-like event sequences.\n full_length: If True, the inputs batch will be for the full length of\n each event sequence. If False, the inputs batch will only be for the\n last event of each event sequence. A full-length inputs batch is used\n for the first step of extending the event sequences, since the RNN\n cell state needs to be initialized with the priming sequence. For\n subsequent generation steps, only a last-event inputs batch is used.\n\n Returns:\n An inputs batch. If `full_length` is True, the shape will be\n [len(event_sequences), len(event_sequences[0]), INPUT_SIZE]. If\n `full_length` is False, the shape will be\n [len(event_sequences), 1, INPUT_SIZE].\n \"\"\"\n inputs_batch = []\n for events in event_sequences:\n inputs = []\n if full_length:\n for i in range(len(events)):\n inputs.append(self.events_to_input(events, i))\n else:\n inputs.append(self.events_to_input(events, len(events) - 1))\n inputs_batch.append(inputs)\n return inputs_batch\n\n def extend_event_sequences(self, event_sequences, softmax):\n \"\"\"Extends the event sequences by sampling the softmax probabilities.\n\n Args:\n event_sequences: A list of EventSequence objects.\n softmax: A list of softmax probability vectors. The list of softmaxes\n should be the same length as the list of event sequences.\n\n Returns:\n A Python list of chosen class indices, one for each event sequence.\n \"\"\"\n chosen_classes = []\n for i in range(len(event_sequences)):\n if not isinstance(softmax[0][0][0], numbers.Number):\n # In this case, softmax is a list of several sub-softmaxes, each\n # potentially with a different size.\n # shape: [[beam_size, event_num, softmax_size]]\n chosen_class = []\n for sub_softmax in softmax:\n num_classes = len(sub_softmax[0][0])\n chosen_class.append(\n np.random.choice(num_classes, p=sub_softmax[i][-1]))\n else:\n # In this case, softmax is just one softmax.\n # shape: [beam_size, event_num, softmax_size]\n num_classes = len(softmax[0][0])\n chosen_class = np.random.choice(num_classes, p=softmax[i][-1])\n event = self.class_index_to_event(chosen_class, event_sequences[i])\n event_sequences[i].append(event)\n chosen_classes.append(chosen_class)\n return chosen_classes\n\n def evaluate_log_likelihood(self, event_sequences, softmax):\n \"\"\"Evaluate the log likelihood of multiple event sequences.\n\n Each event sequence is evaluated from the end. If the size of the\n corresponding softmax vector is 1 less than the number of events, the entire\n event sequence will be evaluated (other than the first event, whose\n distribution is not modeled). 
If the softmax vector is shorter than this,\n only the events at the end of the sequence will be evaluated.\n\n Args:\n event_sequences: A list of EventSequence objects.\n softmax: A list of softmax probability vectors. The list of softmaxes\n should be the same length as the list of event sequences.\n\n Returns:\n A Python list containing the log likelihood of each event sequence.\n\n Raises:\n ValueError: If one of the event sequences is too long with respect to the\n corresponding softmax vectors.\n \"\"\"\n all_loglik = []\n for i in range(len(event_sequences)):\n if len(softmax[i]) >= len(event_sequences[i]):\n raise ValueError(\n 'event sequence must be longer than softmax vector (%d events but '\n 'softmax vector has length %d)' % (len(event_sequences[i]),\n len(softmax[i])))\n end_pos = len(event_sequences[i])\n start_pos = end_pos - len(softmax[i])\n loglik = 0.0\n for softmax_pos, position in enumerate(range(start_pos, end_pos)):\n index = self.events_to_label(event_sequences[i], position)\n if isinstance(index, numbers.Number):\n loglik += np.log(softmax[i][softmax_pos][index])\n else:\n for sub_softmax_i in range(len(index)):\n loglik += np.log(\n softmax[i][softmax_pos][sub_softmax_i][index[sub_softmax_i]])\n all_loglik.append(loglik)\n return all_loglik\n\n\nclass OneHotEventSequenceEncoderDecoder(EventSequenceEncoderDecoder):\n \"\"\"An EventSequenceEncoderDecoder that produces a one-hot encoding.\"\"\"\n\n def __init__(self, one_hot_encoding):\n \"\"\"Initialize a OneHotEventSequenceEncoderDecoder object.\n\n Args:\n one_hot_encoding: A OneHotEncoding object that transforms events to and\n from integer indices.\n \"\"\"\n self._one_hot_encoding = one_hot_encoding\n\n @property\n def input_size(self):\n return self._one_hot_encoding.num_classes\n\n @property\n def num_classes(self):\n return self._one_hot_encoding.num_classes\n\n @property\n def default_event_label(self):\n return self._one_hot_encoding.encode_event(\n self._one_hot_encoding.default_event)\n\n def events_to_input(self, events, position):\n \"\"\"Returns the input vector for the given position in the event sequence.\n\n Returns a one-hot vector for the given position in the event sequence, as\n determined by the one hot encoding.\n\n Args:\n events: A list-like sequence of events.\n position: An integer event position in the event sequence.\n\n Returns:\n An input vector, a list of floats.\n \"\"\"\n input_ = [0.0] * self.input_size\n input_[self._one_hot_encoding.encode_event(events[position])] = 1.0\n return input_\n\n def events_to_label(self, events, position):\n \"\"\"Returns the label for the given position in the event sequence.\n\n Returns the zero-based index value for the given position in the event\n sequence, as determined by the one hot encoding.\n\n Args:\n events: A list-like sequence of events.\n position: An integer event position in the event sequence.\n\n Returns:\n A label, an integer.\n \"\"\"\n return self._one_hot_encoding.encode_event(events[position])\n\n def class_index_to_event(self, class_index, events):\n \"\"\"Returns the event for the given class index.\n\n This is the reverse process of the self.events_to_label method.\n\n Args:\n class_index: An integer in the range [0, self.num_classes).\n events: A list-like sequence of events. 
This object is not used in this\n implementation.\n\n Returns:\n An event value.\n \"\"\"\n return self._one_hot_encoding.decode_event(class_index)\n\n def labels_to_num_steps(self, labels):\n \"\"\"Returns the total number of time steps for a sequence of class labels.\n\n Args:\n labels: A list-like sequence of integers in the range\n [0, self.num_classes).\n\n Returns:\n The total number of time steps for the label sequence, as determined by\n the one-hot encoding.\n \"\"\"\n events = []\n for label in labels:\n events.append(self.class_index_to_event(label, events))\n return sum(self._one_hot_encoding.event_to_num_steps(event)\n for event in events)\n\n\nclass OneHotIndexEventSequenceEncoderDecoder(OneHotEventSequenceEncoderDecoder):\n \"\"\"An EventSequenceEncoderDecoder that produces one-hot indices.\"\"\"\n\n @property\n def input_size(self):\n return 1\n\n @property\n def input_depth(self):\n return self._one_hot_encoding.num_classes\n\n def events_to_input(self, events, position):\n \"\"\"Returns the one-hot index for the event at the given position.\n\n Args:\n events: A list-like sequence of events.\n position: An integer event position in the event sequence.\n\n Returns:\n An integer input event index.\n \"\"\"\n return [self._one_hot_encoding.encode_event(events[position])]\n\n\nclass LookbackEventSequenceEncoderDecoder(EventSequenceEncoderDecoder):\n \"\"\"An EventSequenceEncoderDecoder that encodes repeated events and meter.\"\"\"\n\n def __init__(self, one_hot_encoding, lookback_distances=None,\n binary_counter_bits=5):\n \"\"\"Initializes the LookbackEventSequenceEncoderDecoder.\n\n Args:\n one_hot_encoding: A OneHotEncoding object that transforms events to and\n from integer indices.\n lookback_distances: A list of step intervals to look back in history to\n encode both the following event and whether the current step is a\n repeat. If None, use default lookback distances.\n binary_counter_bits: The number of input bits to use as a counter for the\n metric position of the next event.\n \"\"\"\n self._one_hot_encoding = one_hot_encoding\n if lookback_distances is None:\n self._lookback_distances = DEFAULT_LOOKBACK_DISTANCES\n else:\n self._lookback_distances = lookback_distances\n self._binary_counter_bits = binary_counter_bits\n\n @property\n def input_size(self):\n one_hot_size = self._one_hot_encoding.num_classes\n num_lookbacks = len(self._lookback_distances)\n return (one_hot_size + # current event\n num_lookbacks * one_hot_size + # next event for each lookback\n self._binary_counter_bits + # binary counters\n num_lookbacks) # whether event matches lookbacks\n\n @property\n def num_classes(self):\n return self._one_hot_encoding.num_classes + len(self._lookback_distances)\n\n @property\n def default_event_label(self):\n return self._one_hot_encoding.encode_event(\n self._one_hot_encoding.default_event)\n\n def events_to_input(self, events, position):\n \"\"\"Returns the input vector for the given position in the event sequence.\n\n Returns a self.input_size length list of floats. Assuming a one-hot\n encoding with 38 classes, two lookback distances, and five binary counters,\n self.input_size will = 121. 
Each index represents a different input signal\n to the model.\n\n Indices [0, 120]:\n [0, 37]: Event of current step.\n [38, 75]: Event of next step for first lookback.\n [76, 113]: Event of next step for second lookback.\n 114: 16th note binary counter.\n 115: 8th note binary counter.\n 116: 4th note binary counter.\n 117: Half note binary counter.\n 118: Whole note binary counter.\n 119: The current step is repeating (first lookback).\n 120: The current step is repeating (second lookback).\n\n Args:\n events: A list-like sequence of events.\n position: An integer position in the event sequence.\n\n Returns:\n An input vector, an self.input_size length list of floats.\n \"\"\"\n input_ = [0.0] * self.input_size\n offset = 0\n\n # Last event.\n index = self._one_hot_encoding.encode_event(events[position])\n input_[index] = 1.0\n offset += self._one_hot_encoding.num_classes\n\n # Next event if repeating N positions ago.\n for i, lookback_distance in enumerate(self._lookback_distances):\n lookback_position = position - lookback_distance + 1\n if lookback_position < 0:\n event = self._one_hot_encoding.default_event\n else:\n event = events[lookback_position]\n index = self._one_hot_encoding.encode_event(event)\n input_[offset + index] = 1.0\n offset += self._one_hot_encoding.num_classes\n\n # Binary time counter giving the metric location of the *next* event.\n n = position + 1\n for i in range(self._binary_counter_bits):\n input_[offset] = 1.0 if (n // 2 ** i) % 2 else -1.0\n offset += 1\n\n # Last event is repeating N bars ago.\n for i, lookback_distance in enumerate(self._lookback_distances):\n lookback_position = position - lookback_distance\n if (lookback_position >= 0 and\n events[position] == events[lookback_position]):\n input_[offset] = 1.0\n offset += 1\n\n assert offset == self.input_size\n\n return input_\n\n def events_to_label(self, events, position):\n \"\"\"Returns the label for the given position in the event sequence.\n\n Returns an integer in the range [0, self.num_classes). Indices in the range\n [0, self._one_hot_encoding.num_classes) map to standard events. Indices\n self._one_hot_encoding.num_classes and self._one_hot_encoding.num_classes +\n 1 are signals to repeat events from earlier in the sequence. 
More distant\n repeats are selected first and standard events are selected last.\n\n Assuming a one-hot encoding with 38 classes and two lookback distances,\n self.num_classes = 40 and the values will be as follows.\n\n Values [0, 39]:\n [0, 37]: Event of the last step in the event sequence, if not repeating\n any of the lookbacks.\n 38: If the last event is repeating the first lookback, if not also\n repeating the second lookback.\n 39: If the last event is repeating the second lookback.\n\n Args:\n events: A list-like sequence of events.\n position: An integer position in the event sequence.\n\n Returns:\n A label, an integer.\n \"\"\"\n if (self._lookback_distances and\n position < self._lookback_distances[-1] and\n events[position] == self._one_hot_encoding.default_event):\n return (self._one_hot_encoding.num_classes +\n len(self._lookback_distances) - 1)\n\n # If last step repeated N bars ago.\n for i, lookback_distance in reversed(\n list(enumerate(self._lookback_distances))):\n lookback_position = position - lookback_distance\n if (lookback_position >= 0 and\n events[position] == events[lookback_position]):\n return self._one_hot_encoding.num_classes + i\n\n # If last step didn't repeat at one of the lookback positions, use the\n # specific event.\n return self._one_hot_encoding.encode_event(events[position])\n\n def class_index_to_event(self, class_index, events):\n \"\"\"Returns the event for the given class index.\n\n This is the reverse process of the self.events_to_label method.\n\n Args:\n class_index: An int in the range [0, self.num_classes).\n events: The current event sequence.\n\n Returns:\n An event value.\n \"\"\"\n # Repeat N bar ago.\n for i, lookback_distance in reversed(\n list(enumerate(self._lookback_distances))):\n if class_index == self._one_hot_encoding.num_classes + i:\n if len(events) < lookback_distance:\n return self._one_hot_encoding.default_event\n return events[-lookback_distance]\n\n # Return the event for that class index.\n return self._one_hot_encoding.decode_event(class_index)\n\n def labels_to_num_steps(self, labels):\n \"\"\"Returns the total number of time steps for a sequence of class labels.\n\n This method assumes the event sequence begins with the event corresponding\n to the first label, which is inconsistent with the `encode` method in\n EventSequenceEncoderDecoder that uses the second event as the first label.\n Therefore, if the label sequence includes a lookback to the very first event\n and that event is a different number of time steps than the default event,\n this method will give an incorrect answer.\n\n Args:\n labels: A list-like sequence of integers in the range\n [0, self.num_classes).\n\n Returns:\n The total number of time steps for the label sequence, as determined by\n the one-hot encoding.\n \"\"\"\n events = []\n for label in labels:\n events.append(self.class_index_to_event(label, events))\n return sum(self._one_hot_encoding.event_to_num_steps(event)\n for event in events)\n\n\nclass ConditionalEventSequenceEncoderDecoder(object):\n \"\"\"An encoder/decoder for conditional event sequences.\n\n This class is similar to an EventSequenceEncoderDecoder but operates on\n *conditional* event sequences, where there is both a control event sequence\n and a target event sequence. The target sequence consists of events that are\n directly generated by the model, while the control sequence, known in advance,\n affects the inputs provided to the model. 
The event types of the two sequences\n can be different.\n\n Model inputs are determined by both control and target sequences, and are\n formed by concatenating the encoded control and target input vectors. Model\n outputs are determined by the target sequence only.\n\n This implementation assumes that the control event at position `i` is known\n when the target event at position `i` is to be generated.\n\n Properties:\n input_size: The length of the list returned by self.events_to_input.\n num_classes: The range of ints that can be returned by\n self.events_to_label.\n \"\"\"\n\n def __init__(self, control_encoder_decoder, target_encoder_decoder):\n \"\"\"Initialize a ConditionalEventSequenceEncoderDecoder object.\n\n Args:\n control_encoder_decoder: The EventSequenceEncoderDecoder to encode/decode\n the control sequence.\n target_encoder_decoder: The EventSequenceEncoderDecoder to encode/decode\n the target sequence.\n \"\"\"\n self._control_encoder_decoder = control_encoder_decoder\n self._target_encoder_decoder = target_encoder_decoder\n\n @property\n def input_size(self):\n \"\"\"The size of the concatenated control and target input vectors.\n\n Returns:\n An integer, the size of an input vector.\n \"\"\"\n return (self._control_encoder_decoder.input_size +\n self._target_encoder_decoder.input_size)\n\n @property\n def num_classes(self):\n \"\"\"The range of target labels used by this model.\n\n Returns:\n An integer, the range of integers that can be returned by\n self.events_to_label.\n \"\"\"\n return self._target_encoder_decoder.num_classes\n\n @property\n def default_event_label(self):\n \"\"\"The class label that represents a default target event.\n\n Returns:\n An integer, the class label that represents a default target event.\n \"\"\"\n return self._target_encoder_decoder.default_event_label\n\n def events_to_input(self, control_events, target_events, position):\n \"\"\"Returns the input vector for the given position in the sequence pair.\n\n Returns the vector formed by concatenating the input vector for the control\n sequence and the input vector for the target sequence.\n\n Args:\n control_events: A list-like sequence of control events.\n target_events: A list-like sequence of target events.\n position: An integer event position in the event sequences. 
When\n predicting the target label at position `i + 1`, the input vector is\n the concatenation of the control input vector at position `i + 1` and\n the target input vector at position `i`.\n\n Returns:\n An input vector, a list of floats.\n \"\"\"\n return (\n self._control_encoder_decoder.events_to_input(\n control_events, position + 1) +\n self._target_encoder_decoder.events_to_input(target_events, position))\n\n def events_to_label(self, target_events, position):\n \"\"\"Returns the label for the given position in the target event sequence.\n\n Args:\n target_events: A list-like sequence of target events.\n position: An integer event position in the target event sequence.\n\n Returns:\n A label, an integer.\n \"\"\"\n return self._target_encoder_decoder.events_to_label(target_events, position)\n\n def class_index_to_event(self, class_index, target_events):\n \"\"\"Returns the event for the given class index.\n\n This is the reverse process of the self.events_to_label method.\n\n Args:\n class_index: An integer in the range [0, self.num_classes).\n target_events: A list-like sequence of target events.\n\n Returns:\n A target event value.\n \"\"\"\n return self._target_encoder_decoder.class_index_to_event(\n class_index, target_events)\n\n def labels_to_num_steps(self, labels):\n \"\"\"Returns the total number of time steps for a sequence of class labels.\n\n Args:\n labels: A list-like sequence of integers in the range\n [0, self.num_classes).\n\n Returns:\n The total number of time steps for the label sequence, as determined by\n the target encoder/decoder.\n \"\"\"\n return self._target_encoder_decoder.labels_to_num_steps(labels)\n\n def encode(self, control_events, target_events):\n \"\"\"Returns a SequenceExample for the given event sequence pair.\n\n Args:\n control_events: A list-like sequence of control events.\n target_events: A list-like sequence of target events, the same length as\n `control_events`.\n\n Returns:\n A tf.train.SequenceExample containing inputs and labels.\n\n Raises:\n ValueError: If the control and target event sequences have different\n length.\n \"\"\"\n if len(control_events) != len(target_events):\n raise ValueError('must have the same number of control and target events '\n '(%d control events but %d target events)' % (\n len(control_events), len(target_events)))\n\n inputs = []\n labels = []\n for i in range(len(target_events) - 1):\n inputs.append(self.events_to_input(control_events, target_events, i))\n labels.append(self.events_to_label(target_events, i + 1))\n return make_sequence_example(inputs, labels)\n\n def get_inputs_batch(self, control_event_sequences, target_event_sequences,\n full_length=False):\n \"\"\"Returns an inputs batch for the given control and target event sequences.\n\n Args:\n control_event_sequences: A list of list-like control event sequences.\n target_event_sequences: A list of list-like target event sequences, the\n same length as `control_event_sequences`. Each target event sequence\n must be shorter than the corresponding control event sequence.\n full_length: If True, the inputs batch will be for the full length of\n each control/target event sequence pair. If False, the inputs batch\n will only be for the last event of each target event sequence. A full-\n length inputs batch is used for the first step of extending the target\n event sequences, since the RNN cell state needs to be initialized with\n the priming target sequence. 
For subsequent generation steps, only a\n last-event inputs batch is used.\n\n Returns:\n An inputs batch. If `full_length` is True, the shape will be\n [len(target_event_sequences), len(target_event_sequences[0]), INPUT_SIZE].\n If `full_length` is False, the shape will be\n [len(target_event_sequences), 1, INPUT_SIZE].\n\n Raises:\n ValueError: If there are a different number of control and target event\n sequences, or if one of the control event sequences is not shorter\n than the corresponding control event sequence.\n \"\"\"\n if len(control_event_sequences) != len(target_event_sequences):\n raise ValueError(\n '%d control event sequences but %d target event sequences' %\n (len(control_event_sequences, len(target_event_sequences))))\n\n inputs_batch = []\n for control_events, target_events in zip(\n control_event_sequences, target_event_sequences):\n if len(control_events) <= len(target_events):\n raise ValueError('control event sequence must be longer than target '\n 'event sequence (%d control events but %d target '\n 'events)' % (len(control_events), len(target_events)))\n inputs = []\n if full_length:\n for i in range(len(target_events)):\n inputs.append(self.events_to_input(control_events, target_events, i))\n else:\n inputs.append(self.events_to_input(\n control_events, target_events, len(target_events) - 1))\n inputs_batch.append(inputs)\n return inputs_batch\n\n def extend_event_sequences(self, target_event_sequences, softmax):\n \"\"\"Extends the event sequences by sampling the softmax probabilities.\n\n Args:\n target_event_sequences: A list of target EventSequence objects.\n softmax: A list of softmax probability vectors. The list of softmaxes\n should be the same length as the list of event sequences.\n\n Returns:\n A Python list of chosen class indices, one for each target event sequence.\n \"\"\"\n return self._target_encoder_decoder.extend_event_sequences(\n target_event_sequences, softmax)\n\n def evaluate_log_likelihood(self, target_event_sequences, softmax):\n \"\"\"Evaluate the log likelihood of multiple target event sequences.\n\n Args:\n target_event_sequences: A list of target EventSequence objects.\n softmax: A list of softmax probability vectors. The list of softmaxes\n should be the same length as the list of target event sequences. The\n softmax vectors are assumed to have been generated by a full-length\n inputs batch.\n\n Returns:\n A Python list containing the log likelihood of each target event sequence.\n \"\"\"\n return self._target_encoder_decoder.evaluate_log_likelihood(\n target_event_sequences, softmax)\n\n\nclass OptionalEventSequenceEncoder(EventSequenceEncoderDecoder):\n \"\"\"An encoder that augments a base encoder with a disable flag.\n\n This encoder encodes event sequences consisting of tuples where the first\n element is a disable flag. When set, the encoding consists of a 1 followed by\n a zero-encoding the size of the base encoder's input. 
When unset, the encoding\n consists of a 0 followed by the base encoder's encoding.\n \"\"\"\n\n def __init__(self, encoder):\n \"\"\"Initialize an OptionalEventSequenceEncoder object.\n\n Args:\n encoder: The base EventSequenceEncoderDecoder to use.\n \"\"\"\n self._encoder = encoder\n\n @property\n def input_size(self):\n return 1 + self._encoder.input_size\n\n @property\n def num_classes(self):\n raise NotImplementedError\n\n @property\n def default_event_label(self):\n raise NotImplementedError\n\n def events_to_input(self, events, position):\n # The event sequence is a list of tuples where the first element is a\n # disable flag.\n disable, _ = events[position]\n if disable:\n return [1.0] + [0.0] * self._encoder.input_size\n else:\n return [0.0] + self._encoder.events_to_input(\n [event for _, event in events], position)\n\n def events_to_label(self, events, position):\n raise NotImplementedError\n\n def class_index_to_event(self, class_index, events):\n raise NotImplementedError\n\n\nclass MultipleEventSequenceEncoder(EventSequenceEncoderDecoder):\n \"\"\"An encoder that concatenates multiple component encoders.\n\n This class, largely intended for use with control sequences for conditional\n encoder/decoders, encodes event sequences with multiple encoders and\n concatenates the encodings.\n\n Despite being an EventSequenceEncoderDecoder this class does not decode.\n \"\"\"\n\n def __init__(self, encoders, encode_single_sequence=False):\n \"\"\"Initialize a MultipleEventSequenceEncoder object.\n\n Args:\n encoders: A list of component EventSequenceEncoderDecoder objects whose\n output will be concatenated.\n encode_single_sequence: If True, at encoding time all of the encoders will\n be applied to a single event sequence. If False, each event of the\n event sequence should be a tuple with size the same as the number of\n encoders, each of which will be applied to the events in the\n corresponding position in the tuple, i.e. the first encoder will be\n applied to the first element of each event tuple, the second encoder\n will be applied to the second element, etc.\n \"\"\"\n self._encoders = encoders\n self._encode_single_sequence = encode_single_sequence\n\n @property\n def input_size(self):\n return sum(encoder.input_size for encoder in self._encoders)\n\n @property\n def num_classes(self):\n raise NotImplementedError\n\n @property\n def default_event_label(self):\n raise NotImplementedError\n\n def events_to_input(self, events, position):\n input_ = []\n if self._encode_single_sequence:\n # Apply all encoders to the event sequence.\n for encoder in self._encoders:\n input_ += encoder.events_to_input(events, position)\n else:\n # The event sequence is a list of tuples. Apply each encoder to the\n # elements in the corresponding tuple position.\n event_sequences = list(zip(*events))\n if len(event_sequences) != len(self._encoders):\n raise ValueError(\n 'Event tuple size must be the same as the number of encoders.')\n for encoder, event_sequence in zip(self._encoders, event_sequences):\n input_ += encoder.events_to_input(event_sequence, position)\n return input_\n\n def events_to_label(self, events, position):\n raise NotImplementedError\n\n def class_index_to_event(self, class_index, events):\n raise NotImplementedError\n\n\ndef make_sequence_example(inputs, labels):\n \"\"\"Returns a SequenceExample for the given inputs and labels.\n\n Args:\n inputs: A list of input vectors. 
Each input vector is a list of floats.\n labels: A list of ints.\n\n Returns:\n A tf.train.SequenceExample containing inputs and labels.\n \"\"\"\n input_features = [\n tf.train.Feature(float_list=tf.train.FloatList(value=input_))\n for input_ in inputs]\n label_features = []\n for label in labels:\n if isinstance(label, numbers.Number):\n label = [label]\n label_features.append(\n tf.train.Feature(int64_list=tf.train.Int64List(value=label)))\n feature_list = {\n 'inputs': tf.train.FeatureList(feature=input_features),\n 'labels': tf.train.FeatureList(feature=label_features)\n }\n feature_lists = tf.train.FeatureLists(feature_list=feature_list)\n return tf.train.SequenceExample(feature_lists=feature_lists)\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for fine alignment.\n\nCQT calculations and NoteSequence manipulations are done in Python. For speed,\nDTW calculations are done in C++ by calling the 'align' program, which is\nspecifically intended to be used with this library. Communication between\nPython and C++ is done with a protobuf.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport tempfile\n\nfrom absl import logging\nimport alignment_pb2 # noqa\nimport librosa\nfrom magenta.music import midi_synth\nfrom magenta.music import sequences_lib\nimport numpy as np\n\n\n# Constants based on craffel's example alignment script:\n# https://github.com/craffel/pretty-midi/blob/master/examples/align_midi.py\n\nSAMPLE_RATE = 22050\nCQT_HOP_LENGTH_FINE = 64 # ~3ms\nCQT_N_BINS = 48\nCQT_BINS_PER_OCTAVE = 12\nCQT_FMIN = librosa.midi_to_hz(36)\n\nALIGN_BINARY = './align'\n\n\ndef extract_cqt(samples, sample_rate, cqt_hop_length):\n \"\"\"Transforms the contents of a wav/mp3 file into a series of CQT frames.\"\"\"\n cqt = np.abs(librosa.core.cqt(\n samples,\n sample_rate,\n hop_length=cqt_hop_length, # noqa\n fmin=CQT_FMIN,\n n_bins=CQT_N_BINS,\n bins_per_octave=CQT_BINS_PER_OCTAVE), dtype=np.float32)\n\n # Compute log-amplitude\n cqt = librosa.power_to_db(cqt)\n return cqt\n\n\ndef align_cpp(samples,\n sample_rate,\n ns,\n cqt_hop_length,\n sf2_path,\n penalty_mul=1.0,\n band_radius_seconds=.5):\n \"\"\"Aligns the notesequence to the wav file using C++ DTW.\n\n Args:\n samples: Samples to align.\n sample_rate: Sample rate for samples.\n ns: The source notesequence to align.\n cqt_hop_length: Hop length to use for CQT calculations.\n sf2_path: Path to SF2 file for synthesis.\n penalty_mul: Penalty multiplier to use for non-diagonal moves.\n band_radius_seconds: What size of band radius to use for restricting DTW.\n\n Raises:\n RuntimeError: If notes are skipped during alignment.\n\n Returns:\n samples: The samples used from the wav file.\n aligned_ns: The aligned version of the notesequence.\n remaining_ns: Any remaining notesequence that extended beyond the length\n of the wav file.\n \"\"\"\n logging.info('Synthesizing')\n 
ns_samples = midi_synth.fluidsynth(\n ns, sf2_path=sf2_path, sample_rate=sample_rate).astype(np.float32)\n\n # It is critical that ns_samples and samples are the same length because the\n # alignment code does not do subsequence alignment.\n ns_samples = np.pad(ns_samples,\n (0, max(0, samples.shape[0] - ns_samples.shape[0])),\n 'constant')\n\n # Pad samples too, if needed, because there are some cases where the\n # synthesized NoteSequence is actually longer.\n samples = np.pad(samples,\n (0, max(0, ns_samples.shape[0] - samples.shape[0])),\n 'constant')\n\n # Note that we skip normalization here becasue it happens in C++.\n logging.info('source_cqt')\n source_cqt = extract_cqt(ns_samples, sample_rate, cqt_hop_length)\n\n logging.info('dest_cqt')\n dest_cqt = extract_cqt(samples, sample_rate, cqt_hop_length)\n\n alignment_task = alignment_pb2.AlignmentTask()\n alignment_task.sequence_1.x = source_cqt.shape[0]\n alignment_task.sequence_1.y = source_cqt.shape[1]\n for c in source_cqt.reshape([-1]):\n alignment_task.sequence_1.content.append(c)\n\n alignment_task.sequence_2.x = dest_cqt.shape[0]\n alignment_task.sequence_2.y = dest_cqt.shape[1]\n for c in dest_cqt.reshape([-1]):\n alignment_task.sequence_2.content.append(c)\n\n seconds_per_frame = cqt_hop_length / sample_rate\n\n alignment_task.band_radius = int(band_radius_seconds / seconds_per_frame)\n alignment_task.penalty = 0\n alignment_task.penalty_mul = penalty_mul\n\n # Write to file.\n fh, temp_path = tempfile.mkstemp(suffix='.proto')\n os.close(fh)\n with open(temp_path, 'w') as f:\n f.write(alignment_task.SerializeToString())\n\n # Align with C++ program.\n subprocess.check_call([ALIGN_BINARY, temp_path])\n\n # Read file.\n with open(temp_path + '.result') as f:\n result = alignment_pb2.AlignmentResult.FromString(f.read())\n\n # Clean up.\n os.remove(temp_path)\n os.remove(temp_path + '.result')\n\n logging.info('Aligning NoteSequence with warp path.')\n\n warp_seconds_i = np.array([i * seconds_per_frame for i in result.i])\n warp_seconds_j = np.array([j * seconds_per_frame for j in result.j])\n\n time_diffs = np.abs(warp_seconds_i - warp_seconds_j)\n warps = np.abs(time_diffs[1:] - time_diffs[:-1])\n\n stats = {\n 'alignment_score': result.score,\n 'warp_mean_s': np.mean(warps),\n 'warp_median_s': np.median(warps),\n 'warp_max_s': np.max(warps),\n 'warp_min_s': np.min(warps),\n 'time_diff_mean_s': np.mean(time_diffs),\n 'time_diff_median_s': np.median(time_diffs),\n 'time_diff_max_s': np.max(time_diffs),\n 'time_diff_min_s': np.min(time_diffs),\n }\n\n for name, value in sorted(stats.iteritems()):\n logging.info('%s: %f', name, value)\n\n aligned_ns, skipped_notes = sequences_lib.adjust_notesequence_times(\n ns,\n lambda t: np.interp(t, warp_seconds_i, warp_seconds_j),\n minimum_duration=seconds_per_frame)\n if skipped_notes > 0:\n raise RuntimeError('Skipped {} notes'.format(skipped_notes))\n\n logging.debug('done')\n\n return aligned_ns, stats\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\"\"\"Tests for Magenta's Tensor2Tensor modalities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nfrom magenta.models.score2perf import modalities\nimport numpy as np\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.utils import expert_utils\nimport tensorflow.compat.v1 as tf # noqa\n\n\n\nclass ModalitiesTest(tf.test.TestCase):\n\n def testBottomInputs(self):\n \"\"\"Adapted from tensor2tensor/layers/modalities_test.py.\"\"\"\n batch_size = 10\n num_datashards = 5\n length = 5\n vocab_size = [2000, 500, 2500]\n hidden_size = 9\n model_hparams = common_hparams.basic_params1()\n model_hparams.hidden_size = hidden_size\n model_hparams.mode = tf.estimator.ModeKeys.TRAIN\n x = np.stack([\n -1 + np.random.random_integers(\n vocab_size[i], size=(batch_size, length, 1))\n for i in range(len(vocab_size))\n ], axis=3)\n data_parallelism = expert_utils.Parallelism(\n ['/device:CPU:0'] * num_datashards)\n bottom = functools.partial(modalities.bottom,\n model_hparams=model_hparams,\n vocab_size=vocab_size)\n with self.test_session() as session:\n xs = tf.split(x, num_datashards)\n sharded_output = data_parallelism(bottom, xs)\n output = tf.concat(sharded_output, 0)\n session.run(tf.global_variables_initializer())\n res = session.run(output)\n self.assertEqual(res.shape, (batch_size, length, 1, hidden_size))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Score2Perf datagen using beam.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tempfile\n\nimport apache_beam as beam\nfrom magenta.models.score2perf import datagen_beam\nfrom magenta.models.score2perf import music_encoders\nfrom magenta.music import testing_lib\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf # noqa\n\n\n\nclass GenerateExamplesTest(tf.test.TestCase):\n\n def testGenerateExamples(self):\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 1.0), (64, 100, 1.0, 2.0), (67, 127, 2.0, 3.0)])\n input_transform = beam.transforms.Create([('0', ns.SerializeToString())])\n output_dir = tempfile.mkdtemp()\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100,\n num_velocity_bins=32,\n min_pitch=21,\n max_pitch=108)\n\n datagen_beam.generate_examples(\n input_transform=input_transform,\n output_dir=output_dir,\n problem_name='test_problem',\n splits={'train': 1.0},\n min_hop_size_seconds=3.0,\n max_hop_size_seconds=3.0,\n min_pitch=21,\n max_pitch=108,\n num_replications=1,\n encode_performance_fn=encoder.encode_note_sequence)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for shared data lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport tempfile\nimport time\n\nfrom magenta.models.onsets_frames_transcription import configs\nfrom magenta.models.onsets_frames_transcription import constants\nfrom magenta.models.onsets_frames_transcription import data\n\nfrom magenta.music import audio_io\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\nfrom magenta.music.protobuf import music_pb2\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf # noqa\n\n\n\nclass DataTest(tf.test.TestCase):\n\n def _FillExample(self, sequence, wav_data, filename):\n velocity_range = music_pb2.VelocityRange(min=0, max=127)\n feature_dict = {\n 'id':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[filename.encode('utf-8')])\n ),\n 'sequence':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[sequence.SerializeToString()])),\n 'audio':\n tf.train.Feature(bytes_list=tf.train.BytesList(value=[wav_data])),\n 'velocity_range':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[velocity_range.SerializeToString()])),\n }\n return tf.train.Example(features=tf.train.Features(feature=feature_dict))\n\n def _DataToInputs(self, spec, labels, weighted_labels, length, filename,\n truncated_length):\n del weighted_labels\n # This method re-implements a portion of the TensorFlow graph using numpy.\n # While typically it is frowned upon to test complicated code with other\n # code, there is no way around this for testing the pipeline end to end,\n # which requires an actual spec computation. 
Furthermore, much of the\n # complexity of the pipeline is due to the TensorFlow implementation,\n # so comparing it against simpler numpy code still provides effective\n # coverage.\n truncated_length = (\n min(truncated_length, length) if truncated_length else length)\n\n # Pad or slice spec if differs from truncated_length.\n if len(spec) < truncated_length:\n pad_amt = truncated_length - len(spec)\n spec = np.pad(spec, [(0, pad_amt), (0, 0)], 'constant')\n else:\n spec = spec[0:truncated_length]\n\n # Pad or slice labels if differs from truncated_length.\n if len(labels) < truncated_length:\n pad_amt = truncated_length - len(labels)\n labels = np.pad(labels, [(0, pad_amt), (0, 0)], 'constant')\n else:\n labels = labels[0:truncated_length]\n\n inputs = [(spec, labels, truncated_length, filename)]\n\n return inputs\n\n def _ExampleToInputs(self,\n ex,\n truncated_length=0):\n hparams = copy.deepcopy(configs.DEFAULT_HPARAMS)\n\n filename = ex.features.feature['id'].bytes_list.value[0]\n sequence = music_pb2.NoteSequence.FromString(\n ex.features.feature['sequence'].bytes_list.value[0])\n wav_data = ex.features.feature['audio'].bytes_list.value[0]\n\n spec = data.wav_to_spec(wav_data, hparams=hparams)\n roll = sequences_lib.sequence_to_pianoroll(\n sequence,\n frames_per_second=data.hparams_frames_per_second(hparams),\n min_pitch=constants.MIN_MIDI_PITCH,\n max_pitch=constants.MAX_MIDI_PITCH,\n min_frame_occupancy_for_label=0.0,\n onset_mode='length_ms',\n onset_length_ms=32.,\n onset_delay_ms=0.)\n length = data.wav_to_num_frames(\n wav_data, frames_per_second=data.hparams_frames_per_second(hparams))\n\n return self._DataToInputs(spec, roll.active, roll.weights, length, filename,\n truncated_length)\n\n def _ValidateProvideBatch(self,\n examples,\n truncated_length,\n batch_size,\n expected_inputs,\n feed_dict=None):\n \"\"\"Tests for correctness of batches.\"\"\"\n hparams = copy.deepcopy(configs.DEFAULT_HPARAMS)\n hparams.batch_size = batch_size\n hparams.truncated_length_secs = (\n truncated_length / data.hparams_frames_per_second(hparams))\n\n with self.test_session() as sess:\n dataset = data.provide_batch(\n examples=examples,\n preprocess_examples=True,\n params=hparams,\n is_training=False,\n shuffle_examples=False,\n skip_n_initial_records=0)\n iterator = dataset.make_initializable_iterator()\n next_record = iterator.get_next()\n sess.run([\n tf.initializers.local_variables(),\n tf.initializers.global_variables(),\n iterator.initializer\n ], feed_dict=feed_dict)\n for i in range(0, len(expected_inputs), batch_size):\n # Wait to ensure example is pre-processed.\n time.sleep(0.1)\n features, labels = sess.run(next_record)\n inputs = [\n features.spec, labels.labels, features.length, features.sequence_id]\n max_length = np.max(inputs[2])\n for j in range(batch_size):\n # Add batch padding if needed.\n input_length = expected_inputs[i + j][2]\n if input_length < max_length:\n expected_inputs[i + j] = list(expected_inputs[i + j])\n pad_amt = max_length - input_length\n expected_inputs[i + j][0] = np.pad(\n expected_inputs[i + j][0], [(0, pad_amt), (0, 0)], 'constant')\n expected_inputs[i + j][1] = np.pad(\n expected_inputs[i + j][1],\n [(0, pad_amt), (0, 0)], 'constant')\n for exp_input, input_ in zip(expected_inputs[i + j], inputs):\n self.assertAllEqual(np.squeeze(exp_input), np.squeeze(input_[j]))\n\n with self.assertRaisesOpError('End of sequence'):\n _ = sess.run(next_record)\n\n def _SyntheticSequence(self, duration, note):\n seq = music_pb2.NoteSequence(total_time=duration)\n 
testing_lib.add_track_to_sequence(\n seq, 0, [(note, 100, 0, duration)])\n return seq\n\n def _CreateExamplesAndExpectedInputs(self,\n truncated_length,\n lengths,\n expected_num_inputs):\n hparams = copy.deepcopy(configs.DEFAULT_HPARAMS)\n examples = []\n expected_inputs = []\n\n for i, length in enumerate(lengths):\n wav_samples = np.zeros(\n (np.int((length / data.hparams_frames_per_second(hparams)) *\n hparams.sample_rate), 1), np.float32)\n wav_data = audio_io.samples_to_wav_data(wav_samples, hparams.sample_rate)\n\n num_frames = data.wav_to_num_frames(\n wav_data, frames_per_second=data.hparams_frames_per_second(hparams))\n\n seq = self._SyntheticSequence(\n num_frames / data.hparams_frames_per_second(hparams),\n i + constants.MIN_MIDI_PITCH)\n\n examples.append(self._FillExample(seq, wav_data, 'ex%d' % i))\n expected_inputs += self._ExampleToInputs(\n examples[-1],\n truncated_length)\n self.assertEqual(expected_num_inputs, len(expected_inputs))\n return examples, expected_inputs\n\n def _ValidateProvideBatchTFRecord(self,\n truncated_length,\n batch_size,\n lengths,\n expected_num_inputs):\n examples, expected_inputs = self._CreateExamplesAndExpectedInputs(\n truncated_length, lengths, expected_num_inputs)\n\n with tempfile.NamedTemporaryFile() as temp_tfr:\n with tf.python_io.TFRecordWriter(temp_tfr.name) as writer:\n for ex in examples:\n writer.write(ex.SerializeToString())\n\n self._ValidateProvideBatch(\n temp_tfr.name,\n truncated_length,\n batch_size,\n expected_inputs)\n\n def _ValidateProvideBatchMemory(self,\n truncated_length,\n batch_size,\n lengths,\n expected_num_inputs):\n examples, expected_inputs = self._CreateExamplesAndExpectedInputs(\n truncated_length, lengths, expected_num_inputs)\n\n self._ValidateProvideBatch(\n [e.SerializeToString() for e in examples],\n truncated_length,\n batch_size,\n expected_inputs)\n\n def _ValidateProvideBatchPlaceholder(self,\n truncated_length,\n batch_size,\n lengths,\n expected_num_inputs):\n examples, expected_inputs = self._CreateExamplesAndExpectedInputs(\n truncated_length, lengths, expected_num_inputs)\n examples_ph = tf.placeholder(tf.string, [None])\n feed_dict = {examples_ph: [e.SerializeToString() for e in examples]}\n\n self._ValidateProvideBatch(\n examples_ph,\n truncated_length,\n batch_size,\n expected_inputs,\n feed_dict=feed_dict)\n\n def _ValidateProvideBatchBoth(self,\n truncated_length,\n batch_size,\n lengths,\n expected_num_inputs):\n self._ValidateProvideBatchTFRecord(\n truncated_length=truncated_length,\n batch_size=batch_size,\n lengths=lengths,\n expected_num_inputs=expected_num_inputs)\n self._ValidateProvideBatchMemory(\n truncated_length=truncated_length,\n batch_size=batch_size,\n lengths=lengths,\n expected_num_inputs=expected_num_inputs)\n self._ValidateProvideBatchPlaceholder(\n truncated_length=truncated_length,\n batch_size=batch_size,\n lengths=lengths,\n expected_num_inputs=expected_num_inputs)\n\n def testProvideBatchFullSeqs(self):\n self._ValidateProvideBatchBoth(\n truncated_length=0,\n batch_size=2,\n lengths=[10, 50, 100, 10, 50, 80],\n expected_num_inputs=6)\n\n def testProvideBatchTruncated(self):\n self._ValidateProvideBatchBoth(\n truncated_length=15,\n batch_size=2,\n lengths=[10, 50, 100, 10, 50, 80],\n expected_num_inputs=6)\n\n def testGeneratedShardedFilenamesCommaWithShard(self):\n filenames = data.generate_sharded_filenames('/foo/bar@3,/baz/qux@2')\n self.assertEqual(\n [\n '/foo/bar-00000-of-00003',\n '/foo/bar-00001-of-00003',\n '/foo/bar-00002-of-00003',\n 
'/baz/qux-00000-of-00002',\n '/baz/qux-00001-of-00002',\n ],\n filenames)\n\n def testGeneratedShardedFilenamesCommaWithoutShard(self):\n filenames = data.generate_sharded_filenames('/foo/bar,/baz/qux')\n self.assertEqual(\n [\n '/foo/bar',\n '/baz/qux',\n ],\n filenames)\n\n def testCombineTensorBatch(self):\n with tf.Graph().as_default():\n tensor = tf.constant([[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]])\n lengths = tf.constant([3, 2])\n combined = data.combine_tensor_batch(\n tensor, lengths, max_length=5, batch_size=2)\n sess = tf.Session()\n np.testing.assert_equal([1, 2, 3, 4, 5, 0, 0, 0, 0, 0],\n sess.run(combined))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for lead_sheets.\"\"\"\n\nimport copy\n\nfrom magenta.music import chords_lib\nfrom magenta.music import constants\nfrom magenta.music import lead_sheets_lib\nfrom magenta.music import melodies_lib\nfrom magenta.music import testing_lib as music_testing_lib\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf # noqa\n\n\nNOTE_OFF = constants.MELODY_NOTE_OFF\nNO_EVENT = constants.MELODY_NO_EVENT\nNO_CHORD = constants.NO_CHORD\n\n\nclass LeadSheetsLibTest(tf.test.TestCase):\n\n def setUp(self):\n self.steps_per_quarter = 4\n self.note_sequence = music_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4\n }\n tempos: {\n qpm: 60\n }\n \"\"\")\n\n def testTranspose(self):\n # LeadSheet transposition should agree with melody & chords transpositions.\n melody_events = [12 * 5 + 4, NO_EVENT, 12 * 5 + 5,\n NOTE_OFF, 12 * 6, NO_EVENT]\n chord_events = [NO_CHORD, 'C', 'F', 'Dm', 'D', 'G']\n melody = melodies_lib.Melody(melody_events)\n chords = chords_lib.ChordProgression(chord_events)\n expected_melody = copy.deepcopy(melody)\n expected_chords = copy.deepcopy(chords)\n lead_sheet = lead_sheets_lib.LeadSheet(melody, chords)\n lead_sheet.transpose(transpose_amount=-5, min_note=12 * 5, max_note=12 * 7)\n expected_melody.transpose(\n transpose_amount=-5, min_note=12 * 5, max_note=12 * 7)\n expected_chords.transpose(transpose_amount=-5)\n self.assertEqual(expected_melody, lead_sheet.melody)\n self.assertEqual(expected_chords, lead_sheet.chords)\n\n def testSquash(self):\n # LeadSheet squash should agree with melody squash & chords transpose.\n melody_events = [12 * 5, NO_EVENT, 12 * 5 + 2,\n NOTE_OFF, 12 * 6 + 4, NO_EVENT]\n chord_events = ['C', 'Am', 'Dm', 'G', 'C', NO_CHORD]\n melody = melodies_lib.Melody(melody_events)\n chords = chords_lib.ChordProgression(chord_events)\n expected_melody = copy.deepcopy(melody)\n expected_chords = copy.deepcopy(chords)\n lead_sheet = lead_sheets_lib.LeadSheet(melody, chords)\n lead_sheet.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)\n transpose_amount = expected_melody.squash(\n min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)\n expected_chords.transpose(transpose_amount=transpose_amount)\n 
self.assertEqual(expected_melody, lead_sheet.melody)\n self.assertEqual(expected_chords, lead_sheet.chords)\n\n def testSetLength(self):\n # Setting LeadSheet length should agree with setting length on melody and\n # chords separately.\n melody_events = [60]\n chord_events = ['C7']\n melody = melodies_lib.Melody(melody_events, start_step=9)\n chords = chords_lib.ChordProgression(chord_events, start_step=9)\n expected_melody = copy.deepcopy(melody)\n expected_chords = copy.deepcopy(chords)\n lead_sheet = lead_sheets_lib.LeadSheet(melody, chords)\n lead_sheet.set_length(5)\n expected_melody.set_length(5)\n expected_chords.set_length(5)\n self.assertEqual(expected_melody, lead_sheet.melody)\n self.assertEqual(expected_chords, lead_sheet.chords)\n self.assertEqual(9, lead_sheet.start_step)\n self.assertEqual(14, lead_sheet.end_step)\n self.assertListEqual([9, 10, 11, 12, 13], lead_sheet.steps)\n\n def testToSequence(self):\n # Sequence produced from lead sheet should contain notes from melody\n # sequence and chords from chord sequence as text annotations.\n melody = melodies_lib.Melody(\n [NO_EVENT, 1, NO_EVENT, NOTE_OFF, NO_EVENT, 2, 3, NOTE_OFF, NO_EVENT])\n chords = chords_lib.ChordProgression(\n [NO_CHORD, 'A', 'A', 'C#m', 'C#m', 'D', 'B', 'B', 'B'])\n lead_sheet = lead_sheets_lib.LeadSheet(melody, chords)\n\n sequence = lead_sheet.to_sequence(\n velocity=10,\n instrument=1,\n sequence_start_time=2,\n qpm=60.0)\n melody_sequence = melody.to_sequence(\n velocity=10,\n instrument=1,\n sequence_start_time=2,\n qpm=60.0)\n chords_sequence = chords.to_sequence(\n sequence_start_time=2,\n qpm=60.0)\n\n self.assertEqual(melody_sequence.ticks_per_quarter,\n sequence.ticks_per_quarter)\n self.assertProtoEquals(melody_sequence.tempos, sequence.tempos)\n self.assertEqual(melody_sequence.total_time, sequence.total_time)\n self.assertProtoEquals(melody_sequence.notes, sequence.notes)\n self.assertProtoEquals(chords_sequence.text_annotations,\n sequence.text_annotations)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Common functions/helpers for the joint model.\n\nThis library contains many comman functions and helpers used to train (using\nscript `train_joint.py`) the joint model (defined in `model_joint.py`). These\ncomponents are classified in the following categories:\n\n - Inetration helper that helps interate through data in the training loop.\n This includes:\n `BatchIndexIterator`, `InterGroupSamplingIndexIterator`,\n `GuasssianDataHelper`, `SingleDataIterator`, `PairedDataIterator`.\n\n - Summary helper that makes manual sumamrization easiser. This includes:\n `ManualSummaryHelper`.\n\n - Loading helper that makes loading config / dataset / model easier. 
This\n includes:\n `config_is_wavegan`, `load_dataset`, `load_dataset_wavegan`,\n `load_config`, `load_model`, `restore_model`.\n\n - Model helpers that makes model-related actions such as running,\n classifying and inferencing easier. This includes:\n `run_with_batch`, `ModelHelper`, `ModelWaveGANHelper`, `OneSideHelper`.\n\n - Miscellaneous Helpers, including\n `prepare_dirs`\n\n\"\"\"\nimport importlib\nimport os\n\nfrom magenta.models.latent_transfer import common\nfrom magenta.models.latent_transfer import model_dataspace\nimport numpy as np\nfrom scipy.io import wavfile\nimport tensorflow.compat.v1 as tf # noqa\n\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_string(\n 'wavegan_gen_ckpt_dir', '', 'The directory to WaveGAN generator\\'s ckpt. '\n 'If WaveGAN is involved, this argument must be set.')\ntf.flags.DEFINE_string(\n 'wavegan_inception_ckpt_dir', '',\n 'The directory to WaveGAN inception (classifier)\\'s ckpt. '\n 'If WaveGAN is involved, this argument must be set.')\ntf.flags.DEFINE_string(\n 'wavegan_latent_dir', '', 'The directory to WaveGAN\\'s latent space.'\n 'If WaveGAN is involved, this argument must be set.')\n\n\nclass BatchIndexIterator(object):\n \"\"\"An inifite iterator each time yielding batch.\n\n This iterator yields the index of data instances rather than data itself.\n This design enables the index to be resuable in indexing multiple arrays.\n\n Args:\n n: An integer indicating total size of dataset.\n batch_size: An integer indictating size of batch.\n \"\"\"\n\n def __init__(self, n, batch_size):\n \"\"\"Inits this integer.\"\"\"\n self.n = n\n self.batch_size = batch_size\n\n self._pos = 0\n self._order = self._make_random_order()\n\n def __iter__(self):\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self):\n batch = []\n for i in range(self._pos, self._pos + self.batch_size):\n if i % self.n == 0:\n self._order = self._make_random_order()\n batch.append(self._order[i % self.n])\n batch = np.array(batch, dtype=np.int32)\n\n self._pos += self.batch_size\n return batch\n\n def _make_random_order(self):\n \"\"\"Make a new, shuffled order.\"\"\"\n return np.random.permutation(np.arange(0, self.n))\n\n\nclass InterGroupSamplingIndexIterator(object):\n \"\"\"Radonmly samples index with a label group.\n\n This iterator yields a pair of indices in two dataset that always has the\n same label. This design enables the index to be resuable in indexing multiple\n arrays and is needed for the scenario where only label-level alignment is\n provided.\n\n Args:\n group_by_label_A: List of lists for data space A. The i-th list indicates\n the non-empty list of indices for data instance with i-th (zero-based)\n label.\n group_by_label_B: List of lists for data space B. 
The i-th list indicates\n the non-empty list of indices for data instance with i-th (zero-based)\n label.\n pairing_number: An integer indictating the umber of paired data to be used.\n batch_size: An integer indictating size of batch.\n \"\"\"\n\n # Variable that in its name has A or B indictating their belonging of one side\n # of data has name consider to be invalid by pylint so we disable the warning.\n # pylint:disable=invalid-name\n def __init__(self, group_by_label_A, group_by_label_B, pairing_number,\n batch_size):\n assert len(group_by_label_A) == len(group_by_label_B)\n for _ in group_by_label_A:\n assert _\n for _ in group_by_label_B:\n assert _\n\n n_label = self.n_label = len(group_by_label_A)\n\n for i in range(n_label):\n if pairing_number >= 0:\n n_use = pairing_number // n_label\n if pairing_number % n_label != 0:\n n_use += int(i < pairing_number % n_label)\n else:\n n_use = max(len(group_by_label_A[i]), len(group_by_label_B[i]))\n group_by_label_A[i] = np.array(group_by_label_A[i])[:n_use]\n group_by_label_B[i] = np.array(group_by_label_B[i])[:n_use]\n self.group_by_label_A = group_by_label_A\n self.group_by_label_B = group_by_label_B\n self.batch_size = batch_size\n\n self._pos = 0\n\n self._sub_pos_A = [0] * n_label\n self._sub_pos_B = [0] * n_label\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Python 2 compatible interface.\"\"\"\n return self.__next__()\n\n def __next__(self):\n batch = []\n for i in range(self._pos, self._pos + self.batch_size):\n label = i % self.n_label\n index_A = self.pick_index(self._sub_pos_A, self.group_by_label_A, label)\n index_B = self.pick_index(self._sub_pos_B, self.group_by_label_B, label)\n batch.append((index_A, index_B))\n batch = np.array(batch, dtype=np.int32)\n\n self._pos += self.batch_size\n return batch\n\n def pick_index(self, sub_pos, group_by_label, label):\n if sub_pos[label] == 0:\n np.random.shuffle(group_by_label[label])\n\n result = group_by_label[label][sub_pos[label]]\n sub_pos[label] = (sub_pos[label] + 1) % len(group_by_label[label])\n return result\n\n # pylint:enable=invalid-name\n\n\nclass GuasssianDataHelper(object):\n \"\"\"A helper to hold data where each instance is a sampled point.\n\n Args:\n mu: Mean of data points.\n sigma: Variance of data points. If it is None, it is treated as zeros.\n batch_size: An integer indictating size of batch.\n \"\"\"\n\n def __init__(self, mu, sigma=None):\n if sigma is None:\n sigma = np.zeros_like(mu)\n assert mu.shape == sigma.shape\n self.mu, self.sigma = mu, sigma\n\n def pick_batch(self, batch_index):\n \"\"\"Pick a batch where instances are sampled from Guassian distributions.\"\"\"\n mu, sigma = self.mu, self.sigma\n batch_mu, batch_sigma = self._np_index_arrs(batch_index, mu, sigma)\n batch = self._np_sample_from_gaussian(batch_mu, batch_sigma)\n return batch\n\n def __len__(self):\n return len(self.mu)\n\n @staticmethod\n def _np_sample_from_gaussian(mu, sigma):\n \"\"\"Sampling from Guassian distribtuion specified by `mu` and `sigma`.\"\"\"\n assert mu.shape == sigma.shape\n return mu + sigma * np.random.randn(*sigma.shape)\n\n @staticmethod\n def _np_index_arrs(index, *args):\n \"\"\"Index arrays with the same given `index`.\"\"\"\n return (arr[index] for arr in args)\n\n\nclass SingleDataIterator(object):\n \"\"\"Iterator of a single-side dataset of encoded representation.\n\n Args:\n mu: Mean of data points.\n sigma: Variance of data points. 
If it is None, it is treated as zeros.\n batch_size: An integer indictating size of batch.\n \"\"\"\n\n def __init__(self, mu, sigma, batch_size):\n self.data_helper = GuasssianDataHelper(mu, sigma)\n\n n = len(self.data_helper)\n self.batch_index_iterator = BatchIndexIterator(n, batch_size)\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Python 2 compatible interface.\"\"\"\n return self.__next__()\n\n def __next__(self):\n batch_index = next(self.batch_index_iterator)\n batch = self.data_helper.pick_batch(batch_index)\n debug_info = (batch_index,)\n return batch, debug_info\n\n\nclass PairedDataIterator(object):\n \"\"\"Iterator of a paired dataset of encoded representation.\n\n\n Args:\n mu_A: Mean of data points in data space A.\n sigma_A: Variance of data points in data space A. If it is None, it is\n treated as zeros.\n label_A: A List of labels for data points in data space A.\n index_grouped_by_label_A: List of lists for data space A. The i-th list\n indicates the non-empty list of indices for data instance with i-th\n (zero-based) label.\n mu_B: Mean of data points in data space B.\n sigma_B: Variance of data points in data space B. If it is None, it is\n treated as zeros.\n label_B: A List of labels for data points in data space B.\n index_grouped_by_label_B: List of lists for data space B. The i-th list\n indicates the non-empty list of indices for data instance with i-th\n (zero-based) label.\n pairing_number: An integer indictating the umber of paired data to be used.\n batch_size: An integer indictating size of batch.\n \"\"\"\n\n # Variable that in its name has A or B indictating their belonging of one side\n # of data has name consider to be invalid by pylint so we disable the warning.\n # pylint:disable=invalid-name\n\n def __init__(self, mu_A, sigma_A, train_data_A, label_A,\n index_grouped_by_label_A, mu_B, sigma_B, train_data_B, label_B,\n index_grouped_by_label_B, pairing_number, batch_size):\n self._data_helper_A = GuasssianDataHelper(mu_A, sigma_A)\n self._data_helper_B = GuasssianDataHelper(mu_B, sigma_B)\n\n self.batch_index_iterator = InterGroupSamplingIndexIterator(\n index_grouped_by_label_A,\n index_grouped_by_label_B,\n pairing_number,\n batch_size,\n )\n\n self.label_A, self.label_B = label_A, label_B\n self.train_data_A, self.train_data_B = train_data_A, train_data_B\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Python 2 compatible interface.\"\"\"\n return self.__next__()\n\n def __next__(self):\n batch_index = next(self.batch_index_iterator)\n batch_index_A, batch_index_B = (batch_index[:, 0], batch_index[:, 1])\n\n batch_A = self._data_helper_A.pick_batch(batch_index_A)\n batch_B = self._data_helper_B.pick_batch(batch_index_B)\n\n batch_label_A = self.label_A[batch_index_A]\n batch_label_B = self.label_B[batch_index_B]\n assert np.array_equal(batch_label_A, batch_label_B)\n\n batch_train_data_A = (\n None if self._train_data_A is None else self.train_data_A[batch_index_A]\n )\n batch_train_data_B = (\n None if self._train_data_B is None else self.train_data_B[batch_index_B]\n )\n debug_info = (batch_train_data_A, batch_train_data_B)\n\n return batch_A, batch_B, debug_info\n\n # pylint:enable=invalid-name\n\n\nclass ManualSummaryHelper(object):\n \"\"\"A helper making manual TF summary easier.\"\"\"\n\n def __init__(self):\n self._key_to_ph_summary_tuple = {}\n\n def get_summary(self, sess, key, value):\n \"\"\"Get TF (scalar) summary.\n\n Args:\n sess: A TF Session to be used in making summary.\n key: A string 
indicating the name of summary.\n value: A string indicating the value of summary.\n\n Returns:\n A TF summary.\n \"\"\"\n self._add_key_if_not_exists(key)\n placeholder, summary = self._key_to_ph_summary_tuple[key]\n return sess.run(summary, {placeholder: value})\n\n def _add_key_if_not_exists(self, key):\n \"\"\"Add related TF heads for a key if it is not used before.\"\"\"\n if key in self._key_to_ph_summary_tuple:\n return\n placeholder = tf.placeholder(tf.float32, shape=(), name=key + '_ph')\n summary = tf.summary.scalar(key, placeholder)\n self._key_to_ph_summary_tuple[key] = (placeholder, summary)\n\n\ndef config_is_wavegan(config):\n return config['dataset'].lower() == 'wavegan'\n\n\ndef load_dataset(config_name, exp_uid):\n \"\"\"Load a dataset from a config's name.\n\n The loaded dataset consists of:\n - original data (dataset_blob, train_data, train_label),\n - encoded data from a pretrained model (train_mu, train_sigma), and\n - index grouped by label (index_grouped_by_label).\n\n Args:\n config_name: A string indicating the name of config to parameterize the\n model that associates with the dataset.\n exp_uid: A string representing the unique id of experiment to be used in\n model that associates with the dataset.\n\n Returns:\n An tuple of abovementioned components in the dataset.\n \"\"\"\n\n config = load_config(config_name)\n if config_is_wavegan(config):\n return load_dataset_wavegan()\n\n model_uid = common.get_model_uid(config_name, exp_uid)\n\n dataset = common.load_dataset(config)\n train_data = dataset.train_data\n attr_train = dataset.attr_train\n path_train = os.path.join(dataset.basepath, 'encoded', model_uid,\n 'encoded_train_data.npz')\n train = np.load(path_train)\n train_mu = train['mu']\n train_sigma = train['sigma']\n train_label = np.argmax(attr_train, axis=-1) # from one-hot to label\n index_grouped_by_label = common.get_index_grouped_by_label(train_label)\n\n tf.logging.info('index_grouped_by_label size: %s',\n [len(_) for _ in index_grouped_by_label])\n\n tf.logging.info('train loaded from %s', path_train)\n tf.logging.info('train shapes: mu = %s, sigma = %s', train_mu.shape,\n train_sigma.shape)\n dataset_blob = dataset\n return (dataset_blob, train_data, train_label, train_mu, train_sigma,\n index_grouped_by_label)\n\n\ndef load_dataset_wavegan():\n \"\"\"Load WaveGAN's dataset.\n\n The loaded dataset consists of:\n - original data (dataset_blob, train_data, train_label),\n - encoded data from a pretrained model (train_mu, train_sigma), and\n - index grouped by label (index_grouped_by_label).\n\n Some of these attributes are not avaiable (set as None) but are left here\n to keep everything aligned with returned value of `load_dataset`.\n\n Returns:\n An tuple of abovementioned components in the dataset.\n \"\"\"\n\n latent_dir = os.path.expanduser(FLAGS.wavegan_latent_dir)\n path_train = os.path.join(latent_dir, 'data_train.npz')\n train = np.load(path_train)\n train_z = train['z']\n train_label = train['label']\n index_grouped_by_label = common.get_index_grouped_by_label(train_label)\n\n dataset_blob, train_data = None, None\n train_mu, train_sigma = train_z, None\n return (dataset_blob, train_data, train_label, train_mu, train_sigma,\n index_grouped_by_label)\n\n\ndef load_config(config_name):\n \"\"\"Load the config from its name.\"\"\"\n return importlib.import_module('configs.%s' % config_name).config\n\n\ndef load_model(model_cls, config_name, exp_uid):\n \"\"\"Load a model.\n\n Args:\n model_cls: A sonnet Class that is the factory of model.\n 
config_name: A string indicating the name of config to parameterize the\n model.\n exp_uid: A string representing the unique id of experiment to be used in\n model.\n\n Returns:\n An instance of sonnet model.\n \"\"\"\n\n config = load_config(config_name)\n model_uid = common.get_model_uid(config_name, exp_uid)\n\n m = model_cls(config, name=model_uid)\n m()\n return m\n\n\ndef restore_model(saver, config_name, exp_uid, sess, save_path,\n ckpt_filename_template):\n model_uid = common.get_model_uid(config_name, exp_uid)\n saver.restore(\n sess,\n os.path.join(\n save_path, model_uid, 'best', ckpt_filename_template % model_uid))\n\n\ndef prepare_dirs(\n signature='unspecified_signature',\n config_name='unspecified_config_name',\n exp_uid='unspecified_exp_uid',\n):\n \"\"\"Prepare saving and sampling direcotories for training.\n\n Args:\n signature: A string of signature of model such as `joint_model`.\n config_name: A string representing the name of config for joint model.\n exp_uid: A string representing the unique id of experiment to be used in\n joint model.\n\n Returns:\n A tuple of (save_dir, sample_dir). They are strings and are paths to the\n directory for saving checkpoints / summaries and path to the directory\n for saving samplings, respectively.\n \"\"\"\n\n model_uid = common.get_model_uid(config_name, exp_uid)\n\n local_base_path = os.path.join(common.get_default_scratch(), signature)\n\n save_dir = os.path.join(local_base_path, 'ckpts', model_uid)\n tf.gfile.MakeDirs(save_dir)\n sample_dir = os.path.join(local_base_path, 'sample', model_uid)\n tf.gfile.MakeDirs(sample_dir)\n\n return save_dir, sample_dir\n\n\ndef run_with_batch(sess, op_target, op_feed, arr_feed, batch_size=None):\n if batch_size is None:\n batch_size = len(arr_feed)\n return np.concatenate([\n sess.run(op_target, {op_feed: arr_feed[i:i + batch_size]})\n for i in range(0, len(arr_feed), batch_size)\n ])\n\n\nclass ModelHelper(object):\n \"\"\"A Helper that provides sampling and classification for pre-trained WaveGAN.\n\n This generic helper is for VAE model we trained as dataspace model.\n For external sourced model use specified helper such as `ModelWaveGANHelper`.\n \"\"\"\n DEFAULT_BATCH_SIZE = 100\n\n def __init__(self, config_name, exp_uid):\n self.config_name = config_name\n self.exp_uid = exp_uid\n\n self.build()\n\n def build(self):\n \"\"\"Build the TF graph and heads for dataspace model.\n\n It also prepares different graph, session and heads for sampling and\n classification respectively.\n \"\"\"\n\n config_name = self.config_name\n config = load_config(config_name)\n exp_uid = self.exp_uid\n\n graph = tf.Graph()\n with graph.as_default():\n sess = tf.Session(graph=graph)\n m = load_model(model_dataspace.Model, config_name, exp_uid)\n\n self.config = config\n self.graph = graph\n self.sess = sess\n self.m = m\n\n def restore_best(self, saver_name, save_path, ckpt_filename_template):\n \"\"\"Restore the weights of best pre-trained models.\"\"\"\n config_name = self.config_name\n exp_uid = self.exp_uid\n sess = self.sess\n saver = getattr(self.m, saver_name)\n restore_model(saver, config_name, exp_uid, sess, save_path,\n ckpt_filename_template)\n\n def decode(self, z, batch_size=None):\n \"\"\"Decode from given latant space vectors `z`.\n\n Args:\n z: A numpy array of latent space vectors.\n batch_size: (Optional) a integer to indication batch size for computation\n which is useful if the sampling requires lots of GPU memory.\n\n Returns:\n A numpy array, the dataspace points from decoding.\n \"\"\"\n 
m = self.m\n batch_size = batch_size or self.DEFAULT_BATCH_SIZE\n return run_with_batch(self.sess, m.x_mean, m.z, z, batch_size)\n\n def classify(self, real_x, batch_size=None):\n \"\"\"Classify given dataspace points `real_x`.\n\n Args:\n real_x: A numpy array of dataspace points.\n batch_size: (Optional) a integer to indication batch size for computation\n which is useful if the classification requires lots of GPU memory.\n\n Returns:\n A numpy array, the prediction from classifier.\n \"\"\"\n m = self.m\n op_target = m.pred_classifier\n op_feed = m.x\n arr_feed = real_x\n batch_size = batch_size or self.DEFAULT_BATCH_SIZE\n pred = run_with_batch(self.sess, op_target, op_feed, arr_feed, batch_size)\n pred = np.argmax(pred, axis=-1)\n return pred\n\n def save_data(self, x, name, save_dir, x_is_real_x=False):\n \"\"\"Save dataspace instances.\n\n Args:\n x: A numpy array of dataspace points.\n name: A string indicating the name in the saved file.\n save_dir: A string indicating the directory to put the saved file.\n x_is_real_x: An boolean indicating whether `x` is already in dataspace. If\n not, `x` is converted to dataspace before saving\n \"\"\"\n real_x = x if x_is_real_x else self.decode(x)\n real_x = common.post_proc(real_x, self.config)\n batched_real_x = common.batch_image(real_x)\n sample_file = os.path.join(save_dir, '%s.png' % name)\n common.save_image(batched_real_x, sample_file)\n\n\nclass ModelWaveGANHelper(object):\n \"\"\"A Helper that provides sampling and classification for pre-trained WaveGAN.\n \"\"\"\n DEFAULT_BATCH_SIZE = 100\n\n def __init__(self):\n self.build()\n\n def build(self):\n \"\"\"Build the TF graph and heads from pre-trained WaveGAN ckpts.\n\n It also prepares different graph, session and heads for sampling and\n classification respectively.\n \"\"\"\n\n # pylint:disable=unused-variable,possibly-unused-variable\n # Reason:\n # All endpoints are stored as attribute at the end of `_build`.\n # Pylint cannot infer this case so it emits false alarm of\n # unused-variable if we do not disable this warning.\n\n # pylint:disable=invalid-name\n # Reason:\n # Variable useing 'G' in is name to be consistent with WaveGAN's author\n # has name consider to be invalid by pylint so we disable the warning.\n\n # Dataset (SC09, WaveGAN)'s generator\n graph_sc09_gan = tf.Graph()\n with graph_sc09_gan.as_default():\n # Use the retrained, Gaussian priored model\n gen_ckpt_dir = os.path.expanduser(FLAGS.wavegan_gen_ckpt_dir)\n sess_sc09_gan = tf.Session(graph=graph_sc09_gan)\n saver_gan = tf.train.import_meta_graph(\n os.path.join(gen_ckpt_dir, 'infer', 'infer.meta'))\n\n # Dataset (SC09, WaveGAN)'s classifier (inception)\n graph_sc09_class = tf.Graph()\n with graph_sc09_class.as_default():\n inception_ckpt_dir = os.path.expanduser(FLAGS.wavegan_inception_ckpt_dir)\n sess_sc09_class = tf.Session(graph=graph_sc09_class)\n saver_class = tf.train.import_meta_graph(\n os.path.join(inception_ckpt_dir, 'infer.meta'))\n\n # Dataset B (SC09, WaveGAN)'s Tensor symbols\n sc09_gan_z = graph_sc09_gan.get_tensor_by_name('z:0')\n sc09_gan_G_z = graph_sc09_gan.get_tensor_by_name('G_z:0')[:, :, 0]\n\n # Classification: Tensor symbols\n sc09_class_x = graph_sc09_class.get_tensor_by_name('x:0')\n sc09_class_scores = graph_sc09_class.get_tensor_by_name('scores:0')\n\n # Add all endpoints as object attributes\n for k, v in locals().items():\n self.__dict__[k] = v\n\n def restore(self):\n \"\"\"Restore the weights of models.\"\"\"\n gen_ckpt_dir = self.gen_ckpt_dir\n graph_sc09_gan = 
self.graph_sc09_gan\n saver_gan = self.saver_gan\n sess_sc09_gan = self.sess_sc09_gan\n\n inception_ckpt_dir = self.inception_ckpt_dir\n graph_sc09_class = self.graph_sc09_class\n saver_class = self.saver_class\n sess_sc09_class = self.sess_sc09_class\n\n with graph_sc09_gan.as_default():\n saver_gan.restore(\n sess_sc09_gan,\n os.path.join(gen_ckpt_dir, 'bridge', 'model.ckpt'))\n\n with graph_sc09_class.as_default():\n saver_class.restore(sess_sc09_class,\n os.path.join(inception_ckpt_dir, 'best_acc-103005'))\n\n # pylint:enable=unused-variable,possibly-unused-variable\n # pylint:enable=invalid-name\n\n def decode(self, z, batch_size=None):\n \"\"\"Decode from given latant space vectors `z`.\n\n Args:\n z: A numpy array of latent space vectors.\n batch_size: (Optional) a integer to indication batch size for computation\n which is useful if the sampling requires lots of GPU memory.\n\n Returns:\n A numpy array, the dataspace points from decoding.\n \"\"\"\n batch_size = batch_size or self.DEFAULT_BATCH_SIZE\n return run_with_batch(self.sess_sc09_gan, self.sc09_gan_G_z,\n self.sc09_gan_z, z, batch_size)\n\n def classify(self, real_x, batch_size=None):\n \"\"\"Classify given dataspace points `real_x`.\n\n Args:\n real_x: A numpy array of dataspace points.\n batch_size: (Optional) a integer to indication batch size for computation\n which is useful if the classification requires lots of GPU memory.\n\n Returns:\n A numpy array, the prediction from classifier.\n \"\"\"\n batch_size = batch_size or self.DEFAULT_BATCH_SIZE\n pred = run_with_batch(self.sess_sc09_class, self.sc09_class_scores,\n self.sc09_class_x, real_x, batch_size)\n pred = np.argmax(pred, axis=-1)\n return pred\n\n def save_data(self, x, name, save_dir, x_is_real_x=False):\n \"\"\"Save dataspace instances.\n\n Args:\n x: A numpy array of dataspace points.\n name: A string indicating the name in the saved file.\n save_dir: A string indicating the directory to put the saved file.\n x_is_real_x: An boolean indicating whether `x` is already in dataspace. 
If\n not, `x` is converted to dataspace before saving\n \"\"\"\n real_x = x if x_is_real_x else self.decode(x)\n real_x = real_x.reshape(-1)\n sample_file = os.path.join(save_dir, '%s.wav' % name)\n wavfile.write(sample_file, rate=16000, data=real_x)\n\n\nclass OneSideHelper(object):\n \"\"\"The helper that manages model and classifier in dataspace for joint model.\n\n Args:\n config_name: A string representing the name of config for model in\n dataspace.\n exp_uid: A string representing the unique id of experiment used in\n the model in dataspace.\n config_name_classifier: A string representing the name of config for\n clasisifer in dataspace.\n exp_uid_classifier: A string representing the unique id of experiment used\n in the clasisifer in dataspace.\n \"\"\"\n\n def __init__(\n self,\n config_name,\n exp_uid,\n config_name_classifier,\n exp_uid_classifier,\n ):\n config = load_config(config_name)\n this_config_is_wavegan = config_is_wavegan(config)\n if this_config_is_wavegan:\n # The sample object servers both purpose.\n m_helper = ModelWaveGANHelper()\n m_classifier_helper = m_helper\n else:\n # In this case two diffent objects serve two purpose.\n m_helper = ModelHelper(config_name, exp_uid)\n m_classifier_helper = ModelHelper(config_name_classifier,\n exp_uid_classifier)\n\n self.config_name = config_name\n self.this_config_is_wavegan = this_config_is_wavegan\n self.config = config\n self.m_helper = m_helper\n self.m_classifier_helper = m_classifier_helper\n\n def restore(self, dataset_blob):\n \"\"\"Restore the pretrained model and classifier.\n\n Args:\n dataset_blob: The object containts `save_path` used for restoring.\n \"\"\"\n this_config_is_wavegan = self.this_config_is_wavegan\n m_helper = self.m_helper\n m_classifier_helper = self.m_classifier_helper\n\n if this_config_is_wavegan:\n m_helper.restore()\n # We don't need restore the `m_classifier_helper` again since `m_helper`\n # and `m_classifier_helper` are two identicial objects.\n else:\n m_helper.restore_best('vae_saver', dataset_blob.save_path,\n 'vae_best_%s.ckpt')\n m_classifier_helper.restore_best(\n 'classifier_saver', dataset_blob.save_path, 'classifier_best_%s.ckpt')\n" ]
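Illustrative note: the latent_transfer helpers in the last file of the code list above (BatchIndexIterator, GuasssianDataHelper) boil down to shuffling an index order, slicing index batches, and re-sampling each batch from per-instance Gaussians. A minimal NumPy sketch of that pattern with made-up sizes, written outside any of the classes above:

import numpy as np

# Toy stand-ins for the encoded dataset: per-instance mean and std (hypothetical sizes).
n, dim, batch_size = 1000, 64, 32
mu = np.random.randn(n, dim).astype(np.float32)
sigma = np.abs(np.random.randn(n, dim)).astype(np.float32)

order = np.random.permutation(np.arange(n))   # same idea as _make_random_order
batch_index = order[:batch_size]              # index batch, reusable across aligned arrays
# Reparameterized sample, mirroring _np_sample_from_gaussian: mu + sigma * eps
batch = mu[batch_index] + sigma[batch_index] * np.random.randn(batch_size, dim)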
[ [ "tensorflow.compat.v1.device", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.name_scope" ], [ "tensorflow.compat.v1.test.main" ], [ "numpy.log", "numpy.random.choice", "tensorflow.compat.v1.train.FeatureLists", "tensorflow.compat.v1.train.FloatList", "tensorflow.compat.v1.train.SequenceExample", "tensorflow.compat.v1.train.Int64List", "tensorflow.compat.v1.train.FeatureList" ], [ "numpy.abs", "numpy.min", "numpy.median", "numpy.max", "numpy.mean", "numpy.interp", "numpy.array" ], [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.split", "tensorflow.compat.v1.global_variables_initializer", "numpy.random.random_integers" ], [ "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.train.BytesList", "numpy.pad", "tensorflow.compat.v1.test.main", "numpy.squeeze", "tensorflow.compat.v1.initializers.local_variables", "tensorflow.compat.v1.Session", "numpy.max", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.python_io.TFRecordWriter", "tensorflow.compat.v1.initializers.global_variables", "tensorflow.compat.v1.train.Features", "tensorflow.compat.v1.constant" ], [ "tensorflow.compat.v1.test.main" ], [ "scipy.io.wavfile.write", "numpy.array_equal", "numpy.arange", "numpy.random.shuffle", "tensorflow.compat.v1.flags.DEFINE_string", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "numpy.argmax", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.summary.scalar", "numpy.zeros_like", "tensorflow.compat.v1.gfile.MakeDirs", "numpy.load", "numpy.random.randn", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
BMC-SDNU/Cross-Modal-Hashing-Retrieval
[ "0196e313aad0a93ebf93e1150f024d6a07f8363a" ]
[ "DCHUC/utils/txt_module.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nLAYER1_NODE = 10240\n\n\ndef weights_init(m):\n if type(m) == nn.Conv2d:\n nn.init.xavier_uniform(m.weight.data)\n nn.init.constant(m.bias.data, 0.01)\n\n\nclass TxtModule(nn.Module):\n def __init__(self, y_dim, bit):\n \"\"\"\n :param y_dim: dimension of tags\n :param bit: bit number of the final binary code\n \"\"\"\n super(TxtModule, self).__init__()\n self.module_name = \"text_model\"\n\n # full-conv layers\n self.conv1 = nn.Conv2d(1, LAYER1_NODE, kernel_size=(y_dim, 1), stride=(1, 1))\n self.conv2 = nn.Conv2d(LAYER1_NODE, bit, kernel_size=1, stride=(1, 1))\n self.apply(weights_init)\n self.classifier = nn.Sequential(\n self.conv1,\n nn.ReLU(inplace=True),\n nn.Dropout(),\n self.conv2,\n )\n\n def forward(self, x):\n x = self.classifier(x)\n x = x.squeeze()\n tanh = nn.Tanh()\n x = tanh(x)\n return x\n\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.init.constant", "torch.nn.init.xavier_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csherstan/DeepRL
[ "fbf8da1f158792a0b9d29728c9d407ae40573070", "fbf8da1f158792a0b9d29728c9d407ae40573070" ]
[ "aux_0.99_10.0.py", "aux_0.99_1.0.py" ]
[ "import argparse\n\nimport torch\n\nfrom deep_rl import random_seed, set_one_thread, select_device, Config, generate_tag, Task, TDAuxNet, NatureConvBody, \\\n LinearSchedule, AsyncReplay, ImageNormalizer, SignNormalizer, run_steps, mkdir\nfrom deep_rl.agent.TDAux_agent import TDAuxAgent\nimport os\n\ndef td_aux_many(config: Config, **kwargs):\n \"\"\"\n\n :param config:\n :param kwargs: kwargs used to generate the experiment tag name uses for saving.\n :return:\n \"\"\"\n generate_tag(kwargs)\n kwargs.setdefault('log_level', 0)\n config.merge(kwargs)\n\n mkdir(os.path.join(config.data_dir, 'log'))\n mkdir(os.path.join(config.data_dir, 'data'))\n\n config.task_fn = lambda: Task(config.game)\n config.eval_env = config.task_fn()\n # aux_gammas = [0.0, 0.5, 0.9, 0.99]\n aux_gammas = [0.99]\n\n aux_dict = {str(g).replace(\".\", \"_\"): TDAuxNet.AuxCFG(g, loss_weight=10.0) for g in aux_gammas}\n # aux_dict = {}\n\n # config.optimizer_fn = lambda params: torch.optim.RMSprop(\n # params, lr=0.00025, alpha=0.95, eps=0.01, centered=True)\n config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)\n # I'm just hard coding the shape of the target\n config.network_fn = lambda: TDAuxNet((84, 84), config.action_dim,\n NatureConvBody(in_channels=config.history_length), aux_dict)\n config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)\n\n config.replay_fn = lambda: AsyncReplay(memory_size=int(5e5), batch_size=32)\n\n config.batch_size = 32\n config.state_normalizer = ImageNormalizer()\n config.reward_normalizer = SignNormalizer()\n config.discount = 0.99\n config.target_network_update_freq = 10000\n config.exploration_steps = 50000\n config.sgd_update_frequency = 4\n config.gradient_clip = 5\n config.history_length = 4\n # config.double_q = True\n config.double_q = False\n run_steps(TDAuxAgent(config))\n\n\nif __name__ == \"__main__\":\n cf = Config()\n cf.add_argument('--game', required=True)\n cf.add_argument('--run', type=int, required=True)\n cf.add_argument('--data_dir', type=str, required=True)\n cf.add_argument('--save_interval', type=int, default=1000000)\n cf.add_argument('--max_steps', type=int, default=int(2.5e7))\n cf.merge()\n\n set_one_thread()\n select_device(0)\n\n td_aux_many(cf, game=cf.game, run=cf.run, remark=\"aux_0.99_10.0\")\n", "import argparse\n\nimport torch\n\nfrom deep_rl import random_seed, set_one_thread, select_device, Config, generate_tag, Task, TDAuxNet, NatureConvBody, \\\n LinearSchedule, AsyncReplay, ImageNormalizer, SignNormalizer, run_steps, mkdir\nfrom deep_rl.agent.TDAux_agent import TDAuxAgent\nimport os\n\n\ndef td_aux_many(config: Config, **kwargs):\n \"\"\"\n\n :param config:\n :param kwargs: kwargs used to generate the experiment tag name uses for saving.\n :return:\n \"\"\"\n generate_tag(kwargs)\n kwargs.setdefault('log_level', 0)\n config.merge(kwargs)\n\n mkdir(os.path.join(config.data_dir, 'log'))\n mkdir(os.path.join(config.data_dir, 'data'))\n\n config.task_fn = lambda: Task(config.game)\n config.eval_env = config.task_fn()\n # aux_gammas = [0.0, 0.5, 0.9, 0.99]\n aux_gammas = [0.99]\n\n aux_dict = {str(g).replace(\".\", \"_\"): TDAuxNet.AuxCFG(g, loss_weight=1.0) for g in aux_gammas}\n # aux_dict = {}\n\n # config.optimizer_fn = lambda params: torch.optim.RMSprop(\n # params, lr=0.00025, alpha=0.95, eps=0.01, centered=True)\n config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)\n # I'm just hard coding the shape of the target\n config.network_fn = lambda: TDAuxNet((84, 84), config.action_dim,\n 
NatureConvBody(in_channels=config.history_length), aux_dict)\n config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)\n\n config.replay_fn = lambda: AsyncReplay(memory_size=int(5e5), batch_size=32)\n\n config.batch_size = 32\n config.state_normalizer = ImageNormalizer()\n config.reward_normalizer = SignNormalizer()\n config.discount = 0.99\n config.target_network_update_freq = 10000\n config.exploration_steps = 50000\n config.sgd_update_frequency = 4\n config.gradient_clip = 5\n config.history_length = 4\n # config.double_q = True\n config.double_q = False\n run_steps(TDAuxAgent(config))\n\n\nif __name__ == \"__main__\":\n cf = Config()\n cf.add_argument('--game', required=True)\n cf.add_argument('--run', type=int, required=True)\n cf.add_argument('--data_dir', type=str, required=True)\n cf.add_argument('--save_interval', type=int, default=1000000)\n cf.add_argument('--max_steps', type=int, default=int(2.5e7))\n cf.merge()\n\n set_one_thread()\n select_device(0)\n\n td_aux_many(cf, game=cf.game, run=cf.run, remark=\"aux_0.99_1.0\")\n" ]
[ [ "torch.optim.Adam" ], [ "torch.optim.Adam" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wqruan/tf-encrypted
[ "50ee4ae3ba76b7c1f70a90e18f875191adea0a07", "50ee4ae3ba76b7c1f70a90e18f875191adea0a07", "50ee4ae3ba76b7c1f70a90e18f875191adea0a07" ]
[ "primitives/tf_encrypted/primitives/paillier/primitives_test.py", "operations/secure_random/test_secure_random.py", "tf_encrypted/config.py" ]
[ "# pylint: disable=missing-docstring\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nfrom absl.testing import parameterized\n\nfrom tf_encrypted.primitives import paillier\nfrom tf_encrypted.test import tf_execution_context\n\n\nclass EncryptionTest(parameterized.TestCase):\n @parameterized.parameters(\n {\n \"run_eagerly\": run_eagerly,\n \"export_dtype\": export_dtype,\n \"export_expansion\": export_expansion,\n }\n for run_eagerly in [True, False]\n for export_dtype, export_expansion in [(tf.string, ())]\n )\n def test_export(self, run_eagerly, export_dtype, export_expansion):\n x = np.array([[12345, 34342]])\n\n context = tf_execution_context(run_eagerly)\n with context.scope():\n\n ek, dk = paillier.gen_keypair()\n assert isinstance(ek, paillier.EncryptionKey)\n assert isinstance(dk, paillier.DecryptionKey)\n n_exported = ek.export(export_dtype)\n assert isinstance(n_exported, tf.Tensor)\n assert n_exported.dtype == export_dtype\n assert n_exported.shape == (1, 1), n_exported.shape\n p_exported, q_exported = dk.export(export_dtype)\n assert isinstance(p_exported, tf.Tensor)\n assert p_exported.dtype == export_dtype\n assert p_exported.shape == (1, 1), p_exported.shape\n assert isinstance(q_exported, tf.Tensor)\n assert q_exported.dtype == export_dtype\n assert q_exported.shape == (1, 1), q_exported.shape\n\n r = paillier.gen_randomness(ek, shape=x.shape)\n assert isinstance(r, paillier.Randomness)\n r_exported = r.export(export_dtype)\n assert isinstance(r_exported, tf.Tensor)\n assert r_exported.dtype == export_dtype\n assert r_exported.shape == x.shape + export_expansion\n\n c = paillier.encrypt(ek, x, r)\n assert isinstance(c, paillier.Ciphertext)\n c_exported = c.export(export_dtype)\n assert isinstance(c_exported, tf.Tensor)\n assert c_exported.dtype == export_dtype\n assert c_exported.shape == x.shape + export_expansion\n\n @parameterized.parameters(\n {\"run_eagerly\": run_eagerly} for run_eagerly in (True, False)\n )\n def test_correctness(self, run_eagerly):\n\n p = 100000015333\n q = 100000015021\n n = p * q\n nn = n * n\n g = 1 + n\n x = 123456789\n r = 5083216764521909821749\n c = pow(g, x, nn) * pow(r, n, nn) % nn\n\n context = tf_execution_context(run_eagerly)\n with context.scope():\n\n ek = paillier.EncryptionKey(tf.constant([[str(n)]]))\n plaintext = np.array([[x]]).astype(str)\n randomness = paillier.Randomness(tf.constant([[str(r)]]))\n ciphertext = paillier.encrypt(ek, plaintext, randomness)\n\n expected = np.array([[c]]).astype(str)\n actual = ciphertext.export(tf.string)\n\n np.testing.assert_equal(context.evaluate(actual).astype(str), expected)\n\n @parameterized.parameters(\n {\"run_eagerly\": run_eagerly, \"x\": x, \"dtype\": dtype}\n for run_eagerly in [True, False]\n for x, dtype in [\n (np.array([[12345, 34342]]).astype(np.int32), tf.int32),\n (np.array([[\"12345\", \"34342\"]]).astype(str), tf.string),\n (\n np.array(\n [\n [\n \"123456789123456789123456789123456789\",\n \"987654321987654321987654321987654321\",\n ]\n ]\n ).astype(str),\n tf.string,\n ),\n ]\n )\n def test_encrypt_decrypt(self, run_eagerly, x, dtype):\n context = tf_execution_context(run_eagerly)\n with context.scope():\n\n ek, dk = paillier.gen_keypair()\n r = paillier.gen_randomness(ek, shape=x.shape)\n c = paillier.encrypt(ek, x, r)\n y = paillier.decrypt(dk, c, dtype=dtype)\n assert isinstance(y, tf.Tensor)\n assert y.dtype == dtype\n\n np.testing.assert_equal(context.evaluate(y).astype(x.dtype), x)\n\n @parameterized.parameters(\n {\"run_eagerly\": run_eagerly, 
\"dtype\": dtype, \"x0\": x0, \"x1\": x1}\n for run_eagerly in (True, False)\n for dtype in (tf.int32, tf.string)\n for x0 in (np.array([[12345, 123243]]), np.array([[12345]]))\n for x1 in (np.array([[12656, 434234]]),)\n )\n def test_add(self, run_eagerly, dtype, x0, x1):\n\n expected = x0 + x1\n\n context = tf_execution_context(run_eagerly)\n with context.scope():\n ek, dk = paillier.gen_keypair()\n\n r0 = paillier.gen_randomness(ek, shape=x0.shape)\n c0 = paillier.encrypt(ek, x0, r0)\n\n r1 = paillier.gen_randomness(ek, shape=x1.shape)\n c1 = paillier.encrypt(ek, x1, r1)\n\n c = paillier.add(ek, c0, c1)\n y = paillier.decrypt(dk, c, dtype=dtype)\n\n np.testing.assert_equal(\n context.evaluate(y).astype(np.int32), expected.astype(np.int32)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# pylint: disable=missing-docstring\nimport os\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework.errors import NotFoundError\n\nimport tf_encrypted as tfe\n\ndirname = os.path.dirname(tfe.__file__)\nso_name = \"{dn}/operations/secure_random/secure_random_module_tf_{tfv}.so\"\nshared_object = so_name.format(dn=dirname, tfv=tf.__version__)\nnotfound_msg = \"secure_random_module not found\"\n\ntry:\n secure_random_module = tf.load_op_library(shared_object)\n seeded_random_uniform = secure_random_module.secure_seeded_random_uniform\n random_uniform = secure_random_module.secure_random_uniform\n seed = secure_random_module.secure_seed\nexcept NotFoundError:\n secure_random_module = None\n\n\[email protected](secure_random_module is None, notfound_msg)\nclass TestSeededRandomUniform(unittest.TestCase):\n def test_int32_return(self):\n expected = [[608, 425, 925], [198, 891, 721]]\n\n with tf.Session():\n output = seeded_random_uniform(\n [2, 3], [1, 1, 1, 1, 1, 1, 1, 2], 0, 1000\n ).eval()\n\n np.testing.assert_array_equal(output, expected)\n\n def test_int64_return(self):\n expected = [[425, 198, 721], [911, 617, 113]]\n\n with tf.Session():\n minval = tf.constant(0, dtype=tf.int64)\n maxval = tf.constant(1000, dtype=tf.int64)\n\n output = seeded_random_uniform(\n [2, 3], [1, 1, 1, 1, 1, 1, 1, 2], minval, maxval\n ).eval()\n\n np.testing.assert_array_equal(output, expected)\n\n def test_min_max_range(self):\n with tf.Session():\n minval = tf.constant(-100000000, dtype=tf.int32)\n maxval = tf.constant(100000000, dtype=tf.int32)\n\n output = seeded_random_uniform(\n [10000], [1, 1, 1, 500, 1, 1, 1, 2], minval, maxval\n ).eval()\n\n for out in output:\n assert -100000000 <= out < 100000000\n\n def test_invalid_max_min(self):\n with tf.Session():\n minval = tf.constant(1000, dtype=tf.int64)\n maxval = tf.constant(-1000, dtype=tf.int64)\n\n with np.testing.assert_raises(errors.InvalidArgumentError):\n seeded_random_uniform(\n [2, 3], [1, 1, 1, 1, 1, 1, 1, 2], minval, maxval\n ).eval()\n\n def test_negative_numbers(self):\n expected = [[-1575, -1802, -1279], [-1089, -1383, -1887]]\n with tf.Session():\n minval = tf.constant(-2000, dtype=tf.int64)\n maxval = tf.constant(-1000, dtype=tf.int64)\n\n output = seeded_random_uniform(\n [2, 3], [1, 1, 1, 1, 1, 1, 1, 2], minval, maxval\n ).eval()\n\n np.testing.assert_array_equal(output, expected)\n\n\[email protected](secure_random_module is None, notfound_msg)\nclass TestRandomUniform(unittest.TestCase):\n def test_min_max_range(self):\n with tf.Session():\n minval = tf.constant(-10000000, dtype=tf.int32)\n maxval = tf.constant(10000000, dtype=tf.int32)\n\n output = 
random_uniform([1000], minval, maxval).eval()\n\n for out in output:\n assert -10000000 <= out < 10000000\n\n def test_small_range(self):\n with tf.Session():\n minval = tf.constant(-10, dtype=tf.int32)\n maxval = tf.constant(10, dtype=tf.int32)\n\n output = random_uniform([1000], minval, maxval).eval()\n\n for out in output:\n assert -10 <= out < 10\n\n def test_neg_range(self):\n with tf.Session():\n minval = tf.constant(-100, dtype=tf.int32)\n maxval = tf.constant(0, dtype=tf.int32)\n\n output = random_uniform([1000], minval, maxval).eval()\n\n for out in output:\n assert out < 0\n\n\[email protected](secure_random_module is None, notfound_msg)\nclass TestSeed(unittest.TestCase):\n def test_seed(self):\n with tf.Session():\n s = seed()\n\n minval = tf.constant(-2000, dtype=tf.int64)\n maxval = tf.constant(0, dtype=tf.int64)\n\n shape = [2, 3]\n\n output = seeded_random_uniform(shape, s, minval, maxval).eval()\n\n np.testing.assert_array_equal(output.shape, shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"The TF Encrypted Config abstraction and its implementations.\"\"\"\nimport json\nimport logging\nimport math\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections import OrderedDict\nfrom pathlib import Path\n\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import rewriter_config_pb2\n\nfrom .player import Player\n\nlogger = logging.getLogger(\"tf_encrypted\")\n\n\ndef tensorflow_supports_int64():\n \"\"\"Test if int64 is supported by this build of TensorFlow. Hacky.\"\"\"\n with tf.Graph().as_default():\n x = tf.constant([1], shape=(1, 1), dtype=tf.int64)\n try:\n tf.matmul(x, x)\n except TypeError:\n return False\n return True\n\n\ndef _get_docker_cpu_quota():\n \"\"\"Checks for available cpu cores in a containerized environment.\n\n If you witness memory leaks while doing multiple predictions using docker\n see https://github.com/tensorflow/tensorflow/issues/22098.\n \"\"\"\n cpu_cores = None\n\n # Check for quotas if we are in a linux container\n cfs_period = Path(\"/sys/fs/cgroup/cpu/cpu.cfs_period_us\")\n cfs_quota = Path(\"/sys/fs/cgroup/cpu/cpu.cfs_quota_us\")\n\n if cfs_period.exists() and cfs_quota.exists():\n with cfs_period.open(\"rb\") as p, cfs_quota.open(\"rb\") as q:\n p_int, q_int = int(p.read()), int(q.read())\n\n # get the cores allocated by dividing the quota\n # in microseconds by the period in microseconds\n if q_int > 0 and p_int > 0:\n cpu_cores = math.ceil(q_int / p_int)\n\n return cpu_cores\n\n\nclass Config(ABC):\n \"\"\"The main tfe.Config abstraction.\"\"\"\n\n @property\n @abstractmethod\n def players(self):\n \"\"\"Returns the config's list of :class:`Player` objects.\"\"\"\n\n @abstractmethod\n def get_player(self, name_or_player):\n \"\"\"Retrieve a specific :class:`Player` object by name.\n\n For convenience it is also possible to pass in an existing Player object,\n which will simply be returned as-is if the player is known already.\n \"\"\"\n\n @abstractmethod\n def get_tf_config(\n self, log_device_placement=False, disable_optimizations=False,\n ):\n \"\"\"Extract the underlying :class:`tf.ConfigProto`.\"\"\"\n\n @classmethod\n def build_graph_options(cls, disable_optimizations):\n if not disable_optimizations:\n return tf.GraphOptions()\n\n return tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(\n opt_level=tf.OptimizerOptions.L0,\n do_common_subexpression_elimination=False,\n do_constant_folding=False,\n do_function_inlining=False,\n ),\n rewrite_options=rewriter_config_pb2.RewriterConfig(\n 
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF\n ),\n )\n\n\nclass LocalConfig(Config):\n \"\"\"Configure TF Encrypted to use threads on the local CPU.\n\n Each thread instantiates a different Player to simulate secure computations\n without requiring networking. Mostly intended for development/debugging use.\n\n By default new players will be added when looked up for the first time;\n this is useful for instance to get a complete list of players involved\n in a particular computation (see `auto_add_unknown_players`).\n\n :param (str) player_names: List of players to be used in the session.\n :param str job_name: The name of the job.\n :param bool auto_add_unknown_players: Auto-add player on first lookup.\n \"\"\"\n\n def __init__(\n self, player_names=None, job_name=\"localhost\", auto_add_unknown_players=True,\n ) -> None:\n self._job_name = job_name\n self._auto_add_unknown_players = auto_add_unknown_players\n self._players = []\n if player_names is None:\n player_names = []\n for name in player_names:\n self.add_player(name)\n\n def add_player(self, name):\n index = len(self._players)\n dv_str = \"/job:{job_name}/replica:0/task:0/device:CPU:{cpu_id}\"\n player = Player(\n name=name,\n index=index,\n device_name=dv_str.format(job_name=self._job_name, cpu_id=index),\n )\n self._players.append(player)\n return player\n\n @property\n def players(self):\n return self._players\n\n def get_player(self, name_or_player):\n if isinstance(name_or_player, Player):\n # we're passed a player\n assert name_or_player in self._players\n return name_or_player\n\n # we're passed a name\n player = next(\n (player for player in self._players if player.name == name_or_player), None\n )\n if player is None and self._auto_add_unknown_players:\n player = self.add_player(name_or_player)\n return player\n\n def get_players(self, names):\n if isinstance(names, str):\n names = [name.strip() for name in names.split(\",\")]\n assert isinstance(names, list)\n return [player for player in self._players if player.name in names]\n\n def get_tf_config(\n self, log_device_placement=False, disable_optimizations=False,\n ):\n logger.info(\"Players: %s\", [player.name for player in self.players])\n target = \"\"\n config = tf.ConfigProto(\n log_device_placement=log_device_placement,\n allow_soft_placement=False,\n device_count={\"CPU\": len(self._players)},\n graph_options=self.build_graph_options(disable_optimizations),\n )\n return (target, config)\n\n\nclass RemoteConfig(Config):\n \"\"\"Configure TF Encrypted to use network hosts for the different players.\n\n :param (str,str),str->str hostmap: A mapping of hostnames to\n their IP / domain.\n :param str job_name: The name of the job.\n \"\"\"\n\n def __init__(\n self, hostmap, job_name=\"tfe\",\n ):\n assert isinstance(hostmap, dict)\n if not isinstance(hostmap, OrderedDict):\n logger.warning(\n \"Consider passing an ordered dictionary to RemoteConfig instead\"\n \"in order to preserve host mapping.\"\n )\n\n self._job_name = job_name\n self._players = OrderedDict(\n (\n name,\n Player(\n name=name,\n index=index,\n device_name=\"/job:{job_name}/replica:0/task:{task_id}/cpu:0\".format(\n job_name=job_name, task_id=index\n ),\n host=host,\n ),\n )\n for index, (name, host) in enumerate(hostmap.items())\n )\n\n @staticmethod\n def load(filename):\n \"\"\"Constructs a RemoteConfig object from a JSON hostmap file.\n\n :param str filename: Name of file to load from.\n \"\"\"\n with open(filename, \"r\") as f:\n hostmap = json.load(f, 
object_pairs_hook=OrderedDict)\n return RemoteConfig(hostmap)\n\n def save(self, filename):\n \"\"\"Saves the configuration as a JSON hostmap file.\n\n :param str filename: Name of file to save to.\n \"\"\"\n with open(filename, \"w\") as f:\n json.dump(self.hostmap, f)\n\n @property\n def hostmap(self):\n return OrderedDict(\n (player.name, player.host) for player in self._players.values()\n )\n\n @property\n def hosts(self):\n return [player.host for player in self._players.values()]\n\n @property\n def players(self):\n return list(self._players.values())\n\n def get_player(self, name_or_player):\n if isinstance(name_or_player, Player):\n # we're passed a player\n assert name_or_player in self._players.values()\n return name_or_player\n\n # we're passed a name\n return self._players.get(name_or_player)\n\n def get_players(self, names):\n if isinstance(names, str):\n names = [name.strip() for name in names.split(\",\")]\n assert isinstance(names, list)\n return [player for name, player in self._players.items() if name in names]\n\n def server(self, name, start=True):\n \"\"\"Construct a :class:`tf.train.Server` object for the corresponding\n :class:`Player`.\n\n :param str name: Name of player.\n \"\"\"\n player = self.get_player(name)\n assert player is not None, \"'{}' not found in configuration\".format(name)\n cluster = tf.train.ClusterSpec({self._job_name: self.hosts})\n logger.debug(\"Creating server for '%s' using %s\", name, cluster)\n server = tf.train.Server(\n cluster, job_name=self._job_name, task_index=player.index, start=start\n )\n logger.info(\n \"Created server for '%s' as device '%s'; own session target is '%s'\",\n name,\n player.device_name,\n server.target,\n )\n return server\n\n def get_tf_config(self, log_device_placement=False, disable_optimizations=False):\n # always use the first host as master; change config to match\n target = \"grpc://{}\".format(self.hosts[0])\n cpu_cores = _get_docker_cpu_quota()\n if cpu_cores is None:\n config = tf.ConfigProto(\n log_device_placement=log_device_placement,\n allow_soft_placement=False,\n graph_options=self.build_graph_options(disable_optimizations),\n )\n else:\n config = tf.ConfigProto(\n log_device_placement=log_device_placement,\n allow_soft_placement=False,\n inter_op_parallelism_threads=cpu_cores,\n intra_op_parallelism_threads=cpu_cores,\n graph_options=self.build_graph_options(disable_optimizations),\n )\n return (target, config)\n\n\n__config__ = LocalConfig()\n\n\ndef get_config():\n \"\"\"Returns the current config.\"\"\"\n return __config__\n\n\ndef set_config(config) -> None:\n \"\"\"Sets the current config.\n\n :param Config config: Intended configuration.\n \"\"\"\n global __config__\n __config__ = config\n" ]
[ [ "numpy.array" ], [ "tensorflow.constant", "numpy.testing.assert_array_equal", "numpy.testing.assert_raises", "tensorflow.Session", "tensorflow.load_op_library" ], [ "tensorflow.matmul", "tensorflow.Graph", "tensorflow.train.Server", "tensorflow.constant", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.OptimizerOptions", "tensorflow.train.ClusterSpec", "tensorflow.GraphOptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guilhermeprokisch/catalyst
[ "21e096b261912d9e905584178d6ee626072c23cb", "21e096b261912d9e905584178d6ee626072c23cb" ]
[ "catalyst/pipeline/mixins.py", "catalyst/utils/calendars/exchange_calendar_tsx.py" ]
[ "\"\"\"\r\nMixins classes for use with Filters and Factors.\r\n\"\"\"\r\nfrom textwrap import dedent\r\n\r\nfrom numpy import (\r\n array,\r\n full,\r\n recarray,\r\n vstack,\r\n)\r\nfrom pandas import NaT as pd_NaT\r\n\r\nfrom catalyst.errors import (\r\n WindowLengthNotPositive,\r\n UnsupportedDataType,\r\n NoFurtherDataError,\r\n)\r\nfrom catalyst.utils.control_flow import nullctx\r\nfrom catalyst.utils.input_validation import expect_types\r\nfrom catalyst.utils.sharedoc import (\r\n format_docstring,\r\n PIPELINE_ALIAS_NAME_DOC,\r\n PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,\r\n)\r\nfrom catalyst.utils.pandas_utils import nearest_unequal_elements\r\n\r\n\r\nfrom .downsample_helpers import (\r\n select_sampling_indices,\r\n expect_downsample_frequency,\r\n)\r\nfrom .sentinels import NotSpecified\r\nfrom .term import Term\r\n\r\n\r\nclass PositiveWindowLengthMixin(object):\r\n \"\"\"\r\n Validation mixin enforcing that a Term gets a positive WindowLength\r\n \"\"\"\r\n def _validate(self):\r\n super(PositiveWindowLengthMixin, self)._validate()\r\n if not self.windowed:\r\n raise WindowLengthNotPositive(window_length=self.window_length)\r\n\r\n\r\nclass SingleInputMixin(object):\r\n \"\"\"\r\n Validation mixin enforcing that a Term gets a length-1 inputs list.\r\n \"\"\"\r\n def _validate(self):\r\n super(SingleInputMixin, self)._validate()\r\n num_inputs = len(self.inputs)\r\n if num_inputs != 1:\r\n raise ValueError(\r\n \"{typename} expects only one input, \"\r\n \"but received {num_inputs} instead.\".format(\r\n typename=type(self).__name__,\r\n num_inputs=num_inputs\r\n )\r\n )\r\n\r\n\r\nclass StandardOutputs(object):\r\n \"\"\"\r\n Validation mixin enforcing that a Term cannot produce non-standard outputs.\r\n \"\"\"\r\n def _validate(self):\r\n super(StandardOutputs, self)._validate()\r\n if self.outputs is not NotSpecified:\r\n raise ValueError(\r\n \"{typename} does not support custom outputs,\"\r\n \" but received custom outputs={outputs}.\".format(\r\n typename=type(self).__name__,\r\n outputs=self.outputs,\r\n )\r\n )\r\n\r\n\r\nclass RestrictedDTypeMixin(object):\r\n \"\"\"\r\n Validation mixin enforcing that a term has a specific dtype.\r\n \"\"\"\r\n ALLOWED_DTYPES = NotSpecified\r\n\r\n def _validate(self):\r\n super(RestrictedDTypeMixin, self)._validate()\r\n assert self.ALLOWED_DTYPES is not NotSpecified, (\r\n \"ALLOWED_DTYPES not supplied on subclass \"\r\n \"of RestrictedDTypeMixin: %s.\" % type(self).__name__\r\n )\r\n\r\n if self.dtype not in self.ALLOWED_DTYPES:\r\n raise UnsupportedDataType(\r\n typename=type(self).__name__,\r\n dtype=self.dtype,\r\n )\r\n\r\n\r\nclass CustomTermMixin(object):\r\n \"\"\"\r\n Mixin for user-defined rolling-window Terms.\r\n\r\n Implements `_compute` in terms of a user-defined `compute` function, which\r\n is mapped over the input windows.\r\n\r\n Used by CustomFactor, CustomFilter, CustomClassifier, etc.\r\n \"\"\"\r\n ctx = nullctx()\r\n\r\n def __new__(cls,\r\n inputs=NotSpecified,\r\n outputs=NotSpecified,\r\n window_length=NotSpecified,\r\n mask=NotSpecified,\r\n dtype=NotSpecified,\r\n missing_value=NotSpecified,\r\n ndim=NotSpecified,\r\n **kwargs):\r\n\r\n unexpected_keys = set(kwargs) - set(cls.params)\r\n if unexpected_keys:\r\n raise TypeError(\r\n \"{termname} received unexpected keyword \"\r\n \"arguments {unexpected}\".format(\r\n termname=cls.__name__,\r\n unexpected={k: kwargs[k] for k in unexpected_keys},\r\n )\r\n )\r\n\r\n return super(CustomTermMixin, cls).__new__(\r\n cls,\r\n inputs=inputs,\r\n 
outputs=outputs,\r\n window_length=window_length,\r\n mask=mask,\r\n dtype=dtype,\r\n missing_value=missing_value,\r\n ndim=ndim,\r\n **kwargs\r\n )\r\n\r\n def compute(self, today, assets, out, *arrays):\r\n \"\"\"\r\n Override this method with a function that writes a value into `out`.\r\n \"\"\"\r\n raise NotImplementedError()\r\n\r\n def _allocate_output(self, windows, shape):\r\n \"\"\"\r\n Allocate an output array whose rows should be passed to `self.compute`.\r\n\r\n The resulting array must have a shape of ``shape``.\r\n\r\n If we have standard outputs (i.e. self.outputs is NotSpecified), the\r\n default is an empty ndarray whose dtype is ``self.dtype``.\r\n\r\n If we have an outputs tuple, the default is an empty recarray with\r\n ``self.outputs`` as field names. Each field will have dtype\r\n ``self.dtype``.\r\n\r\n This can be overridden to control the kind of array constructed\r\n (e.g. to produce a LabelArray instead of an ndarray).\r\n \"\"\"\r\n missing_value = self.missing_value\r\n outputs = self.outputs\r\n if outputs is not NotSpecified:\r\n out = recarray(\r\n shape,\r\n formats=[self.dtype.str] * len(outputs),\r\n names=outputs,\r\n )\r\n out[:] = missing_value\r\n else:\r\n out = full(shape, missing_value, dtype=self.dtype)\r\n return out\r\n\r\n def _format_inputs(self, windows, column_mask):\r\n inputs = []\r\n for input_ in windows:\r\n window = next(input_)\r\n if window.shape[1] == 1:\r\n # Do not mask single-column inputs.\r\n inputs.append(window)\r\n else:\r\n inputs.append(window[:, column_mask])\r\n return inputs\r\n\r\n def _compute(self, windows, dates, assets, mask):\r\n \"\"\"\r\n Call the user's `compute` function on each window with a pre-built\r\n output array.\r\n \"\"\"\r\n format_inputs = self._format_inputs\r\n compute = self.compute\r\n params = self.params\r\n ndim = self.ndim\r\n\r\n shape = (len(mask), 1) if ndim == 1 else mask.shape\r\n out = self._allocate_output(windows, shape)\r\n\r\n with self.ctx:\r\n for idx, date in enumerate(dates):\r\n # Never apply a mask to 1D outputs.\r\n out_mask = array([True]) if ndim == 1 else mask[idx]\r\n\r\n # Mask our inputs as usual.\r\n inputs_mask = mask[idx]\r\n\r\n masked_assets = assets[inputs_mask]\r\n out_row = out[idx][out_mask]\r\n inputs = format_inputs(windows, inputs_mask)\r\n\r\n compute(date, masked_assets, out_row, *inputs, **params)\r\n out[idx][out_mask] = out_row\r\n return out\r\n\r\n def short_repr(self):\r\n return type(self).__name__ + '(%d)' % self.window_length\r\n\r\n\r\nclass LatestMixin(SingleInputMixin):\r\n \"\"\"\r\n Mixin for behavior shared by Custom{Factor,Filter,Classifier}.\r\n \"\"\"\r\n window_length = 1\r\n\r\n def compute(self, today, assets, out, data):\r\n out[:] = data[-1]\r\n\r\n def _validate(self):\r\n super(LatestMixin, self)._validate()\r\n if self.inputs[0].dtype != self.dtype:\r\n raise TypeError(\r\n \"{name} expected an input of dtype {expected}, \"\r\n \"but got {actual} instead.\".format(\r\n name=type(self).__name__,\r\n expected=self.dtype,\r\n actual=self.inputs[0].dtype,\r\n )\r\n )\r\n\r\n\r\nclass AliasedMixin(SingleInputMixin):\r\n \"\"\"\r\n Mixin for aliased terms.\r\n \"\"\"\r\n def __new__(cls, term, name):\r\n return super(AliasedMixin, cls).__new__(\r\n cls,\r\n inputs=(term,),\r\n outputs=term.outputs,\r\n window_length=0,\r\n name=name,\r\n dtype=term.dtype,\r\n missing_value=term.missing_value,\r\n ndim=term.ndim,\r\n window_safe=term.window_safe,\r\n )\r\n\r\n def _init(self, name, *args, **kwargs):\r\n self.name = name\r\n return 
super(AliasedMixin, self)._init(*args, **kwargs)\r\n\r\n @classmethod\r\n def _static_identity(cls, name, *args, **kwargs):\r\n return (\r\n super(AliasedMixin, cls)._static_identity(*args, **kwargs),\r\n name,\r\n )\r\n\r\n def _compute(self, inputs, dates, assets, mask):\r\n return inputs[0]\r\n\r\n def __repr__(self):\r\n return '{type}({inner_type}(...), name={name!r})'.format(\r\n type=type(self).__name__,\r\n inner_type=type(self.inputs[0]).__name__,\r\n name=self.name,\r\n )\r\n\r\n def short_repr(self):\r\n return self.name\r\n\r\n @classmethod\r\n def make_aliased_type(cls, other_base):\r\n \"\"\"\r\n Factory for making Aliased{Filter,Factor,Classifier}.\r\n \"\"\"\r\n docstring = dedent(\r\n \"\"\"\r\n A {t} that names another {t}.\r\n\r\n Parameters\r\n ----------\r\n term : {t}\r\n {{name}}\r\n \"\"\"\r\n ).format(t=other_base.__name__)\r\n\r\n doc = format_docstring(\r\n owner_name=other_base.__name__,\r\n docstring=docstring,\r\n formatters={'name': PIPELINE_ALIAS_NAME_DOC},\r\n )\r\n\r\n return type(\r\n 'Aliased' + other_base.__name__,\r\n (cls, other_base),\r\n {'__doc__': doc,\r\n '__module__': other_base.__module__},\r\n )\r\n\r\n\r\nclass DownsampledMixin(StandardOutputs):\r\n \"\"\"\r\n Mixin for behavior shared by Downsampled{Factor,Filter,Classifier}\r\n\r\n A downsampled term is a wrapper around the \"real\" term that performs actual\r\n computation. The downsampler is responsible for calling the real term's\r\n `compute` method at selected intervals and forward-filling the computed\r\n values.\r\n\r\n Downsampling is not currently supported for terms with multiple outputs.\r\n \"\"\"\r\n # There's no reason to take a window of a downsampled term. The whole\r\n # point is that you're re-using the same result multiple times.\r\n window_safe = False\r\n\r\n @expect_types(term=Term)\r\n @expect_downsample_frequency\r\n def __new__(cls, term, frequency):\r\n return super(DownsampledMixin, cls).__new__(\r\n cls,\r\n inputs=term.inputs,\r\n outputs=term.outputs,\r\n window_length=term.window_length,\r\n mask=term.mask,\r\n frequency=frequency,\r\n wrapped_term=term,\r\n dtype=term.dtype,\r\n missing_value=term.missing_value,\r\n ndim=term.ndim,\r\n )\r\n\r\n def _init(self, frequency, wrapped_term, *args, **kwargs):\r\n self._frequency = frequency\r\n self._wrapped_term = wrapped_term\r\n return super(DownsampledMixin, self)._init(*args, **kwargs)\r\n\r\n @classmethod\r\n def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):\r\n return (\r\n super(DownsampledMixin, cls)._static_identity(*args, **kwargs),\r\n frequency,\r\n wrapped_term,\r\n )\r\n\r\n def compute_extra_rows(self,\r\n all_dates,\r\n start_date,\r\n end_date,\r\n min_extra_rows):\r\n \"\"\"\r\n Ensure that min_extra_rows pushes us back to a computation date.\r\n\r\n Parameters\r\n ----------\r\n all_dates : pd.DatetimeIndex\r\n The trading sessions against which ``self`` will be computed.\r\n start_date : pd.Timestamp\r\n The first date for which final output is requested.\r\n end_date : pd.Timestamp\r\n The last date for which final output is requested.\r\n min_extra_rows : int\r\n The minimum number of extra rows required of ``self``, as\r\n determined by other terms that depend on ``self``.\r\n\r\n Returns\r\n -------\r\n extra_rows : int\r\n The number of extra rows to compute. 
This will be the minimum\r\n number of rows required to make our computed start_date fall on a\r\n recomputation date.\r\n \"\"\"\r\n try:\r\n current_start_pos = all_dates.get_loc(start_date) - min_extra_rows\r\n if current_start_pos < 0:\r\n raise NoFurtherDataError(\r\n initial_message=\"Insufficient data to compute Pipeline:\",\r\n first_date=all_dates[0],\r\n lookback_start=start_date,\r\n lookback_length=min_extra_rows,\r\n )\r\n except KeyError:\r\n before, after = nearest_unequal_elements(all_dates, start_date)\r\n raise ValueError(\r\n \"Pipeline start_date {start_date} is not in calendar.\\n\"\r\n \"Latest date before start_date is {before}.\\n\"\r\n \"Earliest date after start_date is {after}.\".format(\r\n start_date=start_date,\r\n before=before,\r\n after=after,\r\n )\r\n )\r\n\r\n # Our possible target dates are all the dates on or before the current\r\n # starting position.\r\n # TODO: Consider bounding this below by self.window_length\r\n candidates = all_dates[:current_start_pos + 1]\r\n\r\n # Choose the latest date in the candidates that is the start of a new\r\n # period at our frequency.\r\n choices = select_sampling_indices(candidates, self._frequency)\r\n\r\n # If we have choices, the last choice is the first date if the\r\n # period containing current_start_date. Choose it.\r\n new_start_date = candidates[choices[-1]]\r\n\r\n # Add the difference between the new and old start dates to get the\r\n # number of rows for the new start_date.\r\n new_start_pos = all_dates.get_loc(new_start_date)\r\n assert new_start_pos <= current_start_pos, \\\r\n \"Computed negative extra rows!\"\r\n\r\n return min_extra_rows + (current_start_pos - new_start_pos)\r\n\r\n def _compute(self, inputs, dates, assets, mask):\r\n \"\"\"\r\n Compute by delegating to self._wrapped_term._compute on sample dates.\r\n\r\n On non-sample dates, forward-fill from previously-computed samples.\r\n \"\"\"\r\n to_sample = dates[select_sampling_indices(dates, self._frequency)]\r\n assert to_sample[0] == dates[0], \\\r\n \"Misaligned sampling dates in %s.\" % type(self).__name__\r\n\r\n real_compute = self._wrapped_term._compute\r\n\r\n # Inputs will contain different kinds of values depending on whether or\r\n # not we're a windowed computation.\r\n\r\n # If we're windowed, then `inputs` is a list of iterators of ndarrays.\r\n # If we're not windowed, then `inputs` is just a list of ndarrays.\r\n # There are two things we care about doing with the input:\r\n # 1. Preparing an input to be passed to our wrapped term.\r\n # 2. Skipping an input if we're going to use an already-computed row.\r\n # We perform these actions differently based on the expected kind of\r\n # input, and we encapsulate these actions with closures so that we\r\n # don't clutter the code below with lots of branching.\r\n if self.windowed:\r\n # If we're windowed, inputs are stateful AdjustedArrays. We don't\r\n # need to do any preparation before forwarding to real_compute, but\r\n # we need to call `next` on them if we want to skip an iteration.\r\n def prepare_inputs():\r\n return inputs\r\n\r\n def skip_this_input():\r\n for w in inputs:\r\n next(w)\r\n else:\r\n # If we're not windowed, inputs are just ndarrays. 
We need to\r\n # slice out a single row when forwarding to real_compute, but we\r\n # don't need to do anything to skip an input.\r\n def prepare_inputs():\r\n # i is the loop iteration variable below.\r\n return [a[[i]] for a in inputs]\r\n\r\n def skip_this_input():\r\n pass\r\n\r\n results = []\r\n samples = iter(to_sample)\r\n next_sample = next(samples)\r\n for i, compute_date in enumerate(dates):\r\n if next_sample == compute_date:\r\n results.append(\r\n real_compute(\r\n prepare_inputs(),\r\n dates[i:i + 1],\r\n assets,\r\n mask[i:i + 1],\r\n )\r\n )\r\n try:\r\n next_sample = next(samples)\r\n except StopIteration:\r\n # No more samples to take. Set next_sample to Nat, which\r\n # compares False with any other datetime.\r\n next_sample = pd_NaT\r\n else:\r\n skip_this_input()\r\n # Copy results from previous sample period.\r\n results.append(results[-1])\r\n\r\n # We should have exhausted our sample dates.\r\n try:\r\n next_sample = next(samples)\r\n except StopIteration:\r\n pass\r\n else:\r\n raise AssertionError(\"Unconsumed sample date: %s\" % next_sample)\r\n\r\n # Concatenate stored results.\r\n return vstack(results)\r\n\r\n @classmethod\r\n def make_downsampled_type(cls, other_base):\r\n \"\"\"\r\n Factory for making Downsampled{Filter,Factor,Classifier}.\r\n \"\"\"\r\n docstring = dedent(\r\n \"\"\"\r\n A {t} that defers to another {t} at lower-than-daily frequency.\r\n\r\n Parameters\r\n ----------\r\n term : {t}\r\n {{frequency}}\r\n \"\"\"\r\n ).format(t=other_base.__name__)\r\n\r\n doc = format_docstring(\r\n owner_name=other_base.__name__,\r\n docstring=docstring,\r\n formatters={'frequency': PIPELINE_DOWNSAMPLING_FREQUENCY_DOC},\r\n )\r\n\r\n return type(\r\n 'Downsampled' + other_base.__name__,\r\n (cls, other_base,),\r\n {'__doc__': doc,\r\n '__module__': other_base.__module__},\r\n )\r\n", "from datetime import time\r\nfrom pandas.tseries.holiday import (\r\n Holiday,\r\n DateOffset,\r\n MO,\r\n weekend_to_monday,\r\n GoodFriday\r\n)\r\nfrom pytz import timezone\r\n\r\nfrom catalyst.utils.calendars.trading_calendar import TradingCalendar, \\\r\n HolidayCalendar\r\nfrom catalyst.utils.calendars.us_holidays import Christmas\r\nfrom catalyst.utils.calendars.exchange_calendar_lse import (\r\n WeekendChristmas,\r\n BoxingDay,\r\n WeekendBoxingDay\r\n)\r\n\r\n# New Year's Day\r\nTSXNewYearsDay = Holiday(\r\n \"New Year's Day\",\r\n month=1,\r\n day=1,\r\n observance=weekend_to_monday,\r\n)\r\n# Ontario Family Day\r\nFamilyDay = Holiday(\r\n \"Family Day\",\r\n month=2,\r\n day=1,\r\n offset=DateOffset(weekday=MO(3)),\r\n start_date='2008-01-01',\r\n)\r\n# Victoria Day\r\nVictoriaDay = Holiday(\r\n 'Victoria Day',\r\n month=5,\r\n day=25,\r\n offset=DateOffset(weekday=MO(-1)),\r\n)\r\n# Canada Day\r\nCanadaDay = Holiday(\r\n 'Canada Day',\r\n month=7,\r\n day=1,\r\n observance=weekend_to_monday,\r\n)\r\n# Civic Holiday\r\nCivicHoliday = Holiday(\r\n 'Civic Holiday',\r\n month=8,\r\n day=1,\r\n offset=DateOffset(weekday=MO(1)),\r\n)\r\n# Labor Day\r\nLaborDay = Holiday(\r\n 'Labor Day',\r\n month=9,\r\n day=1,\r\n offset=DateOffset(weekday=MO(1)),\r\n)\r\n# Thanksgiving\r\nThanksgiving = Holiday(\r\n 'Thanksgiving',\r\n month=10,\r\n day=1,\r\n offset=DateOffset(weekday=MO(2)),\r\n)\r\n\r\n\r\nclass TSXExchangeCalendar(TradingCalendar):\r\n \"\"\"\r\n Exchange calendar for the Toronto Stock Exchange\r\n\r\n Open Time: 9:30 AM, EST\r\n Close Time: 4:00 PM, EST\r\n\r\n Regularly-Observed Holidays:\r\n - New Years Day (observed on first business day on/after)\r\n - Family 
Day (Third Monday in February after 2008)\r\n - Good Friday\r\n - Victoria Day (Monday before May 25th)\r\n - Canada Day (July 1st, observed first business day after)\r\n - Civic Holiday (First Monday in August)\r\n - Labor Day (First Monday in September)\r\n - Thanksgiving (Second Monday in October)\r\n - Christmas Day\r\n - Dec. 27th (if Christmas is on a weekend)\r\n - Boxing Day\r\n - Dec. 28th (if Boxing Day is on a weekend)\r\n \"\"\"\r\n\r\n @property\r\n def name(self):\r\n return \"TSX\"\r\n\r\n @property\r\n def tz(self):\r\n return timezone('Canada/Atlantic')\r\n\r\n @property\r\n def open_time(self):\r\n return time(9, 31)\r\n\r\n @property\r\n def close_time(self):\r\n return time(16)\r\n\r\n @property\r\n def regular_holidays(self):\r\n return HolidayCalendar([\r\n TSXNewYearsDay,\r\n FamilyDay,\r\n GoodFriday,\r\n VictoriaDay,\r\n CanadaDay,\r\n CivicHoliday,\r\n LaborDay,\r\n Thanksgiving,\r\n Christmas,\r\n WeekendChristmas,\r\n BoxingDay,\r\n WeekendBoxingDay\r\n ])\r\n" ]
[ [ "numpy.array", "numpy.vstack", "numpy.full" ], [ "pandas.tseries.holiday.MO", "pandas.tseries.holiday.Holiday" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ssbyrne89/DIYInvestmentPrimer
[ "eae81d9e7c67b5d912bc7ed7037432f03ee4792c" ]
[ "web_app/routes/company_routes.py" ]
[ "# web_app/routes/company_routes.py\nimport pandas as pd\nfrom flask import Blueprint, jsonify, request, render_template #, flash, redirect\n\nfrom web_app.models import *\n\ncompany_routes = Blueprint(\"company_routes\", __name__)\n\n@company_routes.route(\"/div_yield\")\ndef seeDivYield():\n return render_template(\"highest_DivYield_charts.html\")\n\n@company_routes.route(\"/highest_increasing_divs\")\ndef seecompanies_w_highest_dividend_increases():\n return render_template(\"companies_w_highest_dividend_increases.html\")\n\n@company_routes.route(\"/most_affordable_div_payers\")\ndef seemost_affordable_div_payers():\n return render_template(\"most_affordable.html\")\n\n\n\n@company_routes.route(\"/companies\")\ndef list_companies_for_humans():\n return render_template(\"All_SP500.html\", message=\"Here's all the companies on the S&P 500\",\n companies=get_AllCompanies())\n\n\n@company_routes.route(\"/test\")\ndef seeTEST():\n return render_template(\"test.html\", message=\"Here's all the companies on the S&P 500\")\n\ndef get_AllCompanies():\n all = Company_Info.query.all()\n names = [record.Company_Name for record in all]\n return names\n\n\ndef createCompanyInfoTable(): # ran once\n SandP500 = pd.read_csv('../DIYInvestmentPrimer/SandP_500_companies.csv')\n\n for x in range(0, len(SandP500)):\n db.create_all()\n # build a Company_Info record from the CSV row and persist it\n company_entry = Company_Info(Company_Name=SandP500['Security'][x],\n Company_Ticker=SandP500['Symbol'][x],\n Sector=SandP500['GICS Sector'][x],\n SubIndustry=SandP500['GICS Sub-Industry'][x],\n HQ_Location=SandP500['Headquarters Location'][x],\n Date_first_added_to_SP500=SandP500['Date first added'][x],\n Founded=SandP500['Founded'][x])\n\n db.session.add(company_entry)\n db.session.commit()\n\n " ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
epfl-lasa/crowdbot-evaluation-tools
[ "0e98c76428f6af5a4caa6b83b91ac05b3ed300ce" ]
[ "qolo/viz_traj.py" ]
[ "#!/usr/bin/env python3\n# -*-coding:utf-8 -*-\n# =============================================================================\n\"\"\"\n@Author : Yujie He\n@File : viz_traj.py\n@Date created : 2022/02/25\n@Maintainer : Yujie He\n@Email : [email protected]\n\"\"\"\n# =============================================================================\n\"\"\"\nThe module provides script to visualize the robot trajectory and the longest k\n(default:3) trajectories of the pedestrians around Qolo robot.\nExample: python qolo/viz_traj.py -f 0410_mds --all --overwrite\n\"\"\"\n# =============================================================================\n\"\"\"\nTODO:\n1. plot vx/vy or linear/angular velocity\n\"\"\"\n# =============================================================================\n\nimport os\nimport sys\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom qolo.core.crowdbot_data import CrowdBotDatabase\nfrom qolo.utils.geo_util import quat2yaw\nfrom qolo.utils.file_io_util import (\n load_json2dict,\n load_pkl2dict,\n)\nfrom qolo.utils.res_plot_util import (\n viz_qolo_ped_traj_full,\n viz_qolo_ped_traj_frame,\n get_nlongest_peds,\n viz_ped_speed,\n viz_ped_speed_vw,\n)\n\ncolor_list = ['navy', 'blue', 'slateblue', 'violet', 'skyblue']\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"visualize trajectories of pedestrians around Qolo\"\n )\n\n parser.add_argument(\n \"-f\",\n \"--folder\",\n default=\"0410_mds\",\n type=str,\n help=\"different subfolder in rosbag/ dir\",\n )\n parser.add_argument(\n \"--seq\",\n default=\"2021-04-10-11-28-10\", # 2021-04-10-10-38-36 2021-04-10-10-41-17\n type=str,\n help=\"specific sequence in the subfolder\",\n )\n parser.add_argument(\n \"--all\",\n dest=\"process_all\",\n action=\"store_true\",\n help=\"Process all sequences and disable single sequences\",\n )\n parser.set_defaults(process_all=False)\n parser.add_argument(\n \"--overwrite\",\n dest=\"overwrite\",\n action=\"store_true\",\n help=\"Whether to overwrite existing output (default: false)\",\n )\n parser.set_defaults(overwrite=True)\n args = parser.parse_args()\n\n cb_data = CrowdBotDatabase(args.folder)\n\n if args.seq is None or args.process_all:\n seqs = [cb_data.seqs[seq_idx] for seq_idx in range(cb_data.nr_seqs())]\n else:\n seqs = [args.seq]\n\n for seq_idx, seq in enumerate(seqs):\n\n sq_idx = cb_data.seqs.index(seq)\n seq_len = cb_data.nr_frames(sq_idx)\n\n print(\"({}/{}): {} with {} frames\".format(seq_idx + 1, len(seqs), seq, seq_len))\n\n # dest: path_img_path\n eval_res_dir = os.path.join(cb_data.metrics_dir)\n\n if not os.path.exists(eval_res_dir):\n print(\"Result images and npy will be saved in {}\".format(eval_res_dir))\n os.makedirs(eval_res_dir, exist_ok=True)\n\n path_img_path = os.path.join(eval_res_dir, seq, seq + \"_traj.png\")\n proc_path_img_path = os.path.join(eval_res_dir, seq, seq + \"_traj_proc.png\")\n\n # path_img_path = os.path.join(eval_res_dir, seq, seq + \"_{}_traj.png\".format(frame_id))\n plot_exist = os.path.exists(path_img_path)\n if plot_exist and not args.overwrite:\n print(\"{} plots already generated!!!\".format(seq))\n print(\"Will not overwrite. 
If you want to overwrite, use flag --overwrite\")\n continue\n\n # src 1: trajectory data\n traj_dir = os.path.join(cb_data.ped_data_dir, \"traj\")\n if not os.path.exists(traj_dir):\n sys.exit(\"Please use det2traj.py to extract pedestrian trajectories first!\")\n traj_pkl_path = os.path.join(traj_dir, seq + '.pkl')\n # traj_json_path = os.path.join(traj_dir, seq + '.json')\n proc_traj_pkl_path = os.path.join(traj_dir, seq + '_proc.pkl')\n\n # src 2: qolo data\n tf_qolo_dir = os.path.join(cb_data.source_data_dir, \"tf_qolo\")\n pose_stamp_path = os.path.join(tf_qolo_dir, seq + \"_tfqolo_sampled.npy\")\n pose_stamped = np.load(pose_stamp_path, allow_pickle=True).item()\n\n # src 3: velocity path\n vel_dir = os.path.join(cb_data.ped_data_dir, \"vel\")\n vel_pkl_path = os.path.join(vel_dir, seq + '.pkl')\n\n trans_array = pose_stamped[\"position\"]\n qolo_pose = {\n 'x': trans_array[:, 0],\n 'y': trans_array[:, 1],\n 'init_ori': pose_stamped[\"orientation\"],\n }\n\n ped_traj_dict = load_pkl2dict(traj_pkl_path)\n # ped_traj_dict = load_json2dict(traj_json_path)\n ped_vel_dict = load_pkl2dict(vel_pkl_path)\n\n top_ids = get_nlongest_peds(ped_traj_dict, ped_num=5)\n\n viz_qolo_ped_traj_full(\n path_img_path,\n qolo_pose,\n ped_traj_dict,\n viz_ids=top_ids,\n color_list=color_list,\n )\n\n # visualize processed trajectory\n proc_ped_traj_dict = load_pkl2dict(proc_traj_pkl_path)\n viz_qolo_ped_traj_full(\n proc_path_img_path,\n qolo_pose,\n proc_ped_traj_dict,\n viz_ids=top_ids,\n color_list=color_list,\n )\n\n ped_vel_img_path1 = os.path.join(eval_res_dir, seq, seq + \"_ped_vel.png\")\n ped_vel_img_path2 = os.path.join(eval_res_dir, seq, seq + \"_ped_vw.png\")\n\n viz_ped_speed(\n ped_vel_img_path1,\n ped_vel_dict,\n viz_ids=top_ids,\n color_list=color_list,\n )\n\n viz_ped_speed_vw(\n ped_vel_img_path2,\n ped_vel_dict,\n viz_ids=top_ids,\n color_list=color_list,\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
muzudho/collatz
[ "da99c1cf4cc65c42eeb3ea07134c0e25fc35e606" ]
[ "lifegame_lj.py" ]
[ "\"\"\"\nLeft-justified notation (Left justified)\n\"\"\"\n\nimport os\nimport numpy as np\n\n# Environment variable\nRADIX = int(os.getenv(\"RADIX\", 2))\n\n# Used for column alignment. Defaults chosen for readability when decimal 27 is entered (^~^)\ncount_width = 3\ncount_str = \"\"\ndec_width = 4\ndec_str = \"\"\nradix_str = \"\"\n# Number of values printed so far\ncount = 0\n\ndef update_print_number(dec):\n \"\"\"Update the text to be printed\"\"\"\n global count_width\n global count_str\n global dec_width\n global dec_str\n global radix_str\n global count\n global RADIX\n\n count_str = f\"{count}\"\n\n # Update the column widths\n if count_width < len(count_str):\n count_width += 1\n if dec_width < len(str(dec)):\n dec_width += 1\n\n count_str = count_str.rjust(count_width)\n radix_str = str(np.base_repr(dec, RADIX))\n dec_str = str(dec).rjust(dec_width)\n count += 1\n\ndef print_number():\n \"\"\"Print one line\"\"\"\n global count_str\n global dec_str\n global radix_str\n\n print(f\"[{count_str}] ({dec_str}) {radix_str}\")\n\n# Prompt for a number.\nprint(f\"RADIX={RADIX}\")\nprint(\"Please enter a number.\")\nprint(\"Example 1: 27\")\nprint(\"Example 2: 0b11011\")\n\n# For simplicity, all internal arithmetic is done in decimal\nnumber_str = input()\n\nif number_str.startswith(\"0b\"):\n radix_str = number_str[2:]\n dec = int(radix_str, 2) # convert binary input to decimal\nelse:\n dec = int(number_str) # decimal\n\n# Initial output\nprint(f\"Start\")\nupdate_print_number(dec)\nprint_number()\n\nwhile True:\n # Divide by 2 until the number is odd (^~^)\n while dec % 2 == 0:\n dec = dec // 2\n\n # Print\n # TODO an option to skip these even values would be nice (^~^)\n update_print_number(dec)\n print_number()\n\n # Stop once we reach 1 (^~^)\n if dec==1:\n break\n\n # Apply 3x+1 (^~^)\n dec = 3 * dec + 1\n\n # Print\n update_print_number(dec)\n print_number()\n\nprint(f\"Finished\")\n" ]
[ [ "numpy.base_repr" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
balazssimon/ml-playground
[ "c2eba497bebc53e5a03807bdd8873c55f0ec73e1", "c2eba497bebc53e5a03807bdd8873c55f0ec73e1", "c2eba497bebc53e5a03807bdd8873c55f0ec73e1" ]
[ "udemy/Deep Learning A-Z/Volume 2 - Unsupervised Deep Learning/Part 4 - Self Organizing Maps (SOM)/mega_case_study.py", "udemy/Machine Learning A-Z/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/svr.py", "udemy/Deep Learning A-Z/Volume 2 - Unsupervised Deep Learning/Part 4 - Self Organizing Maps (SOM)/som.py" ]
[ "# Mega Case Study - Make a Hybrid Deep Learning Model\n\n\n\n# Part 1 - Identify the Frauds with the Self-Organizing Map\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Credit_Card_Applications.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\nX = sc.fit_transform(X)\n\n# Training the SOM\nfrom minisom import MiniSom\nsom = MiniSom(x = 10, y = 10, input_len = 15, sigma = 1.0, learning_rate = 0.5)\nsom.random_weights_init(X)\nsom.train_random(data = X, num_iteration = 100)\n\n# Visualizing the results\nfrom pylab import bone, pcolor, colorbar, plot, show\nbone()\npcolor(som.distance_map().T)\ncolorbar()\nmarkers = ['o', 's']\ncolors = ['r', 'g']\nfor i, x in enumerate(X):\n w = som.winner(x)\n plot(w[0] + 0.5,\n w[1] + 0.5,\n markers[y[i]],\n markeredgecolor = colors[y[i]],\n markerfacecolor = 'None',\n markersize = 10,\n markeredgewidth = 2)\nshow()\n\n# Finding the frauds\nmappings = som.win_map(X)\nfrauds = np.concatenate((mappings[(1,7)], mappings[1,4]), axis = 0)\nfrauds = sc.inverse_transform(frauds)\n\n\n\n# Part 2 - Going from Unsupervised to Supervised Deep Learning\n\n# Creating the matrix of features\ncustomers = dataset.iloc[:, 1:].values\n\n# Creating the dependent variable\nis_fraud = np.zeros(len(dataset))\nfor i in range(len(dataset)):\n if dataset.iloc[i,0] in frauds:\n is_fraud[i] = 1\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\ncustomers = sc.fit_transform(customers)\n\n# Part 2 - Now let's make the ANN!\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initialising the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\nclassifier.add(Dense(units = 2, kernel_initializer = 'uniform', activation = 'relu', input_dim = 15))\n\n# Adding the output layer\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting the ANN to the Training set\nclassifier.fit(customers, is_fraud, batch_size = 1, epochs = 2)\n\n# Predicting the probabilities of frauds\ny_pred = classifier.predict(customers)\ny_pred = np.concatenate((dataset.iloc[:, 0:1].values, y_pred), axis = 1)\ny_pred = y_pred[y_pred[:, 1].argsort()]", "# SVR\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values.reshape(-1,1)\n\n# Splitting the dataset into the Training set and Test set\n\"\"\"from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX = sc_X.fit_transform(X)\nsc_y = StandardScaler()\ny = sc_y.fit_transform(y)\n\n# Fitting SVR to the dataset\nfrom sklearn.svm import SVR\nregressor = SVR(kernel='rbf')\nregressor.fit(X, y)\n\n\n# Predicting a new result\ny_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.array([[6.5]]))))\n\n# Visualising the Regression 
results\nplt.scatter(X, y, color = 'red')\nplt.plot(X, regressor.predict(X), color = 'blue')\nplt.title('Truth or Bluff (SVR)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n", "# Self Organizing Map\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Credit_Card_Applications.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\nX = sc.fit_transform(X)\n\n# Training the SOM\nfrom minisom import MiniSom\nsom = MiniSom(x = 10, y = 10, input_len = 15, sigma = 1.0, learning_rate = 0.5)\nsom.random_weights_init(X)\nsom.train_random(data = X, num_iteration = 100)\n\n# Visualizing the results\nfrom pylab import bone, pcolor, colorbar, plot, show\nbone()\npcolor(som.distance_map().T)\ncolorbar()\nmarkers = ['o', 's']\ncolors = ['r', 'g']\nfor i, x in enumerate(X):\n w = som.winner(x)\n plot(w[0] + 0.5,\n w[1] + 0.5,\n markers[y[i]],\n markeredgecolor = colors[y[i]],\n markerfacecolor = 'None',\n markersize = 10,\n markeredgewidth = 2)\nshow()\n\n# Finding the frauds\nmappings = som.win_map(X)\nfrauds = np.concatenate((mappings[(3,5)], mappings[(3,7)]), axis = 0)\nfrauds = sc.inverse_transform(frauds)" ]
[ [ "numpy.concatenate", "sklearn.preprocessing.StandardScaler", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler" ], [ "numpy.array", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "sklearn.svm.SVR", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.concatenate", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
gatling-nrl/scikit-fem
[ "04730d80d612470b7e802eed4c21dd96b89cef61", "73890816c2142385abf4a9ffcd8d233e2d25e865" ]
[ "skfem/generic_utils.py", "tests/test_elements.py" ]
[ "import numpy as np\n\nfrom numpy import ndarray\n\n\ndef hash_args(*args):\n \"\"\"Return a tuple of hashes, with numpy support.\"\"\"\n return tuple(hash(arg.tobytes())\n if isinstance(arg, ndarray)\n else hash(arg) for arg in args)\n\n\nclass OrientedBoundary(ndarray):\n \"\"\"An array of facet indices with orientation.\"\"\"\n\n def __new__(cls, indices, ori):\n obj = np.asarray(indices).view(cls)\n obj.ori = np.array(ori, dtype=int)\n assert len(obj) == len(obj.ori)\n return obj\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n self.ori = getattr(obj, 'ori', None)\n", "from unittest import TestCase, main\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\n\nfrom skfem.element import (\n ElementHex1,\n ElementHexS2,\n ElementLineP0,\n ElementLineP1,\n ElementLineP2,\n ElementLinePp,\n ElementLineMini,\n ElementQuad0,\n ElementQuad1,\n ElementQuad2,\n ElementQuadP,\n ElementQuadRT0,\n ElementQuadS2,\n ElementTetMini,\n ElementTetP0,\n ElementTetP1,\n ElementTetP2,\n ElementTriMini,\n ElementTriP0,\n ElementTriP1,\n ElementTriP2,\n ElementTriP3,\n ElementTriP4,\n ElementTriRT0,\n ElementVectorH1,\n ElementHex2,\n ElementQuadBFS,\n ElementTriCR,\n ElementTriCCR,\n ElementTetCR,\n ElementTetCCR,\n ElementTriHermite,\n ElementTriMorley,\n ElementTriArgyris,\n ElementTriDG,\n ElementTetDG,\n ElementQuadDG,\n ElementQuadP,\n ElementHexDG,\n ElementWedge1,\n)\nfrom skfem.mesh import MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri\nfrom skfem.assembly import InteriorBasis, Functional\nfrom skfem.mapping import MappingAffine\n\n\nclass TestNodality(TestCase):\n \"\"\"Test for Element.doflocs.\"\"\"\n\n elems = [\n ElementLineP0(),\n ElementLineP1(),\n ElementLineP2(),\n ElementLinePp(1),\n ElementLinePp(3),\n ElementLineMini(),\n ElementTriP0(),\n ElementTriP1(),\n ElementTriP2(),\n ElementTriP3(),\n ElementTriP4(),\n ElementTriMini(),\n ElementQuad0(),\n ElementQuad1(),\n ElementQuad2(),\n ElementQuadS2(),\n ElementQuadP(1),\n ElementQuadP(3),\n ElementTetP0(),\n ElementTetP1(),\n ElementTetP2(),\n ElementTetMini(),\n ElementHex1(),\n ElementHexS2(),\n ElementHex2(),\n ElementTetCR(),\n ElementTetCCR(),\n ElementTriCR(),\n ElementTriCCR(),\n ElementWedge1(),\n ]\n\n def runTest(self):\n for e in self.elems:\n N = e.doflocs.shape[0]\n Ih = np.zeros((N, N))\n for itr in range(N):\n Ih[itr] = e.lbasis(e.doflocs.T, itr)[0]\n\n # Remove nan-rows: test nodality only on non-nan doflocs.\n #\n # Some elements, such as ElementTriMini might have a combination\n # of nodal dofs and non-nodal dofs.\n #\n # Nodal dof is defined so that there exists a point where the\n # corresponding basis function is one, and other basis functions\n # are zero. 
Non-nodal dof does not satisfy this property.\n ix = np.isnan(np.sum(Ih, axis=1))\n Nnan = np.sum(ix)\n ixs = np.nonzero(~ix)[0]\n Ih = Ih[ixs].T[ixs].T\n\n assert_allclose(Ih, np.eye(N - Nnan), atol=1e-13,\n err_msg=\"{}\".format(type(e)))\n\n\nclass TestNodalityTriRT0(TestCase):\n\n elem = ElementTriRT0()\n\n def runTest(self):\n e = self.elem\n N = e.doflocs.shape[0]\n Ih = np.zeros((N, N))\n normals = np.array([[0., -1.],\n [1 / np.sqrt(2), 1 / np.sqrt(2)],\n [-1., 0.]]).T\n for itr in range(N):\n # calculate integral of normal component over edge\n A = np.sum(e.lbasis(e.doflocs.T, itr)[0] * normals, axis=0)\n n = np.array([1., np.sqrt(2), 1.])\n Ih[itr] = A * n\n\n assert_allclose(Ih, np.eye(N),\n err_msg=\"{}\".format(type(e)))\n\n\nclass TestNodalityQuadRT0(TestCase):\n\n elem = ElementQuadRT0()\n\n def runTest(self):\n e = self.elem\n N = e.doflocs.shape[0]\n Ih = np.zeros((N, N))\n normals = np.array([[0., -1.],\n [1., 0.],\n [0., 1.],\n [-1., 0.]]).T\n for itr in range(N):\n # calculate integral of normal component over edge\n A = np.sum(e.lbasis(e.doflocs.T, itr)[0] * normals, axis=0)\n n = np.ones(4)\n Ih[itr] = A * n\n\n assert_allclose(Ih, np.eye(N),\n err_msg=\"{}\".format(type(e)))\n\n\nclass TestComposite(TestCase):\n\n def runTest(self):\n from skfem.element.element_composite import ElementComposite\n\n self.check_equivalence(\n ElementComposite(ElementTriP1(),\n ElementTriP1()),\n ElementVectorH1(ElementTriP1())\n )\n\n def check_equivalence(self, ec, ev):\n X = np.array([[0.125, 0.1111], [0.0555, 0.6]])\n m = MeshTri.init_refdom()\n mapping = MappingAffine(m)\n\n for k in range(6):\n for i in [0, 1]:\n # accessing i'th component looks slightly different\n if ec.gbasis(mapping, X, k)[i].is_zero():\n continue\n assert_array_equal(\n ev.gbasis(mapping, X, k)[0].value[i],\n ec.gbasis(mapping, X, k)[i].value\n )\n for j in [0, 1]:\n assert_array_equal(\n ev.gbasis(mapping, X, k)[0].grad[i][j],\n ec.gbasis(mapping, X, k)[i].grad[j]\n )\n\n\nclass TestCompositeMul(TestComposite):\n\n def runTest(self):\n\n self.check_equivalence(\n ElementTriP1() * ElementTriP1(),\n ElementVectorH1(ElementTriP1())\n )\n\n\nclass TestCompatibilityWarning(TestCase):\n\n meshes = [\n MeshTet,\n MeshQuad,\n MeshHex,\n MeshLine,\n ]\n elem = ElementTriP1\n\n def runTest(self):\n\n for m in self.meshes:\n\n def init_incompatible():\n return InteriorBasis(m(), self.elem())\n\n self.assertRaises(ValueError, init_incompatible)\n\n\nclass TestDerivatives(TestCase):\n \"\"\"Test values of derivatives.\"\"\"\n\n elems = [\n ElementLineP0(),\n ElementLineP1(),\n ElementLineP2(),\n ElementLineMini(),\n ElementTriP0(),\n ElementTriP1(),\n ElementTriP2(),\n ElementTriP3(),\n ElementTriP4(),\n ElementTriMini(),\n ElementQuad0(),\n ElementQuad1(),\n ElementQuad2(),\n ElementQuadS2(),\n ElementTetP0(),\n ElementTetP1(),\n ElementTetP2(),\n ElementTetMini(),\n ElementHex1(),\n ElementHexS2(),\n ElementHex2(),\n ElementTriCR(),\n ElementTriCCR(),\n ElementTetCR(),\n ElementTetCCR(),\n ElementWedge1(),\n ]\n\n def runTest(self):\n for elem in self.elems:\n eps = 1e-6\n for base in [0., .3, .6, .9]:\n if elem.dim == 1:\n y = np.array([[base, base + eps]])\n elif elem.dim == 2:\n y = np.array([[base, base + eps, base, base],\n [base, base, base, base + eps]])\n elif elem.dim == 3:\n y = np.array([[base, base + eps, base, base, base, base],\n [base, base, base, base + eps, base, base],\n [base, base, base, base, base, base + eps]])\n i = 0\n while True:\n try:\n out = elem.lbasis(y, i)\n except ValueError:\n break\n 
diff = (out[0][1] - out[0][0]) / eps\n errmsg = 'x-derivative for {}th bfun failed for {}'\n self.assertAlmostEqual(diff, out[1][0][0], delta=1e-3,\n msg=errmsg.format(i, elem))\n if elem.dim > 1:\n diff = (out[0][3] - out[0][2]) / eps\n errmsg = 'y-derivative for {}th bfun failed for {}'\n self.assertAlmostEqual(diff, out[1][1][3], delta=1e-3,\n msg=errmsg.format(i, elem))\n if elem.dim == 3:\n diff = (out[0][5] - out[0][4]) / eps\n errmsg = 'z-derivative for {}th bfun failed for {}'\n self.assertAlmostEqual(diff, out[1][2][4], delta=1e-3,\n msg=errmsg.format(i, elem))\n i += 1\n\n\nclass TestPartitionofUnity(TestCase):\n \"\"\"Test that elements form a partition of unity.\"\"\"\n\n elems = [\n ElementLineP1(),\n ElementLineP2(),\n ElementTriP1(),\n ElementTriP2(),\n ElementTriP3(),\n ElementTriP4(),\n ElementQuad1(),\n ElementQuad2(),\n ElementQuadS2(),\n ElementTetP1(),\n ElementTetP2(),\n ElementHex1(),\n ElementHexS2(),\n ElementHex2(),\n ElementTetCR(),\n ElementTetCCR(),\n ElementTriCR(),\n ElementTriCCR(),\n ElementWedge1(),\n ]\n\n def runTest(self):\n for elem in self.elems:\n if elem.dim == 1:\n y = np.array([[.15]])\n elif elem.dim == 2:\n y = np.array([[.15],\n [.15]])\n elif elem.dim == 3:\n y = np.array([[.15],\n [.15],\n [.15]])\n out = 0.\n for i in range(elem.doflocs.shape[0]):\n out += elem.lbasis(y, i)[0][0]\n self.assertAlmostEqual(out, 1, msg='failed for {}'.format(elem))\n\n\nclass TestElementLinePp(TestCase):\n\n def test_p_less_than_1_error(self):\n \"\"\"Tests that exception is thrown when initializing with p < 1.\"\"\"\n with self.assertRaises(ValueError):\n ElementLinePp(0)\n\n\nclass TestElementQuadBFS(TestCase):\n\n def test_throw_index_error(self):\n \"\"\"Tests that exception is thrown when i % 4 not in (0, 1, 2, 3).\"\"\"\n element = ElementQuadBFS()\n with self.assertRaises(ValueError):\n element.gdof(0, 0, -1)\n with self.assertRaises(ValueError):\n element.gdof(0, 0, 16)\n\n\[email protected](\n \"m,e,edg\",\n [\n (MeshTri().refined(), ElementTriP1(), ElementTriDG),\n (MeshTri().refined(), ElementTriP2(), ElementTriDG),\n (MeshTet().refined(), ElementTetP1(), ElementTetDG),\n (MeshTet().refined(), ElementTetP2(), ElementTetDG),\n (MeshTri().refined(), ElementTriArgyris(), ElementTriDG),\n (MeshTri().refined(), ElementTriMorley(), ElementTriDG),\n (MeshTri().refined(), ElementTriHermite(), ElementTriDG),\n (MeshHex().refined(), ElementHex1(), ElementHexDG),\n (MeshQuad().refined(), ElementQuad1(), ElementQuadDG),\n ]\n)\ndef test_dg_element(m, e, edg):\n\n edg = edg(e)\n\n @Functional\n def square(w):\n return w['random'] ** 2\n\n basis = InteriorBasis(m, e)\n basisdg = InteriorBasis(m, edg)\n\n assert_allclose(\n square.assemble(\n basis,\n random=basis.interpolate(\n basis.zeros() + 1)),\n square.assemble(\n basisdg,\n random=basisdg.interpolate(\n basisdg.zeros() + 1)),\n )\n\n\[email protected](\n \"e,edg\",\n [\n (ElementTriP1(), ElementTriDG),\n (ElementTetP2(), ElementTetDG),\n (ElementTriArgyris(), ElementTriDG),\n (ElementQuad1(), ElementQuadDG),\n (ElementQuadP(4), ElementQuadDG),\n (ElementHex2(), ElementHexDG),\n ]\n)\ndef test_initialize_dg_composite_elements(e, edg):\n E = edg(e) * e\n" ]
[ [ "numpy.asarray", "numpy.array" ], [ "numpy.sqrt", "numpy.nonzero", "numpy.eye", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
asears/stanza
[ "f91ca215e175d4f7b202259fe789374db7829395", "f91ca215e175d4f7b202259fe789374db7829395" ]
[ "stanza/models/parser.py", "stanza/models/langid/model.py" ]
[ "\"\"\"\nEntry point for training and evaluating a dependency parser.\n\nThis implementation combines a deep biaffine graph-based parser with linearization and distance features.\nFor details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.\n\"\"\"\n\n\"\"\"\nTraining and evaluation for the parser.\n\"\"\"\n\nimport sys\nimport os\nimport shutil\nimport time\nimport argparse\nimport logging\nimport numpy as np\nimport random\nimport torch\nfrom torch import nn, optim\n\nimport stanza.models.depparse.data as data\nfrom stanza.models.depparse.data import DataLoader\nfrom stanza.models.depparse.trainer import Trainer\nfrom stanza.models.depparse import scorer\nfrom stanza.models.common import utils\nfrom stanza.models.common import pretrain\nfrom stanza.models.common.data import augment_punct\nfrom stanza.models.common.doc import *\nfrom stanza.utils.conll import CoNLL\nfrom stanza.models import _training_logging\n\nlogger = logging.getLogger('stanza')\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for saving models.')\n parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')\n parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')\n parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')\n parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')\n parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')\n parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')\n parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')\n\n parser.add_argument('--mode', default='train', choices=['train', 'predict'])\n parser.add_argument('--lang', type=str, help='Language')\n parser.add_argument('--shorthand', type=str, help=\"Treebank shorthand\")\n\n parser.add_argument('--hidden_dim', type=int, default=400)\n parser.add_argument('--char_hidden_dim', type=int, default=400)\n parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)\n parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)\n parser.add_argument('--word_emb_dim', type=int, default=75)\n parser.add_argument('--char_emb_dim', type=int, default=100)\n parser.add_argument('--tag_emb_dim', type=int, default=50)\n parser.add_argument('--transformed_dim', type=int, default=125)\n parser.add_argument('--num_layers', type=int, default=3)\n parser.add_argument('--char_num_layers', type=int, default=1)\n parser.add_argument('--pretrain_max_vocab', type=int, default=250000)\n parser.add_argument('--word_dropout', type=float, default=0.33)\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--rec_dropout', type=float, default=0, help=\"Recurrent dropout\")\n parser.add_argument('--char_rec_dropout', type=float, default=0, help=\"Recurrent dropout\")\n parser.add_argument('--no_char', dest='char', action='store_false', help=\"Turn off character model.\")\n parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help=\"Turn off pretrained embeddings.\")\n parser.add_argument('--no_linearization', dest='linearization', action='store_false', help=\"Turn off linearization term.\")\n parser.add_argument('--no_distance', dest='distance', 
action='store_false', help=\"Turn off distance term.\")\n\n parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')\n parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')\n parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')\n parser.add_argument('--beta2', type=float, default=0.95)\n\n parser.add_argument('--max_steps', type=int, default=50000)\n parser.add_argument('--eval_interval', type=int, default=100)\n parser.add_argument('--max_steps_before_stop', type=int, default=3000)\n parser.add_argument('--batch_size', type=int, default=5000)\n parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')\n parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')\n parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')\n parser.add_argument('--save_name', type=str, default=None, help=\"File name to save the model\")\n\n parser.add_argument('--seed', type=int, default=1234)\n parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())\n parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')\n\n parser.add_argument('--augment_nopunct', type=float, default=None, help='Augment the training data by copying this fraction of punct-ending sentences as non-punct. Default of None will aim for roughly 10%')\n\n args = parser.parse_args(args=args)\n return args\n\ndef main(args=None):\n args = parse_args(args=args)\n\n if args.cpu:\n args.cuda = False\n utils.set_random_seed(args.seed, args.cuda)\n\n args = vars(args)\n logger.info(\"Running parser in {} mode\".format(args['mode']))\n\n if args['mode'] == 'train':\n train(args)\n else:\n evaluate(args)\n\n# TODO: refactor with tagger\ndef model_file_name(args):\n if args['save_name'] is not None:\n save_name = args['save_name']\n else:\n save_name = args['shorthand'] + \"_parser.pt\"\n\n return os.path.join(args['save_dir'], save_name)\n\n# TODO: refactor with everywhere\ndef load_pretrain(args):\n pt = None\n if args['pretrain']:\n pretrain_file = pretrain.find_pretrain_file(args['wordvec_pretrain_file'], args['save_dir'], args['shorthand'], args['lang'])\n if os.path.exists(pretrain_file):\n vec_file = None\n else:\n vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])\n pt = pretrain.Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])\n return pt\n\ndef train(args):\n model_file = model_file_name(args)\n utils.ensure_dir(os.path.split(model_file)[0])\n\n # load pretrained vectors if needed\n pretrain = load_pretrain(args)\n\n # load data\n logger.info(\"Loading data with batch size {}...\".format(args['batch_size']))\n train_data, _ = CoNLL.conll2dict(input_file=args['train_file'])\n # possibly augment the training data with some amount of fake data\n # based on the options chosen\n logger.info(\"Original data size: {}\".format(len(train_data)))\n train_data.extend(augment_punct(train_data, args['augment_nopunct'],\n keep_original_sentences=False))\n logger.info(\"Augmented data size: {}\".format(len(train_data)))\n train_doc = Document(train_data)\n train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)\n vocab = train_batch.vocab\n dev_doc = CoNLL.conll2doc(input_file=args['eval_file'])\n dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, 
evaluation=True, sort_during_eval=True)\n\n # pred and gold path\n system_pred_file = args['output_file']\n gold_file = args['gold_file']\n\n # skip training if the language does not have training or dev data\n if len(train_batch) == 0 or len(dev_batch) == 0:\n logger.info(\"Skip training because no data available...\")\n sys.exit(0)\n\n logger.info(\"Training parser...\")\n trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])\n\n global_step = 0\n max_steps = args['max_steps']\n dev_score_history = []\n best_dev_preds = []\n current_lr = args['lr']\n global_start_time = time.time()\n format_str = 'Finished STEP {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'\n\n using_amsgrad = False\n last_best_step = 0\n # start training\n train_loss = 0\n while True:\n do_break = False\n for i, batch in enumerate(train_batch):\n start_time = time.time()\n global_step += 1\n loss = trainer.update(batch, eval=False) # update step\n train_loss += loss\n if global_step % args['log_step'] == 0:\n duration = time.time() - start_time\n logger.info(format_str.format(global_step, max_steps, loss, duration, current_lr))\n\n if global_step % args['eval_interval'] == 0:\n # eval on dev\n logger.info(\"Evaluating on dev set...\")\n dev_preds = []\n for batch in dev_batch:\n preds = trainer.predict(batch)\n dev_preds += preds\n dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)\n\n dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])\n CoNLL.write_doc2conll(dev_batch.doc, system_pred_file)\n _, _, dev_score = scorer.score(system_pred_file, gold_file)\n\n train_loss = train_loss / args['eval_interval'] # avg loss per batch\n logger.info(\"step {}: train_loss = {:.6f}, dev_score = {:.4f}\".format(global_step, train_loss, dev_score))\n train_loss = 0\n\n # save best model\n if len(dev_score_history) == 0 or dev_score > max(dev_score_history):\n last_best_step = global_step\n trainer.save(model_file)\n logger.info(\"new best model saved.\")\n best_dev_preds = dev_preds\n\n dev_score_history += [dev_score]\n\n if global_step - last_best_step >= args['max_steps_before_stop']:\n if not using_amsgrad:\n logger.info(\"Switching to AMSGrad\")\n last_best_step = global_step\n using_amsgrad = True\n trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)\n else:\n do_break = True\n break\n\n if global_step >= args['max_steps']:\n do_break = True\n break\n\n if do_break: break\n\n train_batch.reshuffle()\n\n logger.info(\"Training ended with {} steps.\".format(global_step))\n\n best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1\n logger.info(\"Best dev F1 = {:.2f}, at iteration = {}\".format(best_f, best_eval * args['eval_interval']))\n\ndef evaluate(args):\n # file paths\n system_pred_file = args['output_file']\n gold_file = args['gold_file']\n\n model_file = model_file_name(args)\n # load pretrained vectors if needed\n pretrain = load_pretrain(args)\n\n # load model\n logger.info(\"Loading model from: {}\".format(model_file))\n use_cuda = args['cuda'] and not args['cpu']\n trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)\n loaded_args, vocab = trainer.args, trainer.vocab\n\n # load config\n for k in args:\n if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':\n loaded_args[k] = args[k]\n\n # load data\n logger.info(\"Loading data with batch size {}...\".format(args['batch_size']))\n doc = 
CoNLL.conll2doc(input_file=args['eval_file'])\n batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)\n\n if len(batch) > 0:\n logger.info(\"Start evaluation...\")\n preds = []\n for i, b in enumerate(batch):\n preds += trainer.predict(b)\n else:\n # skip eval if dev data does not exist\n preds = []\n preds = utils.unsort(preds, batch.data_orig_idx)\n\n # write to file and score\n batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])\n CoNLL.write_doc2conll(batch.doc, system_pred_file)\n\n if gold_file is not None:\n _, _, score = scorer.score(system_pred_file, gold_file)\n\n logger.info(\"Parser score:\")\n logger.info(\"{} {:.2f}\".format(args['shorthand'], score*100))\n\nif __name__ == '__main__':\n main()\n", "import torch\nimport torch.nn as nn\n\n\nclass LangIDBiLSTM(nn.Module):\n \"\"\"\n Multi-layer BiLSTM model for language detecting. A recreation of \"A reproduction of Apple's bi-directional LSTM models\n for language identification in short strings.\" (Toftrup et al 2021)\n\n Arxiv: https://arxiv.org/abs/2102.06282\n GitHub: https://github.com/AU-DIS/LSTM_langid\n \"\"\"\n\n def __init__(self, char_to_idx, tag_to_idx, num_layers, embedding_dim, hidden_dim, batch_size=64, weights=None, \n dropout=0.0, lang_subset=None):\n super(LangIDBiLSTM, self).__init__()\n self.num_layers = num_layers\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.char_to_idx = char_to_idx\n self.vocab_size = len(char_to_idx)\n self.tag_to_idx = tag_to_idx\n self.idx_to_tag = [i[1] for i in sorted([(v,k) for k,v in self.tag_to_idx.items()])]\n self.lang_subset = lang_subset\n self.padding_idx = char_to_idx[\"<PAD>\"]\n self.tagset_size = len(tag_to_idx)\n self.batch_size = batch_size\n self.loss_train = nn.CrossEntropyLoss(weight=weights)\n self.dropout_prob = dropout\n \n # embeddings for chars\n self.char_embeds = nn.Embedding(\n num_embeddings=self.vocab_size, \n embedding_dim=self.embedding_dim,\n padding_idx=self.padding_idx\n )\n\n # the bidirectional LSTM\n self.lstm = nn.LSTM(\n self.embedding_dim, \n self.hidden_dim,\n num_layers=self.num_layers,\n bidirectional=True,\n batch_first=True\n )\n\n # convert output to tag space\n self.hidden_to_tag = nn.Linear(\n self.hidden_dim * 2, \n self.tagset_size\n )\n\n # dropout layer\n self.dropout = nn.Dropout(p=self.dropout_prob)\n\n def build_lang_mask(self, use_gpu=None):\n \"\"\"\n Build language mask if a lang subset is specified (e.g. 
[\"en\", \"fr\"])\n \"\"\"\n device = torch.device(\"cuda\") if use_gpu else None\n lang_mask_list = [int(lang in self.lang_subset) for lang in self.idx_to_tag] if self.lang_subset else \\\n [1 for lang in self.idx_to_tag]\n self.lang_mask = torch.tensor(lang_mask_list, device=device, dtype=torch.float)\n\n def loss(self, Y_hat, Y):\n return self.loss_train(Y_hat, Y)\n\n def forward(self, x):\n # embed input\n x = self.char_embeds(x)\n \n # run through LSTM\n x, _ = self.lstm(x)\n \n # run through linear layer\n x = self.hidden_to_tag(x)\n \n # sum character outputs for each sequence\n x = torch.sum(x, dim=1)\n\n return x\n\n def prediction_scores(self, x):\n prediction_probs = self(x)\n if self.lang_subset:\n prediction_batch_size = prediction_probs.size()[0]\n batch_mask = torch.stack([self.lang_mask for _ in range(prediction_batch_size)])\n prediction_probs = prediction_probs * batch_mask\n return torch.argmax(prediction_probs, dim=1)\n\n def save(self, path):\n \"\"\" Save a model at path \"\"\"\n checkpoint = {\n \"char_to_idx\": self.char_to_idx,\n \"tag_to_idx\": self.tag_to_idx,\n \"num_layers\": self.num_layers,\n \"embedding_dim\": self.embedding_dim,\n \"hidden_dim\": self.hidden_dim,\n \"model_state_dict\": self.state_dict()\n }\n torch.save(checkpoint, path)\n \n @classmethod\n def load(cls, path, use_cuda=False, batch_size=64, lang_subset=None):\n \"\"\" Load a serialized model located at path \"\"\"\n if use_cuda:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n else:\n device = torch.device(\"cpu\")\n checkpoint = torch.load(path, map_location=torch.device(\"cpu\"))\n weights = checkpoint[\"model_state_dict\"][\"loss_train.weight\"]\n model = cls(checkpoint[\"char_to_idx\"], checkpoint[\"tag_to_idx\"], checkpoint[\"num_layers\"],\n checkpoint[\"embedding_dim\"], checkpoint[\"hidden_dim\"], batch_size=batch_size, weights=weights,\n lang_subset=lang_subset)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n if use_cuda:\n model.to(torch.device(\"cuda\"))\n model.build_lang_mask(use_gpu=use_cuda)\n return model\n\n" ]
[ [ "numpy.argmax", "torch.cuda.is_available" ], [ "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.nn.LSTM", "torch.sum", "torch.nn.Embedding", "torch.tensor", "torch.nn.Linear", "torch.save", "torch.cuda.is_available", "torch.device", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
st--/jupytext
[ "72aa6c4968da714323fbd7a7c548ee4b1274c946" ]
[ "demo/World population.pct.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# cell_markers: region,endregion\n# formats: ipynb,.pct.py:percent,.lgt.py:light,.spx.py:sphinx,md,Rmd,.pandoc.md:pandoc\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # A quick insight at world population\n#\n# ## Collecting population data\n#\n# In the below we retrieve population data from the\n# [World Bank](http://www.worldbank.org/)\n# using the [wbdata](https://github.com/OliverSherouse/wbdata) python package\n\n# %%\nimport pandas as pd\nimport wbdata as wb\n\npd.options.display.max_rows = 6\npd.options.display.max_columns = 20\n\n# %% [markdown]\n# Corresponding indicator is found using search method - or, directly,\n# the World Bank site.\n\n# %%\nwb.search_indicators('Population, total') # SP.POP.TOTL\n# wb.search_indicators('area')\n# => https://data.worldbank.org/indicator is easier to use\n\n# %% [markdown]\n# Now we download the population data\n\n# %%\nindicators = {'SP.POP.TOTL': 'Population, total',\n 'AG.SRF.TOTL.K2': 'Surface area (sq. km)',\n 'AG.LND.TOTL.K2': 'Land area (sq. km)',\n 'AG.LND.ARBL.ZS': 'Arable land (% of land area)'}\ndata = wb.get_dataframe(indicators, convert_date=True).sort_index()\ndata\n\n# %% [markdown]\n# World is one of the countries\n\n# %%\ndata.loc['World']\n\n# %% [markdown]\n# Can we classify over continents?\n\n# %%\ndata.loc[(slice(None), '2017-01-01'), :]['Population, total'].dropna(\n).sort_values().tail(60).index.get_level_values('country')\n\n# %% [markdown]\n# Extract zones manually (in order of increasing population)\n\n# %%\nzones = ['North America', 'Middle East & North Africa',\n 'Latin America & Caribbean', 'Europe & Central Asia',\n 'Sub-Saharan Africa', 'South Asia',\n 'East Asia & Pacific'][::-1]\n\n# %% [markdown]\n# And extract population information (and check total is right)\n\n# %%\npopulation = data.loc[zones]['Population, total'].swaplevel().unstack()\npopulation = population[zones]\nassert all(data.loc['World']['Population, total'] == population.sum(axis=1))\n\n# %% [markdown]\n# ## Stacked area plot with matplotlib\n\n# %%\nimport matplotlib.pyplot as plt\n\n# %%\nplt.clf()\nplt.figure(figsize=(10, 5), dpi=100)\nplt.stackplot(population.index, population.values.T / 1e9)\nplt.legend(population.columns, loc='upper left')\nplt.ylabel('Population count (B)')\nplt.show()\n\n# %% [markdown]\n# ## Stacked bar plot with plotly\n\n# %%\nimport plotly.offline as offline\nimport plotly.graph_objs as go\n\noffline.init_notebook_mode()\n\n# %%\ndata = [go.Scatter(x=population.index, y=population[zone], name=zone, stackgroup='World')\n for zone in zones]\nfig = go.Figure(data=data,\n layout=go.Layout(title='World population'))\noffline.iplot(fig)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.clf", "matplotlib.pyplot.stackplot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zwysilence/tensorflow
[ "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "6aa83398ab03bfae822f36772757097bcb98b6ed", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55", "6aa83398ab03bfae822f36772757097bcb98b6ed", "b55001be83da044bb21d539d433dec6231eaec55", "b55001be83da044bb21d539d433dec6231eaec55" ]
[ "tensorflow/contrib/rnn/python/ops/rnn_cell.py", "tensorflow/python/ops/image_ops_test.py", "tensorflow/compiler/tests/momentum_test.py", "tensorflow/python/ops/boosted_trees_ops.py", "tensorflow/contrib/framework/python/ops/sort_ops_test.py", "tensorflow/python/kernel_tests/matrix_exponential_op_test.py", "tensorflow/python/estimator/gc.py", "tensorflow/compiler/tests/reduce_window_test.py", "tensorflow/python/training/monitored_session_test.py", "tensorflow/python/estimator/canned/boosted_trees_test.py", "tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py", "tensorflow/python/debug/wrappers/disk_usage_test.py", "tensorflow/contrib/rate/rate_test.py", "tensorflow/compiler/tests/gather_test.py", "tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test_base.py", "tensorflow/contrib/learn/python/learn/estimators/run_config.py", "tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py", "tensorflow/contrib/lite/python/lite_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module for constructing RNN Cells.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\n\nfrom tensorflow.contrib.compiler import jit\nfrom tensorflow.contrib.layers.python.layers import layers\nfrom tensorflow.contrib.rnn.python.ops import core_rnn_cell\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import op_def_registry\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_impl # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\ndef _get_concat_variable(name, shape, dtype, num_shards):\n \"\"\"Get a sharded variable concatenated into one tensor.\"\"\"\n sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)\n if len(sharded_variable) == 1:\n return sharded_variable[0]\n\n concat_name = name + \"/concat\"\n concat_full_name = vs.get_variable_scope().name + \"/\" + concat_name + \":0\"\n for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):\n if value.name == concat_full_name:\n return value\n\n concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)\n ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable)\n return concat_variable\n\n\ndef _get_sharded_variable(name, shape, dtype, num_shards):\n \"\"\"Get a list of sharded variables with the given dtype.\"\"\"\n if num_shards > shape[0]:\n raise ValueError(\"Too many shards: shape=%s, num_shards=%d\" % (shape,\n num_shards))\n unit_shard_size = int(math.floor(shape[0] / num_shards))\n remaining_rows = shape[0] - unit_shard_size * num_shards\n\n shards = []\n for i in range(num_shards):\n current_size = unit_shard_size\n if i < remaining_rows:\n current_size += 1\n shards.append(\n vs.get_variable(\n name + \"_%d\" % i, [current_size] + shape[1:], dtype=dtype))\n return shards\n\n\ndef _norm(g, b, inp, scope):\n shape = inp.get_shape()[-1:]\n gamma_init = init_ops.constant_initializer(g)\n beta_init = init_ops.constant_initializer(b)\n with 
vs.variable_scope(scope):\n # Initialize beta and gamma for use by layer_norm.\n vs.get_variable(\"gamma\", shape=shape, initializer=gamma_init)\n vs.get_variable(\"beta\", shape=shape, initializer=beta_init)\n normalized = layers.layer_norm(inp, reuse=True, scope=scope)\n return normalized\n\n\nclass CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Long short-term memory unit (LSTM) recurrent network cell.\n\n The default non-peephole implementation is based on:\n\n http://www.bioinf.jku.at/publications/older/2604.pdf\n\n S. Hochreiter and J. Schmidhuber.\n \"Long Short-Term Memory\". Neural Computation, 9(8):1735-1780, 1997.\n\n The peephole implementation is based on:\n\n https://research.google.com/pubs/archive/43905.pdf\n\n Hasim Sak, Andrew Senior, and Francoise Beaufays.\n \"Long short-term memory recurrent neural network architectures for\n large scale acoustic modeling.\" INTERSPEECH, 2014.\n\n The coupling of input and forget gate is based on:\n\n http://arxiv.org/pdf/1503.04069.pdf\n\n Greff et al. \"LSTM: A Search Space Odyssey\"\n\n The class uses optional peep-hole connections, and an optional projection\n layer.\n Layer normalization implementation is based on:\n\n https://arxiv.org/abs/1607.06450.\n\n \"Layer Normalization\"\n Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton\n\n and is applied before the internal nonlinearities.\n\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n initializer=None,\n num_proj=None,\n proj_clip=None,\n num_unit_shards=1,\n num_proj_shards=1,\n forget_bias=1.0,\n state_is_tuple=True,\n activation=math_ops.tanh,\n reuse=None,\n layer_norm=False,\n norm_gain=1.0,\n norm_shift=0.0):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n num_unit_shards: How to split the weight matrix. If >1, the weight\n matrix is stored across num_unit_shards.\n num_proj_shards: How to split the projection matrix. If >1, the\n projection matrix is stored across num_proj_shards.\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. By default (False), they are concatenated\n along the column axis. This default behavior will soon be deprecated.\n activation: Activation function of the inner states.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n layer_norm: If `True`, layer normalization will be applied.\n norm_gain: float, The layer normalization gain initial value. If\n `layer_norm` has been set to `False`, this argument will be ignored.\n norm_shift: float, The layer normalization shift initial value. 
If\n `layer_norm` has been set to `False`, this argument will be ignored.\n \"\"\"\n super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._initializer = initializer\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._num_unit_shards = num_unit_shards\n self._num_proj_shards = num_proj_shards\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n self._activation = activation\n self._reuse = reuse\n self._layer_norm = layer_norm\n self._norm_gain = norm_gain\n self._norm_shift = norm_shift\n\n if num_proj:\n self._state_size = (\n rnn_cell_impl.LSTMStateTuple(num_units, num_proj)\n if state_is_tuple else num_units + num_proj)\n self._output_size = num_proj\n else:\n self._state_size = (\n rnn_cell_impl.LSTMStateTuple(num_units, num_units)\n if state_is_tuple else 2 * num_units)\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: if `state_is_tuple` is False, this must be a state Tensor,\n `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a\n tuple of state Tensors, both `2-D`, with column sizes `c_state` and\n `m_state`.\n\n Returns:\n A tuple containing:\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n LSTM after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of LSTM after reading `inputs` when\n the previous state was `state`. 
Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = math_ops.sigmoid\n\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n\n if self._state_is_tuple:\n (c_prev, m_prev) = state\n else:\n c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n concat_w = _get_concat_variable(\n \"W\", [input_size.value + num_proj, 3 * self._num_units], dtype,\n self._num_unit_shards)\n\n b = vs.get_variable(\n \"B\",\n shape=[3 * self._num_units],\n initializer=init_ops.zeros_initializer(),\n dtype=dtype)\n\n # j = new_input, f = forget_gate, o = output_gate\n cell_inputs = array_ops.concat([inputs, m_prev], 1)\n lstm_matrix = math_ops.matmul(cell_inputs, concat_w)\n\n # If layer nomalization is applied, do not add bias\n if not self._layer_norm:\n lstm_matrix = nn_ops.bias_add(lstm_matrix, b)\n\n j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)\n\n # Apply layer normalization\n if self._layer_norm:\n j = _norm(self._norm_gain, self._norm_shift, j, \"transform\")\n f = _norm(self._norm_gain, self._norm_shift, f, \"forget\")\n o = _norm(self._norm_gain, self._norm_shift, o, \"output\")\n\n # Diagonal connections\n if self._use_peepholes:\n w_f_diag = vs.get_variable(\n \"W_F_diag\", shape=[self._num_units], dtype=dtype)\n w_o_diag = vs.get_variable(\n \"W_O_diag\", shape=[self._num_units], dtype=dtype)\n\n if self._use_peepholes:\n f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)\n else:\n f_act = sigmoid(f + self._forget_bias)\n c = (f_act * c_prev + (1 - f_act) * self._activation(j))\n\n # Apply layer normalization\n if self._layer_norm:\n c = _norm(self._norm_gain, self._norm_shift, c, \"state\")\n\n if self._use_peepholes:\n m = sigmoid(o + w_o_diag * c) * self._activation(c)\n else:\n m = sigmoid(o) * self._activation(c)\n\n if self._num_proj is not None:\n concat_w_proj = _get_concat_variable(\"W_P\",\n [self._num_units, self._num_proj],\n dtype, self._num_proj_shards)\n\n m = math_ops.matmul(m, concat_w_proj)\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = (\n rnn_cell_impl.LSTMStateTuple(c, m)\n if self._state_is_tuple else array_ops.concat([c, m], 1))\n return m, new_state\n\n\nclass TimeFreqLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.\n\n This implementation is based on:\n\n Tara N. Sainath and Bo Li\n \"Modeling Time-Frequency Patterns with LSTM vs. 
Convolutional Architectures\n for LVCSR Tasks.\" submitted to INTERSPEECH, 2016.\n\n It uses peep-hole connections and optional cell clipping.\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n cell_clip=None,\n initializer=None,\n num_unit_shards=1,\n forget_bias=1.0,\n feature_size=None,\n frequency_skip=1,\n reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_unit_shards: int, How to split the weight matrix. If >1, the weight\n matrix is stored across num_unit_shards.\n forget_bias: float, Biases of the forget gate are initialized by default\n to 1 in order to reduce the scale of forgetting at the beginning\n of the training.\n feature_size: int, The size of the input feature the LSTM spans over.\n frequency_skip: int, The amount the LSTM filter is shifted by in\n frequency.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._initializer = initializer\n self._num_unit_shards = num_unit_shards\n self._forget_bias = forget_bias\n self._feature_size = feature_size\n self._frequency_skip = frequency_skip\n self._state_size = 2 * num_units\n self._output_size = num_units\n self._reuse = reuse\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: state Tensor, 2D, batch x state_size.\n\n Returns:\n A tuple containing:\n - A 2D, batch x output_dim, Tensor representing the output of the LSTM\n after reading \"inputs\" when previous state was \"state\".\n Here output_dim is num_units.\n - A 2D, batch x state_size, Tensor representing the new state of LSTM\n after reading \"inputs\" when previous state was \"state\".\n Raises:\n ValueError: if an input_size was specified and the provided inputs have\n a different dimension.\n \"\"\"\n sigmoid = math_ops.sigmoid\n tanh = math_ops.tanh\n\n freq_inputs = self._make_tf_features(inputs)\n dtype = inputs.dtype\n actual_input_size = freq_inputs[0].get_shape().as_list()[1]\n\n concat_w = _get_concat_variable(\n \"W\", [actual_input_size + 2 * self._num_units, 4 * self._num_units],\n dtype, self._num_unit_shards)\n\n b = vs.get_variable(\n \"B\",\n shape=[4 * self._num_units],\n initializer=init_ops.zeros_initializer(),\n dtype=dtype)\n\n # Diagonal connections\n if self._use_peepholes:\n w_f_diag = vs.get_variable(\n \"W_F_diag\", shape=[self._num_units], dtype=dtype)\n w_i_diag = vs.get_variable(\n \"W_I_diag\", shape=[self._num_units], dtype=dtype)\n w_o_diag = vs.get_variable(\n \"W_O_diag\", shape=[self._num_units], dtype=dtype)\n\n # initialize the first freq state to be zero\n m_prev_freq = array_ops.zeros(\n [inputs.shape[0].value or inputs.get_shape()[0], self._num_units],\n dtype)\n for fq in range(len(freq_inputs)):\n c_prev = array_ops.slice(state, 
[0, 2 * fq * self._num_units],\n [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, (2 * fq + 1) * self._num_units],\n [-1, self._num_units])\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq], 1)\n lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)\n i, j, f, o = array_ops.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n\n if self._use_peepholes:\n c = (\n sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +\n sigmoid(i + w_i_diag * c_prev) * tanh(j))\n else:\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n\n if self._use_peepholes:\n m = sigmoid(o + w_o_diag * c) * tanh(c)\n else:\n m = sigmoid(o) * tanh(c)\n m_prev_freq = m\n if fq == 0:\n state_out = array_ops.concat([c, m], 1)\n m_out = m\n else:\n state_out = array_ops.concat([state_out, c, m], 1)\n m_out = array_ops.concat([m_out, m], 1)\n return m_out, state_out\n\n def _make_tf_features(self, input_feat):\n \"\"\"Make the frequency features.\n\n Args:\n input_feat: input Tensor, 2D, batch x num_units.\n\n Returns:\n A list of frequency features, with each element containing:\n - A 2D, batch x output_dim, Tensor representing the time-frequency feature\n for that frequency index. Here output_dim is feature_size.\n Raises:\n ValueError: if input_size cannot be inferred from static shape inference.\n \"\"\"\n input_size = input_feat.get_shape().with_rank(2)[-1].value\n if input_size is None:\n raise ValueError(\"Cannot infer input_size from static shape inference.\")\n num_feats = int(\n (input_size - self._feature_size) / (self._frequency_skip)) + 1\n freq_inputs = []\n for f in range(num_feats):\n cur_input = array_ops.slice(input_feat, [0, f * self._frequency_skip],\n [-1, self._feature_size])\n freq_inputs.append(cur_input)\n return freq_inputs\n\n\nclass GridLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Grid Long short-term memory unit (LSTM) recurrent network cell.\n\n The default is based on:\n Nal Kalchbrenner, Ivo Danihelka and Alex Graves\n \"Grid Long Short-Term Memory,\" Proc. ICLR 2016.\n http://arxiv.org/abs/1507.01526\n\n When peephole connections are used, the implementation is based on:\n Tara N. Sainath and Bo Li\n \"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures\n for LVCSR Tasks.\" submitted to INTERSPEECH, 2016.\n\n The code uses optional peephole connections, shared_weights and cell clipping.\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n share_time_frequency_weights=False,\n cell_clip=None,\n initializer=None,\n num_unit_shards=1,\n forget_bias=1.0,\n feature_size=None,\n frequency_skip=None,\n num_frequency_blocks=None,\n start_freqindex_list=None,\n end_freqindex_list=None,\n couple_input_forget_gates=False,\n state_is_tuple=True,\n reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: (optional) bool, default False. Set True to enable\n diagonal/peephole connections.\n share_time_frequency_weights: (optional) bool, default False. 
Set True to\n enable shared cell weights between time and frequency LSTMs.\n cell_clip: (optional) A float value, default None, if provided the cell\n state is clipped by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices, default None.\n num_unit_shards: (optional) int, default 1, How to split the weight\n matrix. If > 1, the weight matrix is stored across num_unit_shards.\n forget_bias: (optional) float, default 1.0, The initial bias of the\n forget gates, used to reduce the scale of forgetting at the beginning\n of the training.\n feature_size: (optional) int, default None, The size of the input feature\n the LSTM spans over.\n frequency_skip: (optional) int, default None, The amount the LSTM filter\n is shifted by in frequency.\n num_frequency_blocks: [required] A list of frequency blocks needed to\n cover the whole input feature splitting defined by start_freqindex_list\n and end_freqindex_list.\n start_freqindex_list: [optional], list of ints, default None, The\n starting frequency index for each frequency block.\n end_freqindex_list: [optional], list of ints, default None. The ending\n frequency index for each frequency block.\n couple_input_forget_gates: (optional) bool, default False, Whether to\n couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce\n model parameters and computation cost.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. By default (False), they are concatenated\n along the column axis. This default behavior will soon be deprecated.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n Raises:\n ValueError: if the num_frequency_blocks list is not specified\n \"\"\"\n super(GridLSTMCell, self).__init__(_reuse=reuse)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. 
Use state_is_tuple=True.\", self)\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._share_time_frequency_weights = share_time_frequency_weights\n self._couple_input_forget_gates = couple_input_forget_gates\n self._state_is_tuple = state_is_tuple\n self._cell_clip = cell_clip\n self._initializer = initializer\n self._num_unit_shards = num_unit_shards\n self._forget_bias = forget_bias\n self._feature_size = feature_size\n self._frequency_skip = frequency_skip\n self._start_freqindex_list = start_freqindex_list\n self._end_freqindex_list = end_freqindex_list\n self._num_frequency_blocks = num_frequency_blocks\n self._total_blocks = 0\n self._reuse = reuse\n if self._num_frequency_blocks is None:\n raise ValueError(\"Must specify num_frequency_blocks\")\n\n for block_index in range(len(self._num_frequency_blocks)):\n self._total_blocks += int(self._num_frequency_blocks[block_index])\n if state_is_tuple:\n state_names = \"\"\n for block_index in range(len(self._num_frequency_blocks)):\n for freq_index in range(self._num_frequency_blocks[block_index]):\n name_prefix = \"state_f%02d_b%02d\" % (freq_index, block_index)\n state_names += (\"%s_c, %s_m,\" % (name_prefix, name_prefix))\n self._state_tuple_type = collections.namedtuple(\"GridLSTMStateTuple\",\n state_names.strip(\",\"))\n self._state_size = self._state_tuple_type(*(\n [num_units, num_units] * self._total_blocks))\n else:\n self._state_tuple_type = None\n self._state_size = num_units * self._total_blocks * 2\n self._output_size = num_units * self._total_blocks * 2\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def state_tuple_type(self):\n return self._state_tuple_type\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, [batch, feature_size].\n state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the\n flag self._state_is_tuple.\n\n Returns:\n A tuple containing:\n - A 2D, [batch, output_dim], Tensor representing the output of the LSTM\n after reading \"inputs\" when previous state was \"state\".\n Here output_dim is num_units.\n - A 2D, [batch, state_size], Tensor representing the new state of LSTM\n after reading \"inputs\" when previous state was \"state\".\n Raises:\n ValueError: if an input_size was specified and the provided inputs have\n a different dimension.\n \"\"\"\n batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]\n freq_inputs = self._make_tf_features(inputs)\n m_out_lst = []\n state_out_lst = []\n for block in range(len(freq_inputs)):\n m_out_lst_current, state_out_lst_current = self._compute(\n freq_inputs[block],\n block,\n state,\n batch_size,\n state_is_tuple=self._state_is_tuple)\n m_out_lst.extend(m_out_lst_current)\n state_out_lst.extend(state_out_lst_current)\n if self._state_is_tuple:\n state_out = self._state_tuple_type(*state_out_lst)\n else:\n state_out = array_ops.concat(state_out_lst, 1)\n m_out = array_ops.concat(m_out_lst, 1)\n return m_out, state_out\n\n def _compute(self,\n freq_inputs,\n block,\n state,\n batch_size,\n state_prefix=\"state\",\n state_is_tuple=True):\n \"\"\"Run the actual computation of one step LSTM.\n\n Args:\n freq_inputs: list of Tensors, 2D, [batch, feature_size].\n block: int, current frequency block index to process.\n state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on\n the flag state_is_tuple.\n batch_size: int32, batch size.\n state_prefix: 
(optional) string, name prefix for states, defaults to\n \"state\".\n state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.\n\n Returns:\n A tuple, containing:\n - A list of [batch, output_dim] Tensors, representing the output of the\n LSTM given the inputs and state.\n - A list of [batch, state_size] Tensors, representing the LSTM state\n values given the inputs and previous state.\n \"\"\"\n sigmoid = math_ops.sigmoid\n tanh = math_ops.tanh\n num_gates = 3 if self._couple_input_forget_gates else 4\n dtype = freq_inputs[0].dtype\n actual_input_size = freq_inputs[0].get_shape().as_list()[1]\n\n concat_w_f = _get_concat_variable(\n \"W_f_%d\" % block,\n [actual_input_size + 2 * self._num_units, num_gates * self._num_units],\n dtype, self._num_unit_shards)\n b_f = vs.get_variable(\n \"B_f_%d\" % block,\n shape=[num_gates * self._num_units],\n initializer=init_ops.zeros_initializer(),\n dtype=dtype)\n if not self._share_time_frequency_weights:\n concat_w_t = _get_concat_variable(\"W_t_%d\" % block, [\n actual_input_size + 2 * self._num_units, num_gates * self._num_units\n ], dtype, self._num_unit_shards)\n b_t = vs.get_variable(\n \"B_t_%d\" % block,\n shape=[num_gates * self._num_units],\n initializer=init_ops.zeros_initializer(),\n dtype=dtype)\n\n if self._use_peepholes:\n # Diagonal connections\n if not self._couple_input_forget_gates:\n w_f_diag_freqf = vs.get_variable(\n \"W_F_diag_freqf_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_f_diag_freqt = vs.get_variable(\n \"W_F_diag_freqt_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_i_diag_freqf = vs.get_variable(\n \"W_I_diag_freqf_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_i_diag_freqt = vs.get_variable(\n \"W_I_diag_freqt_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_o_diag_freqf = vs.get_variable(\n \"W_O_diag_freqf_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_o_diag_freqt = vs.get_variable(\n \"W_O_diag_freqt_%d\" % block, shape=[self._num_units], dtype=dtype)\n if not self._share_time_frequency_weights:\n if not self._couple_input_forget_gates:\n w_f_diag_timef = vs.get_variable(\n \"W_F_diag_timef_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_f_diag_timet = vs.get_variable(\n \"W_F_diag_timet_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_i_diag_timef = vs.get_variable(\n \"W_I_diag_timef_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_i_diag_timet = vs.get_variable(\n \"W_I_diag_timet_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_o_diag_timef = vs.get_variable(\n \"W_O_diag_timef_%d\" % block, shape=[self._num_units], dtype=dtype)\n w_o_diag_timet = vs.get_variable(\n \"W_O_diag_timet_%d\" % block, shape=[self._num_units], dtype=dtype)\n\n # initialize the first freq state to be zero\n m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)\n c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)\n for freq_index in range(len(freq_inputs)):\n if state_is_tuple:\n name_prefix = \"%s_f%02d_b%02d\" % (state_prefix, freq_index, block)\n c_prev_time = getattr(state, name_prefix + \"_c\")\n m_prev_time = getattr(state, name_prefix + \"_m\")\n else:\n c_prev_time = array_ops.slice(\n state, [0, 2 * freq_index * self._num_units], [-1, self._num_units])\n m_prev_time = array_ops.slice(\n state, [0, (2 * freq_index + 1) * self._num_units],\n [-1, self._num_units])\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n cell_inputs = array_ops.concat(\n [freq_inputs[freq_index], m_prev_time, 
m_prev_freq], 1)\n\n # F-LSTM\n lstm_matrix_freq = nn_ops.bias_add(\n math_ops.matmul(cell_inputs, concat_w_f), b_f)\n if self._couple_input_forget_gates:\n i_freq, j_freq, o_freq = array_ops.split(\n value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)\n f_freq = None\n else:\n i_freq, j_freq, f_freq, o_freq = array_ops.split(\n value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)\n # T-LSTM\n if self._share_time_frequency_weights:\n i_time = i_freq\n j_time = j_freq\n f_time = f_freq\n o_time = o_freq\n else:\n lstm_matrix_time = nn_ops.bias_add(\n math_ops.matmul(cell_inputs, concat_w_t), b_t)\n if self._couple_input_forget_gates:\n i_time, j_time, o_time = array_ops.split(\n value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)\n f_time = None\n else:\n i_time, j_time, f_time, o_time = array_ops.split(\n value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)\n\n # F-LSTM c_freq\n # input gate activations\n if self._use_peepholes:\n i_freq_g = sigmoid(i_freq + w_i_diag_freqf * c_prev_freq +\n w_i_diag_freqt * c_prev_time)\n else:\n i_freq_g = sigmoid(i_freq)\n # forget gate activations\n if self._couple_input_forget_gates:\n f_freq_g = 1.0 - i_freq_g\n else:\n if self._use_peepholes:\n f_freq_g = sigmoid(f_freq + self._forget_bias + w_f_diag_freqf *\n c_prev_freq + w_f_diag_freqt * c_prev_time)\n else:\n f_freq_g = sigmoid(f_freq + self._forget_bias)\n # cell state\n c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,\n self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n\n # T-LSTM c_freq\n # input gate activations\n if self._use_peepholes:\n if self._share_time_frequency_weights:\n i_time_g = sigmoid(i_time + w_i_diag_freqf * c_prev_freq +\n w_i_diag_freqt * c_prev_time)\n else:\n i_time_g = sigmoid(i_time + w_i_diag_timef * c_prev_freq +\n w_i_diag_timet * c_prev_time)\n else:\n i_time_g = sigmoid(i_time)\n # forget gate activations\n if self._couple_input_forget_gates:\n f_time_g = 1.0 - i_time_g\n else:\n if self._use_peepholes:\n if self._share_time_frequency_weights:\n f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_freqf *\n c_prev_freq + w_f_diag_freqt * c_prev_time)\n else:\n f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_timef *\n c_prev_freq + w_f_diag_timet * c_prev_time)\n else:\n f_time_g = sigmoid(f_time + self._forget_bias)\n # cell state\n c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,\n self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n\n # F-LSTM m_freq\n if self._use_peepholes:\n m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +\n w_o_diag_freqt * c_time) * tanh(c_freq)\n else:\n m_freq = sigmoid(o_freq) * tanh(c_freq)\n\n # T-LSTM m_time\n if self._use_peepholes:\n if self._share_time_frequency_weights:\n m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +\n w_o_diag_freqt * c_time) * tanh(c_time)\n else:\n m_time = sigmoid(o_time + w_o_diag_timef * c_freq +\n w_o_diag_timet * c_time) * tanh(c_time)\n else:\n m_time = sigmoid(o_time) * tanh(c_time)\n\n m_prev_freq = m_freq\n c_prev_freq = c_freq\n # Concatenate the outputs for T-LSTM and F-LSTM for each shift\n if freq_index == 0:\n state_out_lst = [c_time, m_time]\n m_out_lst = [m_time, m_freq]\n else:\n state_out_lst.extend([c_time, 
m_time])\n m_out_lst.extend([m_time, m_freq])\n\n return m_out_lst, state_out_lst\n\n def _make_tf_features(self, input_feat, slice_offset=0):\n \"\"\"Make the frequency features.\n\n Args:\n input_feat: input Tensor, 2D, [batch, num_units].\n slice_offset: (optional) Python int, default 0, the slicing offset is only\n used for the backward processing in the BidirectionalGridLSTMCell. It\n specifies a different starting point instead of always 0 to enable the\n forward and backward processing look at different frequency blocks.\n\n Returns:\n A list of frequency features, with each element containing:\n - A 2D, [batch, output_dim], Tensor representing the time-frequency\n feature for that frequency index. Here output_dim is feature_size.\n Raises:\n ValueError: if input_size cannot be inferred from static shape inference.\n \"\"\"\n input_size = input_feat.get_shape().with_rank(2)[-1].value\n if input_size is None:\n raise ValueError(\"Cannot infer input_size from static shape inference.\")\n if slice_offset > 0:\n # Padding to the end\n inputs = array_ops.pad(input_feat,\n array_ops.constant(\n [0, 0, 0, slice_offset],\n shape=[2, 2],\n dtype=dtypes.int32), \"CONSTANT\")\n elif slice_offset < 0:\n # Padding to the front\n inputs = array_ops.pad(input_feat,\n array_ops.constant(\n [0, 0, -slice_offset, 0],\n shape=[2, 2],\n dtype=dtypes.int32), \"CONSTANT\")\n slice_offset = 0\n else:\n inputs = input_feat\n freq_inputs = []\n if not self._start_freqindex_list:\n if len(self._num_frequency_blocks) != 1:\n raise ValueError(\"Length of num_frequency_blocks\"\n \" is not 1, but instead is %d\",\n len(self._num_frequency_blocks))\n num_feats = int(\n (input_size - self._feature_size) / (self._frequency_skip)) + 1\n if num_feats != self._num_frequency_blocks[0]:\n raise ValueError(\n \"Invalid num_frequency_blocks, requires %d but gets %d, please\"\n \" check the input size and filter config are correct.\" %\n (self._num_frequency_blocks[0], num_feats))\n block_inputs = []\n for f in range(num_feats):\n cur_input = array_ops.slice(\n inputs, [0, slice_offset + f * self._frequency_skip],\n [-1, self._feature_size])\n block_inputs.append(cur_input)\n freq_inputs.append(block_inputs)\n else:\n if len(self._start_freqindex_list) != len(self._end_freqindex_list):\n raise ValueError(\"Length of start and end freqindex_list\"\n \" does not match %d %d\",\n len(self._start_freqindex_list),\n len(self._end_freqindex_list))\n if len(self._num_frequency_blocks) != len(self._start_freqindex_list):\n raise ValueError(\"Length of num_frequency_blocks\"\n \" is not equal to start_freqindex_list %d %d\",\n len(self._num_frequency_blocks),\n len(self._start_freqindex_list))\n for b in range(len(self._start_freqindex_list)):\n start_index = self._start_freqindex_list[b]\n end_index = self._end_freqindex_list[b]\n cur_size = end_index - start_index\n block_feats = int(\n (cur_size - self._feature_size) / (self._frequency_skip)) + 1\n if block_feats != self._num_frequency_blocks[b]:\n raise ValueError(\n \"Invalid num_frequency_blocks, requires %d but gets %d, please\"\n \" check the input size and filter config are correct.\" %\n (self._num_frequency_blocks[b], block_feats))\n block_inputs = []\n for f in range(block_feats):\n cur_input = array_ops.slice(\n inputs,\n [0, start_index + slice_offset + f * self._frequency_skip],\n [-1, self._feature_size])\n block_inputs.append(cur_input)\n freq_inputs.append(block_inputs)\n return freq_inputs\n\n\nclass BidirectionalGridLSTMCell(GridLSTMCell):\n \"\"\"Bidirectional 
GridLstm cell.\n\n The bidirection connection is only used in the frequency direction, which\n hence doesn't affect the time direction's real-time processing that is\n required for online recognition systems.\n The current implementation uses different weights for the two directions.\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n share_time_frequency_weights=False,\n cell_clip=None,\n initializer=None,\n num_unit_shards=1,\n forget_bias=1.0,\n feature_size=None,\n frequency_skip=None,\n num_frequency_blocks=None,\n start_freqindex_list=None,\n end_freqindex_list=None,\n couple_input_forget_gates=False,\n backward_slice_offset=0,\n reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: (optional) bool, default False. Set True to enable\n diagonal/peephole connections.\n share_time_frequency_weights: (optional) bool, default False. Set True to\n enable shared cell weights between time and frequency LSTMs.\n cell_clip: (optional) A float value, default None, if provided the cell\n state is clipped by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices, default None.\n num_unit_shards: (optional) int, default 1, How to split the weight\n matrix. If > 1, the weight matrix is stored across num_unit_shards.\n forget_bias: (optional) float, default 1.0, The initial bias of the\n forget gates, used to reduce the scale of forgetting at the beginning\n of the training.\n feature_size: (optional) int, default None, The size of the input feature\n the LSTM spans over.\n frequency_skip: (optional) int, default None, The amount the LSTM filter\n is shifted by in frequency.\n num_frequency_blocks: [required] A list of frequency blocks needed to\n cover the whole input feature splitting defined by start_freqindex_list\n and end_freqindex_list.\n start_freqindex_list: [optional], list of ints, default None, The\n starting frequency index for each frequency block.\n end_freqindex_list: [optional], list of ints, default None. The ending\n frequency index for each frequency block.\n couple_input_forget_gates: (optional) bool, default False, Whether to\n couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce\n model parameters and computation cost.\n backward_slice_offset: (optional) int32, default 0, the starting offset to\n slice the feature for backward processing.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(BidirectionalGridLSTMCell, self).__init__(\n num_units, use_peepholes, share_time_frequency_weights, cell_clip,\n initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,\n num_frequency_blocks, start_freqindex_list, end_freqindex_list,\n couple_input_forget_gates, True, reuse)\n self._backward_slice_offset = int(backward_slice_offset)\n state_names = \"\"\n for direction in [\"fwd\", \"bwd\"]:\n for block_index in range(len(self._num_frequency_blocks)):\n for freq_index in range(self._num_frequency_blocks[block_index]):\n name_prefix = \"%s_state_f%02d_b%02d\" % (direction, freq_index,\n block_index)\n state_names += (\"%s_c, %s_m,\" % (name_prefix, name_prefix))\n self._state_tuple_type = collections.namedtuple(\n \"BidirectionalGridLSTMStateTuple\", state_names.strip(\",\"))\n self._state_size = self._state_tuple_type(*(\n [num_units, num_units] * self._total_blocks * 2))\n self._output_size = 2 * num_units * self._total_blocks * 2\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, [batch, num_units].\n state: tuple of Tensors, 2D, [batch, state_size].\n\n Returns:\n A tuple containing:\n - A 2D, [batch, output_dim], Tensor representing the output of the LSTM\n after reading \"inputs\" when previous state was \"state\".\n Here output_dim is num_units.\n - A 2D, [batch, state_size], Tensor representing the new state of LSTM\n after reading \"inputs\" when previous state was \"state\".\n Raises:\n ValueError: if an input_size was specified and the provided inputs have\n a different dimension.\n \"\"\"\n batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]\n fwd_inputs = self._make_tf_features(inputs)\n if self._backward_slice_offset:\n bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)\n else:\n bwd_inputs = fwd_inputs\n\n # Forward processing\n with vs.variable_scope(\"fwd\"):\n fwd_m_out_lst = []\n fwd_state_out_lst = []\n for block in range(len(fwd_inputs)):\n fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(\n fwd_inputs[block],\n block,\n state,\n batch_size,\n state_prefix=\"fwd_state\",\n state_is_tuple=True)\n fwd_m_out_lst.extend(fwd_m_out_lst_current)\n fwd_state_out_lst.extend(fwd_state_out_lst_current)\n # Backward processing\n bwd_m_out_lst = []\n bwd_state_out_lst = []\n with vs.variable_scope(\"bwd\"):\n for block in range(len(bwd_inputs)):\n # Reverse the blocks\n bwd_inputs_reverse = bwd_inputs[block][::-1]\n bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(\n bwd_inputs_reverse,\n block,\n state,\n batch_size,\n state_prefix=\"bwd_state\",\n state_is_tuple=True)\n bwd_m_out_lst.extend(bwd_m_out_lst_current)\n bwd_state_out_lst.extend(bwd_state_out_lst_current)\n state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))\n # Outputs are always concated as it is never used separately.\n m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)\n return m_out, state_out\n\n\n# pylint: disable=protected-access\n_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name\n\n# pylint: enable=protected-access\n\n\nclass AttentionCellWrapper(rnn_cell_impl.RNNCell):\n \"\"\"Basic attention cell wrapper.\n\n Implementation based on https://arxiv.org/abs/1409.0473.\n \"\"\"\n\n def __init__(self,\n cell,\n attn_length,\n attn_size=None,\n attn_vec_size=None,\n input_size=None,\n state_is_tuple=True,\n reuse=None):\n 
\"\"\"Create a cell with attention.\n\n Args:\n cell: an RNNCell, an attention is added to it.\n attn_length: integer, the size of an attention window.\n attn_size: integer, the size of an attention vector. Equal to\n cell.output_size by default.\n attn_vec_size: integer, the number of convolutional features calculated\n on attention state and a size of the hidden layer built from\n base cell state. Equal attn_size to by default.\n input_size: integer, the size of a hidden linear layer,\n built from inputs and attention. Derived from the input tensor\n by default.\n state_is_tuple: If True, accepted and returned states are n-tuples, where\n `n = len(cells)`. By default (False), the states are all\n concatenated along the column axis.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n\n Raises:\n TypeError: if cell is not an RNNCell.\n ValueError: if cell returns a state tuple but the flag\n `state_is_tuple` is `False` or if attn_length is zero or less.\n \"\"\"\n super(AttentionCellWrapper, self).__init__(_reuse=reuse)\n rnn_cell_impl.assert_like_rnncell(\"cell\", cell)\n if nest.is_sequence(cell.state_size) and not state_is_tuple:\n raise ValueError(\n \"Cell returns tuple of states, but the flag \"\n \"state_is_tuple is not set. State size is: %s\" % str(cell.state_size))\n if attn_length <= 0:\n raise ValueError(\n \"attn_length should be greater than zero, got %s\" % str(attn_length))\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n if attn_size is None:\n attn_size = cell.output_size\n if attn_vec_size is None:\n attn_vec_size = attn_size\n self._state_is_tuple = state_is_tuple\n self._cell = cell\n self._attn_vec_size = attn_vec_size\n self._input_size = input_size\n self._attn_size = attn_size\n self._attn_length = attn_length\n self._reuse = reuse\n self._linear1 = None\n self._linear2 = None\n self._linear3 = None\n\n @property\n def state_size(self):\n size = (self._cell.state_size, self._attn_size,\n self._attn_size * self._attn_length)\n if self._state_is_tuple:\n return size\n else:\n return sum(list(size))\n\n @property\n def output_size(self):\n return self._attn_size\n\n def call(self, inputs, state):\n \"\"\"Long short-term memory cell with attention (LSTMA).\"\"\"\n if self._state_is_tuple:\n state, attns, attn_states = state\n else:\n states = state\n state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])\n attns = array_ops.slice(states, [0, self._cell.state_size],\n [-1, self._attn_size])\n attn_states = array_ops.slice(\n states, [0, self._cell.state_size + self._attn_size],\n [-1, self._attn_size * self._attn_length])\n attn_states = array_ops.reshape(attn_states,\n [-1, self._attn_length, self._attn_size])\n input_size = self._input_size\n if input_size is None:\n input_size = inputs.get_shape().as_list()[1]\n if self._linear1 is None:\n self._linear1 = _Linear([inputs, attns], input_size, True)\n inputs = self._linear1([inputs, attns])\n cell_output, new_state = self._cell(inputs, state)\n if self._state_is_tuple:\n new_state_cat = array_ops.concat(nest.flatten(new_state), 1)\n else:\n new_state_cat = new_state\n new_attns, new_attn_states = self._attention(new_state_cat, attn_states)\n with vs.variable_scope(\"attn_output_projection\"):\n if self._linear2 is None:\n self._linear2 = _Linear([cell_output, 
new_attns], self._attn_size, True)\n output = self._linear2([cell_output, new_attns])\n new_attn_states = array_ops.concat(\n [new_attn_states, array_ops.expand_dims(output, 1)], 1)\n new_attn_states = array_ops.reshape(\n new_attn_states, [-1, self._attn_length * self._attn_size])\n new_state = (new_state, new_attns, new_attn_states)\n if not self._state_is_tuple:\n new_state = array_ops.concat(list(new_state), 1)\n return output, new_state\n\n def _attention(self, query, attn_states):\n conv2d = nn_ops.conv2d\n reduce_sum = math_ops.reduce_sum\n softmax = nn_ops.softmax\n tanh = math_ops.tanh\n\n with vs.variable_scope(\"attention\"):\n k = vs.get_variable(\"attn_w\",\n [1, 1, self._attn_size, self._attn_vec_size])\n v = vs.get_variable(\"attn_v\", [self._attn_vec_size])\n hidden = array_ops.reshape(attn_states,\n [-1, self._attn_length, 1, self._attn_size])\n hidden_features = conv2d(hidden, k, [1, 1, 1, 1], \"SAME\")\n if self._linear3 is None:\n self._linear3 = _Linear(query, self._attn_vec_size, True)\n y = self._linear3(query)\n y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])\n s = reduce_sum(v * tanh(hidden_features + y), [2, 3])\n a = softmax(s)\n d = reduce_sum(\n array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])\n new_attns = array_ops.reshape(d, [-1, self._attn_size])\n new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])\n return new_attns, new_attn_states\n\n\nclass HighwayWrapper(rnn_cell_impl.RNNCell):\n \"\"\"RNNCell wrapper that adds highway connection on cell input and output.\n\n Based on:\n R. K. Srivastava, K. Greff, and J. Schmidhuber, \"Highway networks\",\n arXiv preprint arXiv:1505.00387, 2015.\n https://arxiv.org/abs/1505.00387\n \"\"\"\n\n def __init__(self,\n cell,\n couple_carry_transform_gates=True,\n carry_bias_init=1.0):\n \"\"\"Constructs a `HighwayWrapper` for `cell`.\n\n Args:\n cell: An instance of `RNNCell`.\n couple_carry_transform_gates: boolean, should the Carry and Transform gate\n be coupled.\n carry_bias_init: float, carry gates bias initialization.\n \"\"\"\n self._cell = cell\n self._couple_carry_transform_gates = couple_carry_transform_gates\n self._carry_bias_init = carry_bias_init\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def _highway(self, inp, out):\n input_size = inp.get_shape().with_rank(2)[1].value\n carry_weight = vs.get_variable(\"carry_w\", [input_size, input_size])\n carry_bias = vs.get_variable(\n \"carry_b\", [input_size],\n initializer=init_ops.constant_initializer(self._carry_bias_init))\n carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))\n if self._couple_carry_transform_gates:\n transform = 1 - carry\n else:\n transform_weight = vs.get_variable(\"transform_w\",\n [input_size, input_size])\n transform_bias = vs.get_variable(\n \"transform_b\", [input_size],\n initializer=init_ops.constant_initializer(-self._carry_bias_init))\n transform = math_ops.sigmoid(\n nn_ops.xw_plus_b(inp, transform_weight, transform_bias))\n return inp * carry + out * transform\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell and add its inputs to its outputs.\n\n Args:\n inputs: cell inputs.\n state: cell state.\n scope: optional cell scope.\n\n Returns:\n Tuple of cell outputs and new 
state.\n\n Raises:\n TypeError: If cell inputs and outputs have different structure (type).\n ValueError: If cell inputs and outputs have different structure (value).\n \"\"\"\n outputs, new_state = self._cell(inputs, state, scope=scope)\n nest.assert_same_structure(inputs, outputs)\n\n # Ensure shapes match\n def assert_shape_match(inp, out):\n inp.get_shape().assert_is_compatible_with(out.get_shape())\n\n nest.map_structure(assert_shape_match, inputs, outputs)\n res_outputs = nest.map_structure(self._highway, inputs, outputs)\n return (res_outputs, new_state)\n\n\nclass LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"LSTM unit with layer normalization and recurrent dropout.\n\n This class adds layer normalization and recurrent dropout to a\n basic LSTM unit. Layer normalization implementation is based on:\n\n https://arxiv.org/abs/1607.06450.\n\n \"Layer Normalization\"\n Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton\n\n and is applied before the internal nonlinearities.\n Recurrent dropout is based on:\n\n https://arxiv.org/abs/1603.05118\n\n \"Recurrent Dropout without Memory Loss\"\n Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n input_size=None,\n activation=math_ops.tanh,\n layer_norm=True,\n norm_gain=1.0,\n norm_shift=0.0,\n dropout_keep_prob=1.0,\n dropout_prob_seed=None,\n reuse=None):\n \"\"\"Initializes the basic LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n input_size: Deprecated and unused.\n activation: Activation function of the inner states.\n layer_norm: If `True`, layer normalization will be applied.\n norm_gain: float, The layer normalization gain initial value. If\n `layer_norm` has been set to `False`, this argument will be ignored.\n norm_shift: float, The layer normalization shift initial value. If\n `layer_norm` has been set to `False`, this argument will be ignored.\n dropout_keep_prob: unit Tensor or float between 0 and 1 representing the\n recurrent dropout probability value. If float and 1.0, no dropout will\n be applied.\n dropout_prob_seed: (optional) integer, the randomness seed.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)\n\n if input_size is not None:\n logging.warn(\"%s: The input_size parameter is deprecated.\", self)\n\n self._num_units = num_units\n self._activation = activation\n self._forget_bias = forget_bias\n self._keep_prob = dropout_keep_prob\n self._seed = dropout_prob_seed\n self._layer_norm = layer_norm\n self._norm_gain = norm_gain\n self._norm_shift = norm_shift\n self._reuse = reuse\n\n @property\n def state_size(self):\n return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def _norm(self, inp, scope, dtype=dtypes.float32):\n shape = inp.get_shape()[-1:]\n gamma_init = init_ops.constant_initializer(self._norm_gain)\n beta_init = init_ops.constant_initializer(self._norm_shift)\n with vs.variable_scope(scope):\n # Initialize beta and gamma for use by layer_norm.\n vs.get_variable(\"gamma\", shape=shape, initializer=gamma_init, dtype=dtype)\n vs.get_variable(\"beta\", shape=shape, initializer=beta_init, dtype=dtype)\n normalized = layers.layer_norm(inp, reuse=True, scope=scope)\n return normalized\n\n def _linear(self, args):\n out_size = 4 * self._num_units\n proj_size = args.get_shape()[-1]\n dtype = args.dtype\n weights = vs.get_variable(\"kernel\", [proj_size, out_size], dtype=dtype)\n out = math_ops.matmul(args, weights)\n if not self._layer_norm:\n bias = vs.get_variable(\"bias\", [out_size], dtype=dtype)\n out = nn_ops.bias_add(out, bias)\n return out\n\n def call(self, inputs, state):\n \"\"\"LSTM cell with layer normalization and recurrent dropout.\"\"\"\n c, h = state\n args = array_ops.concat([inputs, h], 1)\n concat = self._linear(args)\n dtype = args.dtype\n\n i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)\n if self._layer_norm:\n i = self._norm(i, \"input\", dtype=dtype)\n j = self._norm(j, \"transform\", dtype=dtype)\n f = self._norm(f, \"forget\", dtype=dtype)\n o = self._norm(o, \"output\", dtype=dtype)\n\n g = self._activation(j)\n if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:\n g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)\n\n new_c = (\n c * math_ops.sigmoid(f + self._forget_bias) + math_ops.sigmoid(i) * g)\n if self._layer_norm:\n new_c = self._norm(new_c, \"state\", dtype=dtype)\n new_h = self._activation(new_c) * math_ops.sigmoid(o)\n\n new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)\n return new_h, new_state\n\n\nclass NASCell(rnn_cell_impl.RNNCell):\n \"\"\"Neural Architecture Search (NAS) recurrent network cell.\n\n This implements the recurrent cell from the paper:\n\n https://arxiv.org/abs/1611.01578\n\n Barret Zoph and Quoc V. Le.\n \"Neural Architecture Search with Reinforcement Learning\" Proc. ICLR 2017.\n\n The class uses an optional projection layer.\n \"\"\"\n\n def __init__(self, num_units, num_proj=None, use_biases=False, reuse=None):\n \"\"\"Initialize the parameters for a NAS cell.\n\n Args:\n num_units: int, The number of units in the NAS cell\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n use_biases: (optional) bool, If True then use biases within the cell. This\n is False by default.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(NASCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._num_proj = num_proj\n self._use_biases = use_biases\n self._reuse = reuse\n\n if num_proj is not None:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)\n self._output_size = num_proj\n else:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of NAS Cell.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: This must be a tuple of state Tensors, both `2-D`, with column\n sizes `c_state` and `m_state`.\n\n Returns:\n A tuple containing:\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n NAS Cell after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of NAS Cell after reading `inputs`\n when the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = math_ops.sigmoid\n tanh = math_ops.tanh\n relu = nn_ops.relu\n\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n\n (c_prev, m_prev) = state\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n # Variables for the NAS cell. W_m is all matrices multiplying the\n # hiddenstate and W_inputs is all matrices multiplying the inputs.\n concat_w_m = vs.get_variable(\"recurrent_kernel\",\n [num_proj, 8 * self._num_units], dtype)\n concat_w_inputs = vs.get_variable(\n \"kernel\", [input_size.value, 8 * self._num_units], dtype)\n\n m_matrix = math_ops.matmul(m_prev, concat_w_m)\n inputs_matrix = math_ops.matmul(inputs, concat_w_inputs)\n\n if self._use_biases:\n b = vs.get_variable(\n \"bias\",\n shape=[8 * self._num_units],\n initializer=init_ops.zeros_initializer(),\n dtype=dtype)\n m_matrix = nn_ops.bias_add(m_matrix, b)\n\n # The NAS cell branches into 8 different splits for both the hiddenstate\n # and the input\n m_matrix_splits = array_ops.split(\n axis=1, num_or_size_splits=8, value=m_matrix)\n inputs_matrix_splits = array_ops.split(\n axis=1, num_or_size_splits=8, value=inputs_matrix)\n\n # First layer\n layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])\n layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])\n layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])\n layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])\n layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])\n layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])\n layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])\n layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])\n\n # Second layer\n l2_0 = tanh(layer1_0 * layer1_1)\n l2_1 = tanh(layer1_2 + layer1_3)\n l2_2 = tanh(layer1_4 * layer1_5)\n l2_3 = sigmoid(layer1_6 + layer1_7)\n\n # Inject the cell\n l2_0 = tanh(l2_0 + c_prev)\n\n # Third layer\n l3_0_pre = l2_0 * l2_1\n new_c = l3_0_pre # create new cell\n l3_0 = l3_0_pre\n l3_1 = tanh(l2_2 + l2_3)\n\n # Final layer\n new_m 
= tanh(l3_0 * l3_1)\n\n # Projection layer if specified\n if self._num_proj is not None:\n concat_w_proj = vs.get_variable(\"projection_weights\",\n [self._num_units, self._num_proj], dtype)\n new_m = math_ops.matmul(new_m, concat_w_proj)\n\n new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)\n return new_m, new_state\n\n\nclass UGRNNCell(rnn_cell_impl.RNNCell):\n \"\"\"Update Gate Recurrent Neural Network (UGRNN) cell.\n\n Compromise between a LSTM/GRU and a vanilla RNN. There is only one\n gate, and that is to determine whether the unit should be\n integrating or computing instantaneously. This is the recurrent\n idea of the feedforward Highway Network.\n\n This implements the recurrent cell from the paper:\n\n https://arxiv.org/abs/1611.09913\n\n Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.\n \"Capacity and Trainability in Recurrent Neural Networks\" Proc. ICLR 2017.\n \"\"\"\n\n def __init__(self,\n num_units,\n initializer=None,\n forget_bias=1.0,\n activation=math_ops.tanh,\n reuse=None):\n \"\"\"Initialize the parameters for an UGRNN cell.\n\n Args:\n num_units: int, The number of units in the UGRNN cell\n initializer: (optional) The initializer to use for the weight matrices.\n forget_bias: (optional) float, default 1.0, The initial bias of the\n forget gate, used to reduce the scale of forgetting at the beginning\n of the training.\n activation: (optional) Activation function of the inner states.\n Default is `tf.tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(UGRNNCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._initializer = initializer\n self._forget_bias = forget_bias\n self._activation = activation\n self._reuse = reuse\n self._linear = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def call(self, inputs, state):\n \"\"\"Run one step of UGRNN.\n\n Args:\n inputs: input Tensor, 2D, batch x input size.\n state: state Tensor, 2D, batch x num units.\n\n Returns:\n new_output: batch x num units, Tensor representing the output of the UGRNN\n after reading `inputs` when previous state was `state`. 
Identical to\n `new_state`.\n new_state: batch x num units, Tensor representing the state of the UGRNN\n after reading `inputs` when previous state was `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = math_ops.sigmoid\n\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n\n with vs.variable_scope(\n vs.get_variable_scope(), initializer=self._initializer):\n cell_inputs = array_ops.concat([inputs, state], 1)\n if self._linear is None:\n self._linear = _Linear(cell_inputs, 2 * self._num_units, True)\n rnn_matrix = self._linear(cell_inputs)\n\n [g_act, c_act] = array_ops.split(\n axis=1, num_or_size_splits=2, value=rnn_matrix)\n\n c = self._activation(c_act)\n g = sigmoid(g_act + self._forget_bias)\n new_state = g * state + (1.0 - g) * c\n new_output = new_state\n\n return new_output, new_state\n\n\nclass IntersectionRNNCell(rnn_cell_impl.RNNCell):\n \"\"\"Intersection Recurrent Neural Network (+RNN) cell.\n\n Architecture with coupled recurrent gate as well as coupled depth\n gate, designed to improve information flow through stacked RNNs. As the\n architecture uses depth gating, the dimensionality of the depth\n output (y) also should not change through depth (input size == output size).\n To achieve this, the first layer of a stacked Intersection RNN projects\n the inputs to N (num units) dimensions. Therefore when initializing an\n IntersectionRNNCell, one should set `num_in_proj = N` for the first layer\n and use default settings for subsequent layers.\n\n This implements the recurrent cell from the paper:\n\n https://arxiv.org/abs/1611.09913\n\n Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.\n \"Capacity and Trainability in Recurrent Neural Networks\" Proc. ICLR 2017.\n\n The Intersection RNN is built for use in deeply stacked\n RNNs so it may not achieve best performance with depth 1.\n \"\"\"\n\n def __init__(self,\n num_units,\n num_in_proj=None,\n initializer=None,\n forget_bias=1.0,\n y_activation=nn_ops.relu,\n reuse=None):\n \"\"\"Initialize the parameters for an +RNN cell.\n\n Args:\n num_units: int, The number of units in the +RNN cell\n num_in_proj: (optional) int, The input dimensionality for the RNN.\n If creating the first layer of an +RNN, this should be set to\n `num_units`. Otherwise, this should be set to `None` (default).\n If `None`, dimensionality of `inputs` should be equal to `num_units`,\n otherwise ValueError is thrown.\n initializer: (optional) The initializer to use for the weight matrices.\n forget_bias: (optional) float, default 1.0, The initial bias of the\n forget gates, used to reduce the scale of forgetting at the beginning\n of the training.\n y_activation: (optional) Activation function of the states passed\n through depth. Default is 'tf.nn.relu`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(IntersectionRNNCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._initializer = initializer\n self._forget_bias = forget_bias\n self._num_input_proj = num_in_proj\n self._y_activation = y_activation\n self._reuse = reuse\n self._linear1 = None\n self._linear2 = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def call(self, inputs, state):\n \"\"\"Run one step of the Intersection RNN.\n\n Args:\n inputs: input Tensor, 2D, batch x input size.\n state: state Tensor, 2D, batch x num units.\n\n Returns:\n new_y: batch x num units, Tensor representing the output of the +RNN\n after reading `inputs` when previous state was `state`.\n new_state: batch x num units, Tensor representing the state of the +RNN\n after reading `inputs` when previous state was `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from `inputs` via\n static shape inference.\n ValueError: If input size != output size (these must be equal when\n using the Intersection RNN).\n \"\"\"\n sigmoid = math_ops.sigmoid\n tanh = math_ops.tanh\n\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n\n with vs.variable_scope(\n vs.get_variable_scope(), initializer=self._initializer):\n # read-in projections (should be used for first layer in deep +RNN\n # to transform size of inputs from I --> N)\n if input_size.value != self._num_units:\n if self._num_input_proj:\n with vs.variable_scope(\"in_projection\"):\n if self._linear1 is None:\n self._linear1 = _Linear(inputs, self._num_units, True)\n inputs = self._linear1(inputs)\n else:\n raise ValueError(\"Must have input size == output size for \"\n \"Intersection RNN. 
To fix, num_in_proj should \"\n \"be set to num_units at cell init.\")\n\n n_dim = i_dim = self._num_units\n cell_inputs = array_ops.concat([inputs, state], 1)\n if self._linear2 is None:\n self._linear2 = _Linear(cell_inputs, 2 * n_dim + 2 * i_dim, True)\n rnn_matrix = self._linear2(cell_inputs)\n\n gh_act = rnn_matrix[:, :n_dim] # b x n\n h_act = rnn_matrix[:, n_dim:2 * n_dim] # b x n\n gy_act = rnn_matrix[:, 2 * n_dim:2 * n_dim + i_dim] # b x i\n y_act = rnn_matrix[:, 2 * n_dim + i_dim:2 * n_dim + 2 * i_dim] # b x i\n\n h = tanh(h_act)\n y = self._y_activation(y_act)\n gh = sigmoid(gh_act + self._forget_bias)\n gy = sigmoid(gy_act + self._forget_bias)\n\n new_state = gh * state + (1.0 - gh) * h # passed thru time\n new_y = gy * inputs + (1.0 - gy) * y # passed thru depth\n\n return new_y, new_state\n\n\n_REGISTERED_OPS = None\n\n\nclass CompiledWrapper(rnn_cell_impl.RNNCell):\n \"\"\"Wraps step execution in an XLA JIT scope.\"\"\"\n\n def __init__(self, cell, compile_stateful=False):\n \"\"\"Create CompiledWrapper cell.\n\n Args:\n cell: Instance of `RNNCell`.\n compile_stateful: Whether to compile stateful ops like initializers\n and random number generators (default: False).\n \"\"\"\n self._cell = cell\n self._compile_stateful = compile_stateful\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def __call__(self, inputs, state, scope=None):\n if self._compile_stateful:\n compile_ops = True\n else:\n\n def compile_ops(node_def):\n global _REGISTERED_OPS\n if _REGISTERED_OPS is None:\n _REGISTERED_OPS = op_def_registry.get_registered_ops()\n return not _REGISTERED_OPS[node_def.op].is_stateful\n\n with jit.experimental_jit_scope(compile_ops=compile_ops):\n return self._cell(inputs, state, scope=scope)\n\n\ndef _random_exp_initializer(minval, maxval, seed=None, dtype=dtypes.float32):\n \"\"\"Returns an exponential distribution initializer.\n\n Args:\n minval: float or a scalar float Tensor. With value > 0. Lower bound of the\n range of random values to generate.\n maxval: float or a scalar float Tensor. With value > minval. Upper bound of\n the range of random values to generate.\n seed: An integer. Used to create random seeds.\n dtype: The data type.\n\n Returns:\n An initializer that generates tensors with an exponential distribution.\n \"\"\"\n\n def _initializer(shape, dtype=dtype, partition_info=None):\n del partition_info # Unused.\n return math_ops.exp(\n random_ops.random_uniform(\n shape, math_ops.log(minval), math_ops.log(maxval), dtype,\n seed=seed))\n\n return _initializer\n\n\nclass PhasedLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Phased LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1610.09513v1.pdf\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n leak=0.001,\n ratio_on=0.1,\n trainable_ratio_on=True,\n period_init_min=1.0,\n period_init_max=1000.0,\n reuse=None):\n \"\"\"Initialize the Phased LSTM cell.\n\n Args:\n num_units: int, The number of units in the Phased LSTM cell.\n use_peepholes: bool, set True to enable peephole connections.\n leak: float or scalar float Tensor with value in [0, 1]. Leak applied\n during training.\n ratio_on: float or scalar float Tensor with value in [0, 1]. 
Ratio of the\n period during which the gates are open.\n trainable_ratio_on: bool, whether ratio_on is trainable.\n period_init_min: float or scalar float Tensor. With value > 0.\n Minimum value of the initialized period.\n The period values are initialized by drawing from the distribution:\n e^U(log(period_init_min), log(period_init_max))\n Where U(.,.) is the uniform distribution.\n period_init_max: float or scalar float Tensor.\n With value > period_init_min. Maximum value of the initialized period.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(PhasedLSTMCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._leak = leak\n self._ratio_on = ratio_on\n self._trainable_ratio_on = trainable_ratio_on\n self._period_init_min = period_init_min\n self._period_init_max = period_init_max\n self._reuse = reuse\n self._linear1 = None\n self._linear2 = None\n self._linear3 = None\n\n @property\n def state_size(self):\n return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def _mod(self, x, y):\n \"\"\"Modulo function that propagates x gradients.\"\"\"\n return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x\n\n def _get_cycle_ratio(self, time, phase, period):\n \"\"\"Compute the cycle ratio in the dtype of the time.\"\"\"\n phase_casted = math_ops.cast(phase, dtype=time.dtype)\n period_casted = math_ops.cast(period, dtype=time.dtype)\n shifted_time = time - phase_casted\n cycle_ratio = self._mod(shifted_time, period_casted) / period_casted\n return math_ops.cast(cycle_ratio, dtype=dtypes.float32)\n\n def call(self, inputs, state):\n \"\"\"Phased LSTM Cell.\n\n Args:\n inputs: A tuple of 2 Tensor.\n The first Tensor has shape [batch, 1], and type float32 or float64.\n It stores the time.\n The second Tensor has shape [batch, features_size], and type float32.\n It stores the features.\n state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.\n\n Returns:\n A tuple containing:\n - A Tensor of float32, and shape [batch_size, num_units], representing the\n output of the cell.\n - A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape\n [batch_size, num_units], representing the new state and the output.\n \"\"\"\n (c_prev, h_prev) = state\n (time, x) = inputs\n\n in_mask_gates = [x, h_prev]\n if self._use_peepholes:\n in_mask_gates.append(c_prev)\n\n with vs.variable_scope(\"mask_gates\"):\n if self._linear1 is None:\n self._linear1 = _Linear(in_mask_gates, 2 * self._num_units, True)\n\n mask_gates = math_ops.sigmoid(self._linear1(in_mask_gates))\n [input_gate, forget_gate] = array_ops.split(\n axis=1, num_or_size_splits=2, value=mask_gates)\n\n with vs.variable_scope(\"new_input\"):\n if self._linear2 is None:\n self._linear2 = _Linear([x, h_prev], self._num_units, True)\n new_input = math_ops.tanh(self._linear2([x, h_prev]))\n\n new_c = (c_prev * forget_gate + input_gate * new_input)\n\n in_out_gate = [x, h_prev]\n if self._use_peepholes:\n in_out_gate.append(new_c)\n\n with vs.variable_scope(\"output_gate\"):\n if self._linear3 is None:\n self._linear3 = _Linear(in_out_gate, self._num_units, True)\n output_gate = math_ops.sigmoid(self._linear3(in_out_gate))\n\n new_h = math_ops.tanh(new_c) * output_gate\n\n period = vs.get_variable(\n \"period\", [self._num_units],\n 
initializer=_random_exp_initializer(self._period_init_min,\n self._period_init_max))\n phase = vs.get_variable(\n \"phase\", [self._num_units],\n initializer=init_ops.random_uniform_initializer(0.,\n period.initial_value))\n ratio_on = vs.get_variable(\n \"ratio_on\", [self._num_units],\n initializer=init_ops.constant_initializer(self._ratio_on),\n trainable=self._trainable_ratio_on)\n\n cycle_ratio = self._get_cycle_ratio(time, phase, period)\n\n k_up = 2 * cycle_ratio / ratio_on\n k_down = 2 - k_up\n k_closed = self._leak * cycle_ratio\n\n k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)\n k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)\n\n new_c = k * new_c + (1 - k) * c_prev\n new_h = k * new_h + (1 - k) * h_prev\n\n new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)\n\n return new_h, new_state\n\n\nclass ConvLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self,\n conv_ndims,\n input_shape,\n output_channels,\n kernel_shape,\n use_bias=True,\n skip_connection=False,\n forget_bias=1.0,\n initializers=None,\n name=\"conv_lstm_cell\"):\n \"\"\"Construct ConvLSTMCell.\n\n Args:\n conv_ndims: Convolution dimensionality (1, 2 or 3).\n input_shape: Shape of the input as int tuple, excluding the batch size.\n output_channels: int, number of output channels of the conv LSTM.\n kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).\n use_bias: (bool) Use bias in convolutions.\n skip_connection: If set to `True`, concatenate the input to the\n output of the conv LSTM. Default: `False`.\n forget_bias: Forget bias.\n initializers: Unused.\n name: Name of the module.\n\n Raises:\n ValueError: If `skip_connection` is `True` and stride is different from 1\n or if `input_shape` is incompatible with `conv_ndims`.\n \"\"\"\n super(ConvLSTMCell, self).__init__(name=name)\n\n if conv_ndims != len(input_shape) - 1:\n raise ValueError(\"Invalid input_shape {} for conv_ndims={}.\".format(\n input_shape, conv_ndims))\n\n self._conv_ndims = conv_ndims\n self._input_shape = input_shape\n self._output_channels = output_channels\n self._kernel_shape = kernel_shape\n self._use_bias = use_bias\n self._forget_bias = forget_bias\n self._skip_connection = skip_connection\n\n self._total_output_channels = output_channels\n if self._skip_connection:\n self._total_output_channels += self._input_shape[-1]\n\n state_size = tensor_shape.TensorShape(\n self._input_shape[:-1] + [self._output_channels])\n self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)\n self._output_size = tensor_shape.TensorShape(\n self._input_shape[:-1] + [self._total_output_channels])\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def call(self, inputs, state, scope=None):\n cell, hidden = state\n new_hidden = _conv([inputs, hidden], self._kernel_shape,\n 4 * self._output_channels, self._use_bias)\n gates = array_ops.split(\n value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)\n\n input_gate, new_input, forget_gate, output_gate = gates\n new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell\n new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)\n output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)\n\n if self._skip_connection:\n output = array_ops.concat([output, inputs], axis=-1)\n new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)\n return 
output, new_state\n\n\nclass Conv1DLSTMCell(ConvLSTMCell):\n \"\"\"1D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_1d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv1DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)\n\n\nclass Conv2DLSTMCell(ConvLSTMCell):\n \"\"\"2D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_2d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv2DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)\n\n\nclass Conv3DLSTMCell(ConvLSTMCell):\n \"\"\"3D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_3d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv3DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)\n\n\ndef _conv(args, filter_size, num_features, bias, bias_start=0.0):\n \"\"\"Convolution.\n\n Args:\n args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,\n batch x n, Tensors.\n filter_size: int tuple of filter height and width.\n num_features: int, number of features.\n bias: Whether to use biases in the convolution layer.\n bias_start: starting value to initialize the bias; 0 by default.\n\n Returns:\n A 3D, 4D, or 5D Tensor with shape [batch ... num_features]\n\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3, 4, 5]:\n raise ValueError(\"Conv Linear expects 3D, 4D \"\n \"or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args \"\n \"to be of same Dimension: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n # determine correct conv operation\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length * [1]\n\n # Now the computation.\n kernel = vs.get_variable(\n \"kernel\", filter_size + [total_arg_size_depth, num_features], dtype=dtype)\n if len(args) == 1:\n res = conv_op(args[0], kernel, strides, padding=\"SAME\")\n else:\n res = conv_op(\n array_ops.concat(axis=shape_length - 1, values=args),\n kernel,\n strides,\n padding=\"SAME\")\n if not bias:\n return res\n bias_term = vs.get_variable(\n \"biases\", [num_features],\n dtype=dtype,\n initializer=init_ops.constant_initializer(bias_start, dtype=dtype))\n return res + bias_term\n\n\nclass GLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Group LSTM cell (G-LSTM).\n\n The implementation is based on:\n\n https://arxiv.org/abs/1703.10722\n\n O. Kuchaiev and B. Ginsburg\n \"Factorization Tricks for LSTM Networks\", ICLR 2017 workshop.\n\n In brief, a G-LSTM cell consists of one LSTM sub-cell per group, where each\n sub-cell operates on an evenly-sized sub-vector of the input and produces an\n evenly-sized sub-vector of the output. 
For example, a G-LSTM cell with 128\n units and 4 groups consists of 4 LSTMs sub-cells with 32 units each. If that\n G-LSTM cell is fed a 200-dim input, then each sub-cell receives a 50-dim part\n of the input and produces a 32-dim part of the output.\n \"\"\"\n\n def __init__(self,\n num_units,\n initializer=None,\n num_proj=None,\n number_of_groups=1,\n forget_bias=1.0,\n activation=math_ops.tanh,\n reuse=None):\n \"\"\"Initialize the parameters of G-LSTM cell.\n\n Args:\n num_units: int, The number of units in the G-LSTM cell\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n number_of_groups: (optional) int, number of groups to use.\n If `number_of_groups` is 1, then it should be equivalent to LSTM cell\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training.\n activation: Activation function of the inner states.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already\n has the given variables, an error is raised.\n\n Raises:\n ValueError: If `num_units` or `num_proj` is not divisible by\n `number_of_groups`.\n \"\"\"\n super(GLSTMCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._initializer = initializer\n self._num_proj = num_proj\n self._forget_bias = forget_bias\n self._activation = activation\n self._number_of_groups = number_of_groups\n\n if self._num_units % self._number_of_groups != 0:\n raise ValueError(\"num_units must be divisible by number_of_groups\")\n if self._num_proj:\n if self._num_proj % self._number_of_groups != 0:\n raise ValueError(\"num_proj must be divisible by number_of_groups\")\n self._group_shape = [\n int(self._num_proj / self._number_of_groups),\n int(self._num_units / self._number_of_groups)\n ]\n else:\n self._group_shape = [\n int(self._num_units / self._number_of_groups),\n int(self._num_units / self._number_of_groups)\n ]\n\n if num_proj:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)\n self._output_size = num_proj\n else:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)\n self._output_size = num_units\n self._linear1 = [None] * number_of_groups\n self._linear2 = None\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def _get_input_for_group(self, inputs, group_id, group_size):\n \"\"\"Slices inputs into groups to prepare for processing by cell's groups.\n\n Args:\n inputs: cell input or it's previous state,\n a Tensor, 2D, [batch x num_units]\n group_id: group id, a Scalar, for which to prepare input\n group_size: size of the group\n\n Returns:\n subset of inputs corresponding to group \"group_id\",\n a Tensor, 2D, [batch x num_units/number_of_groups]\n \"\"\"\n return array_ops.slice(\n input_=inputs,\n begin=[0, group_id * group_size],\n size=[self._batch_size, group_size],\n name=(\"GLSTM_group%d_input_generation\" % group_id))\n\n def call(self, inputs, state):\n \"\"\"Run one step of G-LSTM.\n\n Args:\n inputs: input Tensor, 2D, [batch x num_inputs]. num_inputs must be\n statically-known and evenly divisible into groups. 
The innermost\n vectors of the inputs are split into evenly-sized sub-vectors and fed\n into the per-group LSTM sub-cells.\n state: this must be a tuple of state Tensors, both `2-D`, with column\n sizes `c_state` and `m_state`.\n\n Returns:\n A tuple containing:\n\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n G-LSTM after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - LSTMStateTuple representing the new state of G-LSTM cell\n after reading `inputs` when the previous state was `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference, or if the input shape is incompatible\n with the number of groups.\n \"\"\"\n (c_prev, m_prev) = state\n\n self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]\n\n # If the input size is statically-known, calculate and validate its group\n # size. Otherwise, use the output group size.\n input_size = inputs.shape[1].value\n if input_size is None:\n raise ValueError(\"input size must be statically known\")\n if input_size % self._number_of_groups != 0:\n raise ValueError(\n \"input size (%d) must be divisible by number_of_groups (%d)\" %\n (input_size, self._number_of_groups))\n input_group_size = int(input_size / self._number_of_groups)\n\n dtype = inputs.dtype\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope, initializer=self._initializer):\n i_parts = []\n j_parts = []\n f_parts = []\n o_parts = []\n\n for group_id in range(self._number_of_groups):\n with vs.variable_scope(\"group%d\" % group_id):\n x_g_id = array_ops.concat(\n [\n self._get_input_for_group(inputs, group_id, input_group_size),\n self._get_input_for_group(m_prev, group_id,\n self._group_shape[0])\n ],\n axis=1)\n linear = self._linear1[group_id]\n if linear is None:\n linear = _Linear(x_g_id, 4 * self._group_shape[1], False)\n self._linear1[group_id] = linear\n R_k = linear(x_g_id) # pylint: disable=invalid-name\n i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)\n\n i_parts.append(i_k)\n j_parts.append(j_k)\n f_parts.append(f_k)\n o_parts.append(o_k)\n\n bi = vs.get_variable(\n name=\"bias_i\",\n shape=[self._num_units],\n dtype=dtype,\n initializer=init_ops.constant_initializer(0.0, dtype=dtype))\n bj = vs.get_variable(\n name=\"bias_j\",\n shape=[self._num_units],\n dtype=dtype,\n initializer=init_ops.constant_initializer(0.0, dtype=dtype))\n bf = vs.get_variable(\n name=\"bias_f\",\n shape=[self._num_units],\n dtype=dtype,\n initializer=init_ops.constant_initializer(0.0, dtype=dtype))\n bo = vs.get_variable(\n name=\"bias_o\",\n shape=[self._num_units],\n dtype=dtype,\n initializer=init_ops.constant_initializer(0.0, dtype=dtype))\n\n i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)\n j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)\n f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)\n o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)\n\n c = (\n math_ops.sigmoid(f + self._forget_bias) * c_prev +\n math_ops.sigmoid(i) * math_ops.tanh(j))\n m = math_ops.sigmoid(o) * self._activation(c)\n\n if self._num_proj is not None:\n with vs.variable_scope(\"projection\"):\n if self._linear2 is None:\n self._linear2 = _Linear(m, self._num_proj, False)\n m = self._linear2(m)\n\n new_state = rnn_cell_impl.LSTMStateTuple(c, m)\n return m, new_state\n\n\nclass LayerNormLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Long short-term memory unit (LSTM) recurrent network cell.\n\n The 
default non-peephole implementation is based on:\n\n http://www.bioinf.jku.at/publications/older/2604.pdf\n\n S. Hochreiter and J. Schmidhuber.\n \"Long Short-Term Memory\". Neural Computation, 9(8):1735-1780, 1997.\n\n The peephole implementation is based on:\n\n https://research.google.com/pubs/archive/43905.pdf\n\n Hasim Sak, Andrew Senior, and Francoise Beaufays.\n \"Long short-term memory recurrent neural network architectures for\n large scale acoustic modeling.\" INTERSPEECH, 2014.\n\n The class uses optional peep-hole connections, optional cell clipping, and\n an optional projection layer.\n\n Layer normalization implementation is based on:\n\n https://arxiv.org/abs/1607.06450.\n\n \"Layer Normalization\"\n Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton\n\n and is applied before the internal nonlinearities.\n\n \"\"\"\n\n def __init__(self,\n num_units,\n use_peepholes=False,\n cell_clip=None,\n initializer=None,\n num_proj=None,\n proj_clip=None,\n forget_bias=1.0,\n activation=None,\n layer_norm=False,\n norm_gain=1.0,\n norm_shift=0.0,\n reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training. Must set it manually to `0.0` when restoring from\n CudnnLSTM trained checkpoints.\n activation: Activation function of the inner states. Default: `tanh`.\n layer_norm: If `True`, layer normalization will be applied.\n norm_gain: float, The layer normalization gain initial value. If\n `layer_norm` has been set to `False`, this argument will be ignored.\n norm_shift: float, The layer normalization shift initial value. If\n `layer_norm` has been set to `False`, this argument will be ignored.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n CudnnCompatibleLSTMCell instead.\n \"\"\"\n super(LayerNormLSTMCell, self).__init__(_reuse=reuse)\n\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._initializer = initializer\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._forget_bias = forget_bias\n self._activation = activation or math_ops.tanh\n self._layer_norm = layer_norm\n self._norm_gain = norm_gain\n self._norm_shift = norm_shift\n\n if num_proj:\n self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj))\n self._output_size = num_proj\n else:\n self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units))\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def _linear(self,\n args,\n output_size,\n bias,\n bias_initializer=None,\n kernel_initializer=None,\n layer_norm=False):\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a Variable.\n\n Args:\n args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n output_size: int, second dimension of W[i].\n bias: boolean, whether to add a bias term or not.\n bias_initializer: starting value to initialize the bias\n (default is all zeros).\n kernel_initializer: starting value to initialize the weight.\n layer_norm: boolean, whether to apply layer normalization.\n\n\n Returns:\n A 2D Tensor with shape [batch x output_size] taking value\n sum_i(args[i] * W[i]), where each W[i] is a newly created Variable.\n\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size = 0\n shapes = [a.get_shape() for a in args]\n for shape in shapes:\n if shape.ndims != 2:\n raise ValueError(\"linear is expecting 2D arguments: %s\" % shapes)\n if shape[1].value is None:\n raise ValueError(\"linear expects shape[1] to be provided for shape %s, \"\n \"but saw %s\" % (shape, shape[1]))\n else:\n total_arg_size += shape[1].value\n\n dtype = [a.dtype for a in args][0]\n\n # Now the computation.\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope) as outer_scope:\n weights = vs.get_variable(\n \"kernel\", [total_arg_size, output_size],\n dtype=dtype,\n initializer=kernel_initializer)\n if len(args) == 1:\n res = math_ops.matmul(args[0], weights)\n else:\n res = math_ops.matmul(array_ops.concat(args, 1), weights)\n if not bias:\n return res\n with vs.variable_scope(outer_scope) as inner_scope:\n inner_scope.set_partitioner(None)\n if bias_initializer is None:\n bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)\n biases = vs.get_variable(\n \"bias\", [output_size], dtype=dtype, initializer=bias_initializer)\n\n if not layer_norm:\n res = nn_ops.bias_add(res, biases)\n\n return res\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: this must be a tuple of state Tensors,\n both `2-D`, with column sizes `c_state` and\n `m_state`.\n\n Returns:\n A tuple containing:\n\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n LSTM after reading `inputs` when previous 
state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of LSTM after reading `inputs` when\n the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = math_ops.sigmoid\n\n (c_prev, m_prev) = state\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n lstm_matrix = self._linear(\n [inputs, m_prev],\n 4 * self._num_units,\n bias=True,\n bias_initializer=None,\n layer_norm=self._layer_norm)\n i, j, f, o = array_ops.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n\n if self._layer_norm:\n i = _norm(self._norm_gain, self._norm_shift, i, \"input\")\n j = _norm(self._norm_gain, self._norm_shift, j, \"transform\")\n f = _norm(self._norm_gain, self._norm_shift, f, \"forget\")\n o = _norm(self._norm_gain, self._norm_shift, o, \"output\")\n\n # Diagonal connections\n if self._use_peepholes:\n with vs.variable_scope(unit_scope):\n w_f_diag = vs.get_variable(\n \"w_f_diag\", shape=[self._num_units], dtype=dtype)\n w_i_diag = vs.get_variable(\n \"w_i_diag\", shape=[self._num_units], dtype=dtype)\n w_o_diag = vs.get_variable(\n \"w_o_diag\", shape=[self._num_units], dtype=dtype)\n\n if self._use_peepholes:\n c = (\n sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +\n sigmoid(i + w_i_diag * c_prev) * self._activation(j))\n else:\n c = (\n sigmoid(f + self._forget_bias) * c_prev +\n sigmoid(i) * self._activation(j))\n\n if self._layer_norm:\n c = _norm(self._norm_gain, self._norm_shift, c, \"state\")\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n m = sigmoid(o + w_o_diag * c) * self._activation(c)\n else:\n m = sigmoid(o) * self._activation(c)\n\n if self._num_proj is not None:\n with vs.variable_scope(\"projection\"):\n m = self._linear(m, self._num_proj, bias=False)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = (rnn_cell_impl.LSTMStateTuple(c, m))\n return m, new_state\n\n\nclass SRUCell(rnn_cell_impl.LayerRNNCell):\n \"\"\"SRU, Simple Recurrent Unit.\n\n Implementation based on\n Training RNNs as Fast as CNNs (cf. https://arxiv.org/abs/1709.02755).\n\n This variation of RNN cell is characterized by the simplified data\n dependence\n between hidden states of two consecutive time steps. Traditionally, hidden\n states from a cell at time step t-1 needs to be multiplied with a matrix\n W_hh before being fed into the ensuing cell at time step t.\n This flavor of RNN replaces the matrix multiplication between h_{t-1}\n and W_hh with a pointwise multiplication, resulting in performance\n gain.\n\n Args:\n num_units: int, The number of units in the SRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: (optional) String, the name of the layer. Layers with the same name\n will share weights, but to avoid mistakes we require reuse=True in such\n cases.\n \"\"\"\n\n def __init__(self, num_units, activation=None, reuse=None, name=None):\n super(SRUCell, self).__init__(_reuse=reuse, name=name)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n\n # Restrict inputs to be 2-dimensional matrices\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if inputs_shape[1].value is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\" % inputs_shape)\n\n input_depth = inputs_shape[1].value\n\n # pylint: disable=protected-access\n self._kernel = self.add_variable(\n rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, 4 * self._num_units])\n # pylint: enable=protected-access\n self._bias = self.add_variable(\n rnn_cell_impl._BIAS_VARIABLE_NAME, # pylint: disable=protected-access\n shape=[2 * self._num_units],\n initializer=init_ops.constant_initializer(0.0, dtype=self.dtype))\n\n self._built = True\n\n def call(self, inputs, state):\n \"\"\"Simple recurrent unit (SRU) with num_units cells.\"\"\"\n\n U = math_ops.matmul(inputs, self._kernel) # pylint: disable=invalid-name\n x_bar, f_intermediate, r_intermediate, x_tx = array_ops.split(\n value=U, num_or_size_splits=4, axis=1)\n\n f_r = math_ops.sigmoid(\n nn_ops.bias_add(\n array_ops.concat([f_intermediate, r_intermediate], 1), self._bias))\n f, r = array_ops.split(value=f_r, num_or_size_splits=2, axis=1)\n\n c = f * state + (1.0 - f) * x_bar\n h = r * self._activation(c) + (1.0 - r) * x_tx\n\n return h, c\n\n\nclass WeightNormLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Weight normalized LSTM Cell. Adapted from `rnn_cell_impl.LSTMCell`.\n\n The weight-norm implementation is based on:\n https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. Kingma.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks\n\n The default LSTM implementation based on:\n http://www.bioinf.jku.at/publications/older/2604.pdf\n S. Hochreiter and J. Schmidhuber.\n \"Long Short-Term Memory\". Neural Computation, 9(8):1735-1780, 1997.\n\n The class uses optional peephole connections, optional cell clipping\n and an optional projection layer.\n\n The optional peephole implementation is based on:\n https://research.google.com/pubs/archive/43905.pdf\n Hasim Sak, Andrew Senior, and Francoise Beaufays.\n \"Long short-term memory recurrent neural network architectures for\n large scale acoustic modeling.\" INTERSPEECH, 2014.\n \"\"\"\n\n def __init__(self,\n num_units,\n norm=True,\n use_peepholes=False,\n cell_clip=None,\n initializer=None,\n num_proj=None,\n proj_clip=None,\n forget_bias=1,\n activation=None,\n reuse=None):\n \"\"\"Initialize the parameters of a weight-normalized LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n norm: If `True`, apply normalization to the weight matrices. 
If False,\n the result is identical to that obtained from `rnn_cell_impl.LSTMCell`\n use_peepholes: bool, set `True` to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training.\n activation: Activation function of the inner states. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n super(WeightNormLSTMCell, self).__init__(_reuse=reuse)\n\n self._scope = \"wn_lstm_cell\"\n self._num_units = num_units\n self._norm = norm\n self._initializer = initializer\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._activation = activation or math_ops.tanh\n self._forget_bias = forget_bias\n\n self._weights_variable_name = \"kernel\"\n self._bias_variable_name = \"bias\"\n\n if num_proj:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)\n self._output_size = num_proj\n else:\n self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def _normalize(self, weight, name):\n \"\"\"Apply weight normalization.\n\n Args:\n weight: a 2D tensor with known number of columns.\n name: string, variable name for the normalizer.\n Returns:\n A tensor with the same shape as `weight`.\n \"\"\"\n\n output_size = weight.get_shape().as_list()[1]\n g = vs.get_variable(name, [output_size], dtype=weight.dtype)\n return nn_impl.l2_normalize(weight, axis=0) * g\n\n def _linear(self,\n args,\n output_size,\n norm,\n bias,\n bias_initializer=None,\n kernel_initializer=None):\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\n\n Args:\n args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n output_size: int, second dimension of W[i].\n norm: bool, whether to normalize the weights.\n bias: boolean, whether to add a bias term or not.\n bias_initializer: starting value to initialize the bias\n (default is all zeros).\n kernel_initializer: starting value to initialize the weight.\n\n Returns:\n A 2D Tensor with shape [batch x output_size] equal to\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size = 0\n shapes = [a.get_shape() for a in args]\n for shape in shapes:\n if shape.ndims != 2:\n raise ValueError(\"linear is expecting 2D arguments: %s\" % shapes)\n if shape[1].value is None:\n raise ValueError(\"linear 
expects shape[1] to be provided for shape %s, \"\n \"but saw %s\" % (shape, shape[1]))\n else:\n total_arg_size += shape[1].value\n\n dtype = [a.dtype for a in args][0]\n\n # Now the computation.\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope) as outer_scope:\n weights = vs.get_variable(\n self._weights_variable_name, [total_arg_size, output_size],\n dtype=dtype,\n initializer=kernel_initializer)\n if norm:\n wn = []\n st = 0\n with ops.control_dependencies(None):\n for i in range(len(args)):\n en = st + shapes[i][1].value\n wn.append(\n self._normalize(weights[st:en, :], name=\"norm_{}\".format(i)))\n st = en\n\n weights = array_ops.concat(wn, axis=0)\n\n if len(args) == 1:\n res = math_ops.matmul(args[0], weights)\n else:\n res = math_ops.matmul(array_ops.concat(args, 1), weights)\n if not bias:\n return res\n\n with vs.variable_scope(outer_scope) as inner_scope:\n inner_scope.set_partitioner(None)\n if bias_initializer is None:\n bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)\n\n biases = vs.get_variable(\n self._bias_variable_name, [output_size],\n dtype=dtype,\n initializer=bias_initializer)\n\n return nn_ops.bias_add(res, biases)\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: A tuple of state Tensors, both `2-D`, with column sizes\n `c_state` and `m_state`.\n\n Returns:\n A tuple containing:\n\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n LSTM after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of LSTM after reading `inputs` when\n the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n dtype = inputs.dtype\n num_units = self._num_units\n sigmoid = math_ops.sigmoid\n c, h = state\n\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n\n with vs.variable_scope(self._scope, initializer=self._initializer):\n\n concat = self._linear(\n [inputs, h], 4 * num_units, norm=self._norm, bias=True)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)\n\n if self._use_peepholes:\n w_f_diag = vs.get_variable(\"w_f_diag\", shape=[num_units], dtype=dtype)\n w_i_diag = vs.get_variable(\"w_i_diag\", shape=[num_units], dtype=dtype)\n w_o_diag = vs.get_variable(\"w_o_diag\", shape=[num_units], dtype=dtype)\n\n new_c = (\n c * sigmoid(f + self._forget_bias + w_f_diag * c) +\n sigmoid(i + w_i_diag * c) * self._activation(j))\n else:\n new_c = (\n c * sigmoid(f + self._forget_bias) +\n sigmoid(i) * self._activation(j))\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n new_c = clip_ops.clip_by_value(new_c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n new_h = sigmoid(o + w_o_diag * new_c) * self._activation(new_c)\n else:\n new_h = sigmoid(o) * self._activation(new_c)\n\n if self._num_proj is not None:\n with vs.variable_scope(\"projection\"):\n new_h = self._linear(\n new_h, self._num_proj, norm=self._norm, bias=False)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n new_h = 
clip_ops.clip_by_value(new_h, -self._proj_clip,\n self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)\n return new_h, new_state\n\n\nclass IndRNNCell(rnn_cell_impl.LayerRNNCell):\n \"\"\"Independently Recurrent Neural Network (IndRNN) cell\n (cf. https://arxiv.org/abs/1803.04831).\n\n Args:\n num_units: int, The number of units in the RNN cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n name=None,\n dtype=None):\n super(IndRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)\n\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if inputs_shape[1].value is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\" % inputs_shape)\n\n input_depth = inputs_shape[1].value\n # pylint: disable=protected-access\n self._kernel_w = self.add_variable(\n \"%s_w\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, self._num_units])\n self._kernel_u = self.add_variable(\n \"%s_u\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[1, self._num_units],\n initializer=init_ops.random_uniform_initializer(\n minval=-1, maxval=1, dtype=self.dtype))\n self._bias = self.add_variable(\n rnn_cell_impl._BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=init_ops.zeros_initializer(dtype=self.dtype))\n # pylint: enable=protected-access\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"IndRNN: output = new_state = act(W * input + u * state + B).\"\"\"\n\n gate_inputs = math_ops.matmul(inputs, self._kernel_w) + (\n state * self._kernel_u)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n output = self._activation(gate_inputs)\n return output, output\n\n\nclass IndyGRUCell(rnn_cell_impl.LayerRNNCell):\n r\"\"\"Independently Gated Recurrent Unit cell.\n\n Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to GRUCell,\n yet with the \\(U_r\\), \\(U_z\\), and \\(U\\) matrices in equations 5, 6, and\n 8 of http://arxiv.org/abs/1406.1078 respectively replaced by diagonal\n matrices, i.e. a Hadamard product with a single vector:\n\n $$r_j = \\sigma\\left([\\mathbf W_r\\mathbf x]_j +\n [\\mathbf u_r\\circ \\mathbf h_{(t-1)}]_j\\right)$$\n $$z_j = \\sigma\\left([\\mathbf W_z\\mathbf x]_j +\n [\\mathbf u_z\\circ \\mathbf h_{(t-1)}]_j\\right)$$\n $$\\tilde{h}^{(t)}_j = \\phi\\left([\\mathbf W \\mathbf x]_j +\n [\\mathbf u \\circ \\mathbf r \\circ \\mathbf h_{(t-1)}]_j\\right)$$\n\n where \\(\\circ\\) denotes the Hadamard operator. 
This means that each IndyGRU\n node sees only its own state, as opposed to seeing all states in the same\n layer.\n\n TODO(gonnet): Write a paper describing this and add a reference here.\n\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight\n matrices applied to the input.\n bias_initializer: (optional) The initializer to use for the bias.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None,\n name=None,\n dtype=None):\n super(IndyGRUCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)\n\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if inputs_shape[1].value is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\" % inputs_shape)\n\n input_depth = inputs_shape[1].value\n # pylint: disable=protected-access\n self._gate_kernel_w = self.add_variable(\n \"gates/%s_w\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, 2 * self._num_units],\n initializer=self._kernel_initializer)\n self._gate_kernel_u = self.add_variable(\n \"gates/%s_u\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[1, 2 * self._num_units],\n initializer=init_ops.random_uniform_initializer(\n minval=-1, maxval=1, dtype=self.dtype))\n self._gate_bias = self.add_variable(\n \"gates/%s\" % rnn_cell_impl._BIAS_VARIABLE_NAME,\n shape=[2 * self._num_units],\n initializer=(self._bias_initializer\n if self._bias_initializer is not None else\n init_ops.constant_initializer(1.0, dtype=self.dtype)))\n self._candidate_kernel_w = self.add_variable(\n \"candidate/%s\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, self._num_units],\n initializer=self._kernel_initializer)\n self._candidate_kernel_u = self.add_variable(\n \"candidate/%s_u\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[1, self._num_units],\n initializer=init_ops.random_uniform_initializer(\n minval=-1, maxval=1, dtype=self.dtype))\n self._candidate_bias = self.add_variable(\n \"candidate/%s\" % rnn_cell_impl._BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=(self._bias_initializer\n if self._bias_initializer is not None else\n init_ops.zeros_initializer(dtype=self.dtype)))\n # pylint: enable=protected-access\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n\n gate_inputs = math_ops.matmul(inputs, self._gate_kernel_w) + (\n gen_array_ops.tile(state, [1, 2]) * self._gate_kernel_u)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)\n\n value = math_ops.sigmoid(gate_inputs)\n 
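# `value` has shape [batch, 2 * num_units]: the sigmoid of both gate pre-activations computed above; the split below separates the reset gate r from the update gate u.\n    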
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n\n candidate = math_ops.matmul(inputs, self._candidate_kernel_w) + (\n r_state * self._candidate_kernel_u)\n candidate = nn_ops.bias_add(candidate, self._candidate_bias)\n\n c = self._activation(candidate)\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\n\nclass IndyLSTMCell(rnn_cell_impl.LayerRNNCell):\n r\"\"\"Basic IndyLSTM recurrent network cell.\n\n Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to\n BasicLSTMCell, yet with the \\(U_f\\), \\(U_i\\), \\(U_o\\) and \\(U_c\\)\n matrices in\n https://en.wikipedia.org/wiki/Long_short-term_memory#LSTM_with_a_forget_gate\n replaced by diagonal matrices, i.e. a Hadamard product with a single vector:\n\n $$f_t = \\sigma_g\\left(W_f x_t + u_f \\circ h_{t-1} + b_f\\right)$$\n $$i_t = \\sigma_g\\left(W_i x_t + u_i \\circ h_{t-1} + b_i\\right)$$\n $$o_t = \\sigma_g\\left(W_o x_t + u_o \\circ h_{t-1} + b_o\\right)$$\n $$c_t = f_t \\circ c_{t-1} +\n i_t \\circ \\sigma_c\\left(W_c x_t + u_c \\circ h_{t-1} + b_c\\right)$$\n\n where \\(\\circ\\) denotes the Hadamard operator. This means that each IndyLSTM\n node sees only its own state \\(h\\) and \\(c\\), as opposed to seeing all\n states in the same layer.\n\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n It does not allow cell clipping, a projection layer, and does not\n use peep-hole connections: it is the basic baseline.\n\n For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`\n that follows.\n\n TODO(gonnet): Write a paper describing this and add a reference here.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None,\n name=None,\n dtype=None):\n \"\"\"Initialize the IndyLSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n Must set to `0.0` manually when restoring from CudnnLSTM-trained\n checkpoints.\n activation: Activation function of the inner states. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight\n matrix applied to the inputs.\n bias_initializer: (optional) The initializer to use for the bias.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). 
Required when `build` is called before `call`.\n \"\"\"\n super(IndyLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)\n\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n\n @property\n def state_size(self):\n return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if inputs_shape[1].value is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\" % inputs_shape)\n\n input_depth = inputs_shape[1].value\n # pylint: disable=protected-access\n self._kernel_w = self.add_variable(\n \"%s_w\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, 4 * self._num_units],\n initializer=self._kernel_initializer)\n self._kernel_u = self.add_variable(\n \"%s_u\" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,\n shape=[1, 4 * self._num_units],\n initializer=init_ops.random_uniform_initializer(\n minval=-1, maxval=1, dtype=self.dtype))\n self._bias = self.add_variable(\n rnn_cell_impl._BIAS_VARIABLE_NAME,\n shape=[4 * self._num_units],\n initializer=(self._bias_initializer\n if self._bias_initializer is not None else\n init_ops.zeros_initializer(dtype=self.dtype)))\n # pylint: enable=protected-access\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Independent Long short-term memory cell (IndyLSTM).\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size, input_size]`.\n state: An `LSTMStateTuple` of state tensors, each shaped\n `[batch_size, num_units]`.\n\n Returns:\n A pair containing the new hidden state, and the new state (a\n `LSTMStateTuple`).\n \"\"\"\n sigmoid = math_ops.sigmoid\n one = constant_op.constant(1, dtype=dtypes.int32)\n c, h = state\n\n gate_inputs = math_ops.matmul(inputs, self._kernel_w)\n gate_inputs += gen_array_ops.tile(h, [1, 4]) * self._kernel_u\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(\n value=gate_inputs, num_or_size_splits=4, axis=one)\n\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\n # Note that using `add` and `multiply` instead of `+` and `*` gives a\n # performance improvement. So using those at the cost of readability.\n add = math_ops.add\n multiply = math_ops.multiply\n new_c = add(\n multiply(c, sigmoid(add(f, forget_bias_tensor))),\n multiply(sigmoid(i), self._activation(j)))\n new_h = multiply(self._activation(new_c), sigmoid(o))\n\n new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)\n return new_h, new_state\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.image_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport colorsys\nimport functools\nimport itertools\nimport math\nimport os\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_image_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import image_ops_impl\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\n\n\nclass RGBToHSVTest(test_util.TensorFlowTestCase):\n\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to HSV and back, as a batch and individually\n with self.test_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_hsv(batch0)\n batch2 = image_ops.hsv_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_hsv, split0))\n split2 = list(map(image_ops.hsv_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1)\n self.assertAllClose(batch2, join2)\n self.assertAllClose(batch2, inp)\n\n def testRGBToHSVRoundTrip(self):\n data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n for nptype in [np.float32, np.float64]:\n rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.\n with self.test_session(use_gpu=True):\n hsv = image_ops.rgb_to_hsv(rgb_np)\n rgb = image_ops.hsv_to_rgb(hsv)\n rgb_tf = rgb.eval()\n self.assertAllClose(rgb_tf, rgb_np)\n\n\nclass RGBToYIQTest(test_util.TensorFlowTestCase):\n\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to YIQ and back, as a 
batch and individually\n with self.test_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_yiq(batch0)\n batch2 = image_ops.yiq_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_yiq, split0))\n split2 = list(map(image_ops.yiq_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)\n\n\nclass RGBToYUVTest(test_util.TensorFlowTestCase):\n\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to YUV and back, as a batch and individually\n with self.test_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_yuv(batch0)\n batch2 = image_ops.yuv_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_yuv, split0))\n split2 = list(map(image_ops.yuv_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)\n\n\nclass GrayscaleToRGBTest(test_util.TensorFlowTestCase):\n\n def _RGBToGrayscale(self, images):\n is_batch = True\n if len(images.shape) == 3:\n is_batch = False\n images = np.expand_dims(images, axis=0)\n out_shape = images.shape[0:3] + (1,)\n out = np.zeros(shape=out_shape, dtype=np.uint8)\n for batch in xrange(images.shape[0]):\n for y in xrange(images.shape[1]):\n for x in xrange(images.shape[2]):\n red = images[batch, y, x, 0]\n green = images[batch, y, x, 1]\n blue = images[batch, y, x, 2]\n gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue\n out[batch, y, x, 0] = int(gray)\n if not is_batch:\n out = np.squeeze(out, axis=0)\n return out\n\n def _TestRGBToGrayscale(self, x_np):\n y_np = self._RGBToGrayscale(x_np)\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.rgb_to_grayscale(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testBasicRGBToGrayscale(self):\n # 4-D input with batch dimension.\n x_np = np.array(\n [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])\n self._TestRGBToGrayscale(x_np)\n\n # 3-D input with no batch dimension.\n x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])\n self._TestRGBToGrayscale(x_np)\n\n def testBasicGrayscaleToRGB(self):\n # 4-D input with batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])\n y_np = np.array(\n [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.grayscale_to_rgb(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n # 3-D input with no batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])\n y_np = np.array([[1, 
1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.grayscale_to_rgb(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testShapeInference(self):\n # Shape inference works and produces expected output where possible\n rgb_shape = [7, None, 19, 3]\n gray_shape = rgb_shape[:-1] + [1]\n with self.test_session(use_gpu=True):\n rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)\n gray = image_ops.rgb_to_grayscale(rgb_tf)\n self.assertEqual(gray_shape, gray.get_shape().as_list())\n\n with self.test_session(use_gpu=True):\n gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)\n rgb = image_ops.grayscale_to_rgb(gray_tf)\n self.assertEqual(rgb_shape, rgb.get_shape().as_list())\n\n # Shape inference does not break for unknown shapes\n with self.test_session(use_gpu=True):\n rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)\n gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)\n self.assertFalse(gray_unknown.get_shape())\n\n with self.test_session(use_gpu=True):\n gray_tf_unknown = array_ops.placeholder(dtypes.uint8)\n rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)\n self.assertFalse(rgb_unknown.get_shape())\n\n\nclass AdjustGamma(test_util.TensorFlowTestCase):\n\n def test_adjust_gamma_one(self):\n \"\"\"Same image should be returned for gamma equal to one\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 255, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_gamma(x, gamma=1)\n\n y_tf = y.eval()\n y_np = x_np\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def test_adjust_gamma_less_zero(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 255, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n\n err_msg = \"Gamma should be a non-negative real number.\"\n\n try:\n image_ops.adjust_gamma(x, gamma=-1)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def test_adjust_gamma_less_zero_tensor(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 255, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = constant_op.constant(-1.0, dtype=dtypes.float32)\n\n image = image_ops.adjust_gamma(x, gamma=y)\n\n err_msg = \"Gamma should be a non-negative real number.\"\n try:\n image.eval()\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def test_adjust_gamma_zero(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 255, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_gamma(x, gamma=0)\n\n y_tf = y.eval()\n\n dtype = x.dtype.as_numpy_dtype\n y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)\n y_np = y_np.reshape((8, 8))\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def test_adjust_gamma_less_one(self):\n \"\"\"Verifying the output with expected results for gamma\n correction with gamma equal to half\"\"\"\n with self.cached_session():\n 
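# Build an 8x8 ramp of uint8 values 0, 4, ..., 252 as the input image.\n      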
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)\n y = image_ops.adjust_gamma(x_np, gamma=0.5)\n y_tf = np.trunc(y.eval())\n\n y_np = np.array(\n [[0, 31, 45, 55, 63, 71, 78, 84], [\n 90, 95, 100, 105, 110, 115, 119, 123\n ], [127, 131, 135, 139, 142, 146, 149, 153], [\n 156, 159, 162, 165, 168, 171, 174, 177\n ], [180, 183, 186, 188, 191, 194, 196, 199], [\n 201, 204, 206, 209, 211, 214, 216, 218\n ], [221, 223, 225, 228, 230, 232, 234, 236],\n [238, 241, 243, 245, 247, 249, 251, 253]],\n dtype=np.float32)\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def test_adjust_gamma_greater_one(self):\n \"\"\"Verifying the output with expected results for gamma\n correction with gamma equal to two\"\"\"\n with self.cached_session():\n x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)\n y = image_ops.adjust_gamma(x_np, gamma=2)\n y_tf = np.trunc(y.eval())\n\n y_np = np.array(\n [[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14], [\n 16, 18, 20, 22, 25, 27, 30, 33\n ], [36, 39, 42, 45, 49, 52, 56, 60], [64, 68, 72, 76, 81, 85, 90, 95],\n [100, 105, 110, 116, 121, 127, 132, 138], [\n 144, 150, 156, 163, 169, 176, 182, 189\n ], [196, 203, 211, 218, 225, 233, 241, 249]],\n dtype=np.float32)\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n\nclass AdjustHueTest(test_util.TensorFlowTestCase):\n\n def testAdjustNegativeHue(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = -0.25\n y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testAdjustPositiveHue(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = 0.25\n y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testBatchAdjustHue(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = 0.25\n y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def _adjustHueNp(self, x_np, delta_h):\n self.assertEqual(x_np.shape[-1], 3)\n x_v = x_np.reshape([-1, 3])\n y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)\n channel_count = x_v.shape[0]\n for i in xrange(channel_count):\n r = x_v[i][0]\n g = x_v[i][1]\n b = x_v[i][2]\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n h += delta_h\n h = math.fmod(h + 10.0, 1.0)\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n y_v[i][0] = r\n y_v[i][1] = g\n y_v[i][2] = b\n return y_v.reshape(x_np.shape)\n\n def _adjustHueTf(self, x_np, delta_h):\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np)\n y = image_ops.adjust_hue(x, delta_h)\n y_tf = y.eval()\n return y_tf\n\n def testAdjustRandomHue(self):\n x_shapes = [\n [2, 2, 3],\n [4, 2, 3],\n [2, 4, 3],\n [2, 5, 
3],\n [1000, 1, 3],\n ]\n test_styles = [\n \"all_random\",\n \"rg_same\",\n \"rb_same\",\n \"gb_same\",\n \"rgb_same\",\n ]\n for x_shape in x_shapes:\n for test_style in test_styles:\n x_np = np.random.rand(*x_shape) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n if test_style == \"all_random\":\n pass\n elif test_style == \"rg_same\":\n x_np[..., 1] = x_np[..., 0]\n elif test_style == \"rb_same\":\n x_np[..., 2] = x_np[..., 0]\n elif test_style == \"gb_same\":\n x_np[..., 2] = x_np[..., 1]\n elif test_style == \"rgb_same\":\n x_np[..., 1] = x_np[..., 0]\n x_np[..., 2] = x_np[..., 0]\n else:\n raise AssertionError(\"Invalid test style: %s\" % (test_style))\n y_np = self._adjustHueNp(x_np, delta_h)\n y_tf = self._adjustHueTf(x_np, delta_h)\n self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)\n\n def testInvalidShapes(self):\n fused = False\n if not fused:\n # The tests are known to pass with the fused adjust_hue. We will enable\n # them when the fused implementation is the default.\n return\n x_np = np.random.rand(2, 3) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n fused = False\n with self.assertRaisesRegexp(ValueError, \"Shape must be at least rank 3\"):\n self._adjustHueTf(x_np, delta_h)\n x_np = np.random.rand(4, 2, 4) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n with self.assertRaisesOpError(\"input must have 3 channels\"):\n self._adjustHueTf(x_np, delta_h)\n\n\nclass FlipImageBenchmark(test.Benchmark):\n\n def _benchmarkFlipLeftRight(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.flip_left_right(inputs)\n sess.run(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n sess.run(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkFlipLeftRight_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def _benchmarkRandomFlipLeftRight(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.random_flip_left_right(inputs)\n sess.run(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n sess.run(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us\" 
%\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkRandomFlipLeftRight_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):\n image_shape = [16, 299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.random_flip_left_right(inputs)\n sess.run(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n sess.run(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: \"\n \"%.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def benchmarkFlipLeftRightCpu1(self):\n self._benchmarkFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkFlipLeftRightCpuAll(self):\n self._benchmarkFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkFlipLeftRightGpu(self):\n self._benchmarkFlipLeftRight(test.gpu_device_name(), None)\n\n def benchmarkRandomFlipLeftRightCpu1(self):\n self._benchmarkRandomFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkRandomFlipLeftRightCpuAll(self):\n self._benchmarkRandomFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkRandomFlipLeftRightGpu(self):\n self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)\n\n def benchmarkBatchedRandomFlipLeftRightCpu1(self):\n self._benchmarkBatchedRandomFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkBatchedRandomFlipLeftRightCpuAll(self):\n self._benchmarkBatchedRandomFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkBatchedRandomFlipLeftRightGpu(self):\n self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)\n\n\nclass AdjustHueBenchmark(test.Benchmark):\n\n def _benchmarkAdjustHue(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n delta = constant_op.constant(0.1, dtype=dtypes.float32)\n outputs = image_ops.adjust_hue(inputs, delta)\n run_op = control_flow_ops.group(outputs)\n sess.run(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n sess.run(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkAdjustHue_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkAdjustHue_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n 
wall_time=step_time)\n\n def benchmarkAdjustHueCpu1(self):\n self._benchmarkAdjustHue(\"/cpu:0\", 1)\n\n def benchmarkAdjustHueCpuAll(self):\n self._benchmarkAdjustHue(\"/cpu:0\", None)\n\n def benchmarkAdjustHueGpu(self):\n self._benchmarkAdjustHue(test.gpu_device_name(), None)\n\n\nclass AdjustSaturationBenchmark(test.Benchmark):\n\n def _benchmarkAdjustSaturation(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n delta = constant_op.constant(0.1, dtype=dtypes.float32)\n outputs = image_ops.adjust_saturation(inputs, delta)\n run_op = control_flow_ops.group(outputs)\n sess.run(variables.global_variables_initializer())\n for _ in xrange(warmup_rounds):\n sess.run(run_op)\n start = time.time()\n for _ in xrange(benchmark_rounds):\n sess.run(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkAdjustSaturation_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def benchmarkAdjustSaturationCpu1(self):\n self._benchmarkAdjustSaturation(\"/cpu:0\", 1)\n\n def benchmarkAdjustSaturationCpuAll(self):\n self._benchmarkAdjustSaturation(\"/cpu:0\", None)\n\n def benchmarkAdjustSaturationGpu(self):\n self._benchmarkAdjustSaturation(test.gpu_device_name(), None)\n\n\nclass ResizeBilinearBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with ops.control_dependencies(deps):\n resize_op = image_ops.resize_bilinear(\n img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n name=(\"resize_bilinear_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n\nclass ResizeBicubicBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with 
ops.control_dependencies(deps):\n resize_op = image_ops.resize_bicubic(\n img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n min_iters=20,\n name=(\"resize_bicubic_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n def benchmarkSimilar4Channel(self):\n self._benchmarkResize((183, 229), 4)\n\n def benchmarkScaleUp4Channel(self):\n self._benchmarkResize((141, 186), 4)\n\n def benchmarkScaleDown4Channel(self):\n self._benchmarkResize((749, 603), 4)\n\n\nclass ResizeAreaBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with ops.control_dependencies(deps):\n resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n name=(\"resize_area_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n\nclass AdjustSaturationTest(test_util.TensorFlowTestCase):\n\n def testHalfSaturation(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 0.5\n y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testTwiceSaturation(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 2.0\n y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = 
constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testBatchSaturation(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 0.5\n y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def _adjust_saturation(self, image, saturation_factor):\n image = ops.convert_to_tensor(image, name=\"image\")\n orig_dtype = image.dtype\n flt_image = image_ops.convert_image_dtype(image, dtypes.float32)\n saturation_adjusted_image = gen_image_ops.adjust_saturation(\n flt_image, saturation_factor)\n return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)\n\n def testHalfSaturationFused(self):\n x_shape = [2, 2, 3]\n x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 0.5\n y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]\n y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = self._adjust_saturation(x, saturation_factor)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testTwiceSaturationFused(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 2.0\n y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = self._adjust_saturation(x, saturation_factor)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def _adjustSaturationNp(self, x_np, scale):\n self.assertEqual(x_np.shape[-1], 3)\n x_v = x_np.reshape([-1, 3])\n y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)\n channel_count = x_v.shape[0]\n for i in xrange(channel_count):\n r = x_v[i][0]\n g = x_v[i][1]\n b = x_v[i][2]\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n s *= scale\n s = min(1.0, max(0.0, s))\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n y_v[i][0] = r\n y_v[i][1] = g\n y_v[i][2] = b\n return y_v.reshape(x_np.shape)\n\n def testAdjustRandomSaturation(self):\n x_shapes = [\n [2, 2, 3],\n [4, 2, 3],\n [2, 4, 3],\n [2, 5, 3],\n [1000, 1, 3],\n ]\n test_styles = [\n \"all_random\",\n \"rg_same\",\n \"rb_same\",\n \"gb_same\",\n \"rgb_same\",\n ]\n with self.test_session(use_gpu=True):\n for x_shape in x_shapes:\n for test_style in test_styles:\n x_np = np.random.rand(*x_shape) * 255.\n scale = np.random.rand()\n if test_style == \"all_random\":\n pass\n elif test_style == \"rg_same\":\n x_np[..., 1] = x_np[..., 0]\n elif test_style == \"rb_same\":\n x_np[..., 2] = x_np[..., 0]\n elif test_style == \"gb_same\":\n x_np[..., 2] = x_np[..., 1]\n elif test_style == \"rgb_same\":\n x_np[..., 1] = x_np[..., 0]\n x_np[..., 2] = x_np[..., 0]\n else:\n raise AssertionError(\"Invalid test style: %s\" % (test_style))\n y_baseline = self._adjustSaturationNp(x_np, scale)\n y_fused = self._adjust_saturation(x_np, scale).eval()\n self.assertAllClose(y_fused, 
y_baseline, rtol=2e-5, atol=1e-5)\n\n\nclass FlipTransposeRotateTest(test_util.TensorFlowTestCase):\n\n def testInvolutionLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testInvolutionLeftRightWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(x_tf)\n self.assertTrue(y.op.name.startswith(\"flip_left_right\"))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testLeftRightWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n y_np = np.array(\n [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testRandomFlipLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])\n seed = 42\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_left_right(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_left_right\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = y.eval()\n if y_tf[0][0] == 1:\n self.assertAllEqual(y_tf, x_np)\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf, y_np)\n count_flipped += 1\n\n # 100 trials\n # Mean: 50\n # Std Dev: ~5\n # Six Sigma: 50 - (5 * 6) = 20\n self.assertGreaterEqual(count_flipped, 20)\n self.assertGreaterEqual(count_unflipped, 20)\n\n def testRandomFlipLeftRightWithBatch(self):\n batch_size = 16\n seed = 42\n\n # create single item of test data\n x_np_raw = np.array(\n [[1, 2, 3], [1, 2, 3]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n y_np_raw = np.array(\n [[3, 2, 1], [3, 2, 1]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n\n # create batched test data\n x_np = np.vstack([x_np_raw for _ in range(batch_size)])\n y_np = np.vstack([y_np_raw for _ in range(batch_size)])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_left_right(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_left_right\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = y.eval()\n\n # check every element of the batch\n for i in range(batch_size):\n if y_tf[i][0][0] == 1:\n self.assertAllEqual(y_tf[i], x_np[i])\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf[i], y_np[i])\n count_flipped += 1\n\n # 100 trials, each containing batch_size elements\n # Mean: 50 * batch_size\n # 
Std Dev: ~5 * sqrt(batch_size)\n # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))\n # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680\n six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)\n self.assertGreaterEqual(count_flipped, six_sigma)\n self.assertGreaterEqual(count_unflipped, six_sigma)\n\n def testInvolutionUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testInvolutionUpDownWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(x_tf)\n self.assertTrue(y.op.name.startswith(\"flip_up_down\"))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testUpDownWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n y_np = np.array(\n [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testRandomFlipUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n\n seed = 42\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_up_down(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_up_down\"))\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = y.eval()\n if y_tf[0][0] == 1:\n self.assertAllEqual(y_tf, x_np)\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf, y_np)\n count_flipped += 1\n\n # 100 trials\n # Mean: 50\n # Std Dev: ~5\n # Six Sigma: 50 - (5 * 6) = 20\n self.assertGreaterEqual(count_flipped, 20)\n self.assertGreaterEqual(count_unflipped, 20)\n\n def testRandomFlipUpDownWithBatch(self):\n batch_size = 16\n seed = 42\n\n # create single item of test data\n x_np_raw = np.array(\n [[1, 2, 3], [4, 5, 6]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n y_np_raw = np.array(\n [[4, 5, 6], [1, 2, 3]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n\n # create batched test data\n x_np = np.vstack([x_np_raw for _ in range(batch_size)])\n y_np = np.vstack([y_np_raw for _ in range(batch_size)])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_up_down(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_up_down\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = y.eval()\n\n # check every element of the batch\n for i in range(batch_size):\n if y_tf[i][0][0] == 1:\n self.assertAllEqual(y_tf[i], 
x_np[i])\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf[i], y_np[i])\n count_flipped += 1\n\n # 100 trials, each containing batch_size elements\n # Mean: 50 * batch_size\n # Std Dev: ~5 * sqrt(batch_size)\n # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))\n # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680\n six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)\n self.assertGreaterEqual(count_flipped, six_sigma)\n self.assertGreaterEqual(count_unflipped, six_sigma)\n\n def testInvolutionTranspose(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose_image(image_ops.transpose_image(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testInvolutionTransposeWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose_image(image_ops.transpose_image(x_tf))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n\n def testTranspose(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose_image(x_tf)\n self.assertTrue(y.op.name.startswith(\"transpose_image\"))\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testTransposeWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n y_np = np.array(\n [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],\n dtype=np.uint8).reshape([2, 3, 2, 1])\n\n with self.test_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose_image(x_tf)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n\n def testPartialShapes(self):\n p_unknown_rank = array_ops.placeholder(dtypes.uint8)\n p_unknown_dims_3 = array_ops.placeholder(\n dtypes.uint8, shape=[None, None, None])\n p_unknown_dims_4 = array_ops.placeholder(\n dtypes.uint8, shape=[None, None, None, None])\n p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])\n p_unknown_batch = array_ops.placeholder(\n dtypes.uint8, shape=[None, 64, 64, 3])\n p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])\n p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])\n\n #Ops that support 3D input\n for op in [\n image_ops.flip_left_right, image_ops.flip_up_down,\n image_ops.random_flip_left_right, image_ops.random_flip_up_down,\n image_ops.transpose_image, image_ops.rot90\n ]:\n transformed_unknown_rank = op(p_unknown_rank)\n self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)\n transformed_unknown_dims_3 = op(p_unknown_dims_3)\n self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)\n transformed_unknown_width = op(p_unknown_width)\n self.assertEqual(3, transformed_unknown_width.get_shape().ndims)\n\n with self.assertRaisesRegexp(ValueError, \"must be > 0\"):\n op(p_zero_dim)\n\n #Ops that support 4D input\n for op in [\n image_ops.flip_left_right, image_ops.flip_up_down,\n image_ops.random_flip_left_right, image_ops.random_flip_up_down,\n image_ops.transpose_image, image_ops.rot90\n ]:\n 
transformed_unknown_dims_4 = op(p_unknown_dims_4)\n self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)\n transformed_unknown_batch = op(p_unknown_batch)\n self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)\n with self.assertRaisesRegexp(ValueError,\n \"must be at least three-dimensional\"):\n op(p_wrong_rank)\n\n def testRot90GroupOrder(self):\n image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])\n with self.test_session(use_gpu=True):\n rotated = image\n for _ in xrange(4):\n rotated = image_ops.rot90(rotated)\n self.assertAllEqual(image, rotated.eval())\n\n def testRot90GroupOrderWithBatch(self):\n image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])\n with self.test_session(use_gpu=True):\n rotated = image\n for _ in xrange(4):\n rotated = image_ops.rot90(rotated)\n self.assertAllEqual(image, rotated.eval())\n\n def testRot90NumpyEquivalence(self):\n image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])\n with self.test_session(use_gpu=True):\n k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])\n y_tf = image_ops.rot90(image, k_placeholder)\n for k in xrange(4):\n y_np = np.rot90(image, k=k)\n self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))\n\n def testRot90NumpyEquivalenceWithBatch(self):\n image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])\n with self.test_session(use_gpu=True):\n k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])\n y_tf = image_ops.rot90(image, k_placeholder)\n for k in xrange(4):\n y_np = np.rot90(image, k=k, axes=(1, 2))\n self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))\n\nclass AdjustContrastTest(test_util.TensorFlowTestCase):\n\n def _testContrast(self, x_np, y_np, contrast_factor):\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_contrast(x, contrast_factor)\n y_tf = y.eval()\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def testDoubleContrastUint8(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def testDoubleContrastFloat(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.\n\n y_data = [\n -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,\n 134.75, 409.25, -116.5\n ]\n y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def testHalfContrastUint8(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=0.5)\n\n def testBatchDoubleContrast(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def _adjustContrastNp(self, x_np, contrast_factor):\n mean = np.mean(x_np, (1, 2), keepdims=True)\n y_np = mean 
+ contrast_factor * (x_np - mean)\n return y_np\n\n def _adjustContrastTf(self, x_np, contrast_factor):\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np)\n y = image_ops.adjust_contrast(x, contrast_factor)\n y_tf = y.eval()\n return y_tf\n\n def testRandomContrast(self):\n x_shapes = [\n [1, 2, 2, 3],\n [2, 1, 2, 3],\n [1, 2, 2, 3],\n [2, 5, 5, 3],\n [2, 1, 1, 3],\n ]\n for x_shape in x_shapes:\n x_np = np.random.rand(*x_shape) * 255.\n contrast_factor = np.random.rand() * 2.0 + 0.1\n y_np = self._adjustContrastNp(x_np, contrast_factor)\n y_tf = self._adjustContrastTf(x_np, contrast_factor)\n self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)\n\n def testContrastFactorShape(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n with self.assertRaisesRegexp(\n ValueError, 'Shape must be rank 0 but is rank 1'):\n image_ops.adjust_contrast(x_np, [2.0])\n\n\nclass AdjustBrightnessTest(test_util.TensorFlowTestCase):\n\n def _testBrightness(self, x_np, y_np, delta):\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_brightness(x, delta)\n y_tf = y.eval()\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def testPositiveDeltaUint8(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testBrightness(x_np, y_np, delta=10. / 255.)\n\n def testPositiveDeltaFloat(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.\n\n y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]\n y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.\n\n self._testBrightness(x_np, y_np, delta=10. / 255.)\n\n def testNegativeDelta(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testBrightness(x_np, y_np, delta=-10. 
/ 255.)\n\n\nclass PerImageWhiteningTest(test_util.TensorFlowTestCase):\n\n def _NumpyPerImageWhitening(self, x):\n num_pixels = np.prod(x.shape)\n x2 = np.square(x).astype(np.float32)\n mn = np.mean(x)\n vr = np.mean(x2) - (mn * mn)\n stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))\n\n y = x.astype(np.float32)\n y -= mn\n y /= stddev\n return y\n\n def testBasic(self):\n x_shape = [13, 9, 3]\n x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)\n y_np = self._NumpyPerImageWhitening(x_np)\n\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.per_image_standardization(x)\n self.assertTrue(y.op.name.startswith(\"per_image_standardization\"))\n y_tf = y.eval()\n self.assertAllClose(y_tf, y_np, atol=1e-4)\n\n def testUniformImage(self):\n im_np = np.ones([19, 19, 3]).astype(np.float32) * 249\n im = constant_op.constant(im_np)\n whiten = image_ops.per_image_standardization(im)\n with self.test_session(use_gpu=True):\n whiten_np = whiten.eval()\n self.assertFalse(np.any(np.isnan(whiten_np)))\n\n\nclass CropToBoundingBoxTest(test_util.TensorFlowTestCase):\n\n def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs):\n if use_tensor_inputs:\n offset_height = ops.convert_to_tensor(offset_height)\n offset_width = ops.convert_to_tensor(offset_width)\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,\n target_height, target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.test_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._CropToBoundingBox(x, offset_height, offset_width,\n target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._CropToBoundingBox(x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n self._assertReturns(x, x_shape, 0, 0, x, x_shape)\n\n def testCrop(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n offset_height, offset_width = [1, 0]\n y_shape = [2, 3, 1]\n y = [4, 5, 6, 7, 8, 
9]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 1]\n y_shape = [3, 2, 1]\n y = [2, 3, 5, 6, 8, 9]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y_shape = [2, 3, 1]\n y = [1, 2, 3, 4, 5, 6]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y_shape = [3, 2, 1]\n y = [1, 2, 4, 5, 7, 8]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n def testShapeInference(self):\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n offset_height, offset_width = [0, 0]\n target_height, target_width = [2, 2]\n\n for x_shape in ([3, 5], [1, 3, 5, 1, 1]):\n self._assertRaises(x, x_shape, offset_height, offset_width, target_height,\n target_width,\n \"'image' must have either 3 or 4 dimensions.\")\n\n def testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n # Each line is a test configuration:\n # x_shape, target_height, target_width\n test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),\n ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))\n offset_height, offset_width = [0, 0]\n x = []\n\n for x_shape, target_height, target_width in test_config:\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"all dims of 'image.shape' must be > 0\",\n use_tensor_inputs_options=[False])\n # Multiple assertion could fail, but the evaluation order is arbitrary.\n # Match gainst generic pattern.\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"assertion failed:\",\n use_tensor_inputs_options=[True])\n\n def testBadParams(self):\n x_shape = [4, 4, 1]\n x = np.zeros(x_shape)\n\n # Each line is a test configuration:\n # (offset_height, offset_width, target_height, target_width), err_msg\n test_config = (([-1, 0, 3, 3], \"offset_height must be >= 0\"), ([\n 0, -1, 3, 3\n ], \"offset_width must be >= 0\"), ([0, 0, 0, 3],\n \"target_height must be > 0\"),\n ([0, 0, 3, 0], \"target_width must be > 0\"),\n ([2, 0, 3, 3], \"height must be >= target + offset\"),\n ([0, 2, 3, 3], \"width must be >= target + offset\"))\n\n for params, err_msg in test_config:\n self._assertRaises(x, x_shape, *params, err_msg=err_msg)\n\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])\n y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)\n self.assertTrue(y.name.startswith(\"crop_to_bounding_box\"))\n\n\nclass CentralCropTest(test_util.TensorFlowTestCase):\n\n def _assertShapeInference(self, pre_shape, fraction, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = 
image_ops.central_crop(image, fraction)\n if post_shape is None:\n self.assertEqual(y.get_shape().dims, None)\n else:\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testNoOp(self):\n x_shapes = [[13, 9, 3], [5, 13, 9, 3]]\n for x_shape in x_shapes:\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 1.0)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, x_np)\n self.assertEqual(y.op.name, x.op.name)\n\n def testCropping(self):\n x_shape = [4, 8, 1]\n x_np = np.array(\n [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],\n dtype=np.int32).reshape(x_shape)\n y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 0.5)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n x_shape = [2, 4, 8, 1]\n x_np = np.array(\n [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],\n [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],\n dtype=np.int32).reshape(x_shape)\n y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],\n [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])\n with self.test_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 0.5)\n y_tf = y.eval()\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n def testCropping2(self):\n # Test case for 10315\n x_shapes = [[240, 320, 3], [5, 240, 320, 3]]\n expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]\n\n for x_shape, y_shape in zip(x_shapes, expected_y_shapes):\n x_np = np.zeros(x_shape, dtype=np.int32)\n y_np = np.zeros(y_shape, dtype=np.int32)\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)\n y = image_ops.central_crop(x, 0.33)\n y_tf = y.eval(feed_dict={x: x_np})\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n def testShapeInference(self):\n # Test no-op fraction=1.0, with 3-D tensors.\n self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])\n self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])\n self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])\n self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])\n self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])\n self._assertShapeInference([None, None, None], 1.0, [None, None, None])\n\n # Test no-op fraction=0.5, with 3-D tensors.\n self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])\n self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])\n self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])\n self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])\n self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])\n self._assertShapeInference([None, None, None], 0.5, [None, None, None])\n\n # Test no-op fraction=1.0, with 4-D tensors.\n self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])\n self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])\n self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])\n self._assertShapeInference([5, None, None, 3], 1.0, 
[5, None, None, 3])\n self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])\n self._assertShapeInference([5, None, None, None], 1.0,\n [5, None, None, None])\n self._assertShapeInference([None, None, None, None], 1.0,\n [None, None, None, None])\n\n # Test no-op fraction=0.5, with 4-D tensors.\n self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])\n self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])\n self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])\n self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])\n self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])\n self._assertShapeInference([5, None, None, None], 0.5,\n [5, None, None, None])\n self._assertShapeInference([None, None, None, None], 0.5,\n [None, None, None, None])\n\n def testErrorOnInvalidCentralCropFractionValues(self):\n x_shape = [13, 9, 3]\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 0.0)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 1.01)\n\n def testErrorOnInvalidShapes(self):\n x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]\n for x_shape in x_shapes:\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 0.5)\n\n def testNameScope(self):\n x_shape = [13, 9, 3]\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n y = image_ops.central_crop(x_np, 1.0)\n self.assertTrue(y.op.name.startswith(\"central_crop\"))\n\n\nclass PadToBoundingBoxTest(test_util.TensorFlowTestCase):\n\n def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs):\n if use_tensor_inputs:\n offset_height = ops.convert_to_tensor(offset_height)\n offset_width = ops.convert_to_tensor(offset_width)\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,\n target_height, target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.test_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._PadToBoundingBox(x, offset_height, offset_width,\n target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n 
self._PadToBoundingBox(x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testInt64(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n y_shape = [4, 3, 1]\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)\n y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])\n with self.test_session(use_gpu=True):\n self.assertAllClose(y, y_tf.eval())\n\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n offset_height, offset_width = [0, 0]\n self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)\n\n def testPadding(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n offset_height, offset_width = [1, 0]\n y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n y_shape = [4, 3, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 1]\n y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]\n y_shape = [3, 4, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]\n y_shape = [4, 3, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]\n y_shape = [3, 4, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n def testShapeInference(self):\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n offset_height, offset_width = [0, 0]\n target_height, target_width = [2, 2]\n\n for x_shape in ([3, 5], [1, 3, 5, 1, 1]):\n self._assertRaises(x, x_shape, offset_height, offset_width, target_height,\n target_width,\n \"'image' must have either 3 or 4 dimensions.\")\n\n def testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n # Each line is a test configuration:\n # x_shape, target_height, target_width\n test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))\n offset_height, offset_width = [0, 0]\n x = []\n\n for x_shape, target_height, target_width in test_config:\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"all dims of 'image.shape' must be > 0\",\n 
use_tensor_inputs_options=[False])\n\n # The original error message does not contain back slashes. However, they\n # are added by either the assert op or the runtime. If this behavior\n # changes in the future, the match string will also needs to be changed.\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"all dims of \\\\'image.shape\\\\' must be > 0\",\n use_tensor_inputs_options=[True])\n\n def testBadParams(self):\n x_shape = [3, 3, 1]\n x = np.zeros(x_shape)\n\n # Each line is a test configuration:\n # offset_height, offset_width, target_height, target_width, err_msg\n test_config = ((-1, 0, 4, 4, \"offset_height must be >= 0\"),\n (0, -1, 4, 4, \"offset_width must be >= 0\"),\n (2, 0, 4, 4, \"height must be <= target - offset\"),\n (0, 2, 4, 4, \"width must be <= target - offset\"))\n\n for config_item in test_config:\n self._assertRaises(x, x_shape, *config_item)\n\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])\n y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)\n self.assertTrue(y.op.name.startswith(\"pad_to_bounding_box\"))\n\n\nclass SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):\n\n def _testSampleDistortedBoundingBox(self, image, bounding_box,\n min_object_covered, aspect_ratio_range,\n area_range):\n original_area = float(np.prod(image.shape))\n bounding_box_area = float((bounding_box[3] - bounding_box[1]) *\n (bounding_box[2] - bounding_box[0]))\n\n image_size_np = np.array(image.shape, dtype=np.int32)\n bounding_box_np = (\n np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))\n\n aspect_ratios = []\n area_ratios = []\n\n fraction_object_covered = []\n\n num_iter = 1000\n with self.test_session(use_gpu=True):\n image_tf = constant_op.constant(image, shape=image.shape)\n image_size_tf = constant_op.constant(\n image_size_np, shape=image_size_np.shape)\n bounding_box_tf = constant_op.constant(\n bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)\n\n begin, size, _ = image_ops.sample_distorted_bounding_box(\n image_size=image_size_tf,\n bounding_boxes=bounding_box_tf,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range)\n y = array_ops.strided_slice(image_tf, begin, begin + size)\n\n for _ in xrange(num_iter):\n y_tf = y.eval()\n crop_height = y_tf.shape[0]\n crop_width = y_tf.shape[1]\n aspect_ratio = float(crop_width) / float(crop_height)\n area = float(crop_width * crop_height)\n\n aspect_ratios.append(aspect_ratio)\n area_ratios.append(area / original_area)\n fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)\n\n # min_object_covered as tensor\n min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)\n begin, size, _ = image_ops.sample_distorted_bounding_box(\n image_size=image_size_tf,\n bounding_boxes=bounding_box_tf,\n min_object_covered=min_object_covered_placeholder,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range)\n y = array_ops.strided_slice(image_tf, begin, begin + size)\n\n for _ in xrange(num_iter):\n y_tf = y.eval(feed_dict={\n min_object_covered_placeholder: min_object_covered\n })\n crop_height = y_tf.shape[0]\n crop_width = y_tf.shape[1]\n aspect_ratio = float(crop_width) / float(crop_height)\n area = float(crop_width * crop_height)\n\n aspect_ratios.append(aspect_ratio)\n area_ratios.append(area / original_area)\n fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)\n\n # Ensure that each 
entry is observed within 3 standard deviations.\n # num_bins = 10\n # aspect_ratio_hist, _ = np.histogram(aspect_ratios,\n # bins=num_bins,\n # range=aspect_ratio_range)\n # mean = np.mean(aspect_ratio_hist)\n # stddev = np.sqrt(mean)\n # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.\n # TODO(irving): Since the rejection probability is not independent of the\n # aspect ratio, the aspect_ratio random value is not exactly uniformly\n # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be\n # fixed to reflect the true statistical property, then tightened to enforce\n # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op\n # be fixed to not use rejection sampling and generate correctly uniform\n # aspect ratios.\n # self.assertAllClose(aspect_ratio_hist,\n # [mean] * num_bins, atol=3.6 * stddev)\n\n # The resulting crop will not be uniformly distributed in area. In practice,\n # we find that the area skews towards the small sizes. Instead, we perform\n # a weaker test to ensure that the area ratios are merely within the\n # specified bounds.\n self.assertLessEqual(max(area_ratios), area_range[1])\n self.assertGreaterEqual(min(area_ratios), area_range[0])\n\n # For reference, here is what the distribution of area ratios look like.\n area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)\n print(\"area_ratio_hist \", area_ratio_hist)\n\n # Ensure that fraction_object_covered is satisfied.\n # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.\n # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)\n\n def testWholeImageBoundingBox(self):\n height = 40\n width = 50\n image_size = [height, width, 1]\n bounding_box = [0.0, 0.0, 1.0, 1.0]\n image = np.arange(\n 0, np.prod(image_size), dtype=np.int32).reshape(image_size)\n self._testSampleDistortedBoundingBox(\n image,\n bounding_box,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n def testWithBoundingBox(self):\n height = 40\n width = 50\n x_shape = [height, width, 1]\n image = np.zeros(x_shape, dtype=np.int32)\n\n # Create an object with 1's in a region with area A and require that\n # the total pixel values >= 0.1 * A.\n min_object_covered = 0.1\n\n xmin = 2\n ymin = 3\n xmax = 12\n ymax = 13\n for x in np.arange(xmin, xmax + 1, 1):\n for y in np.arange(ymin, ymax + 1, 1):\n image[x, y] = 1\n\n # Bounding box is specified as (ymin, xmin, ymax, xmax) in\n # relative coordinates.\n bounding_box = (float(ymin) / height, float(xmin) / width,\n float(ymax) / height, float(xmax) / width)\n\n self._testSampleDistortedBoundingBox(\n image,\n bounding_box=bounding_box,\n min_object_covered=min_object_covered,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n def testSampleDistortedBoundingBoxShape(self):\n with self.test_session(use_gpu=True):\n image_size = constant_op.constant(\n [40, 50, 1], shape=[3], dtype=dtypes.int32)\n bounding_box = constant_op.constant(\n [[[0.0, 0.0, 1.0, 1.0]]],\n shape=[1, 1, 4],\n dtype=dtypes.float32,\n )\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # Test that the shapes are correct.\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n # 
Actual run to make sure shape is correct inside Compute().\n begin = begin.eval()\n end = end.eval()\n bbox_for_drawing = bbox_for_drawing.eval()\n\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n min_object_covered=array_ops.placeholder(dtypes.float32),\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # Test that the shapes are correct.\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n\n def testDefaultMinObjectCovered(self):\n # By default min_object_covered=0.1 if not provided\n with self.test_session(use_gpu=True):\n image_size = constant_op.constant(\n [40, 50, 1], shape=[3], dtype=dtypes.int32)\n bounding_box = constant_op.constant(\n [[[0.0, 0.0, 1.0, 1.0]]],\n shape=[1, 1, 4],\n dtype=dtypes.float32,\n )\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n # Actual run to make sure shape is correct inside Compute().\n begin = begin.eval()\n end = end.eval()\n bbox_for_drawing = bbox_for_drawing.eval()\n\n\nclass ResizeImagesTest(test_util.TensorFlowTestCase):\n\n OPTIONS = [\n image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,\n image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA\n ]\n\n TYPES = [\n np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,\n np.float32, np.float64\n ]\n\n def _assertShapeInference(self, pre_shape, size, post_shape):\n # Try single image resize\n single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_images(single_image, size)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n # Try batch images resize with known batch size\n images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)\n y = image_ops.resize_images(images, size)\n self.assertEqual(y.get_shape().as_list(), [99] + post_shape)\n # Try batch images resize with unknown batch size\n images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)\n y = image_ops.resize_images(images, size)\n self.assertEqual(y.get_shape().as_list(), [None] + post_shape)\n\n def shouldRunOnGPU(self, opt, nptype):\n if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and\n nptype in [np.float32, np.float64]):\n return True\n else:\n return False\n\n def testNoOp(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n target_height = 6\n target_width = 4\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for opt in self.OPTIONS:\n with self.test_session(use_gpu=True) as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width], opt)\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a 
single image must leave the shape unchanged also.\n with self.test_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n self.OPTIONS[0])\n yshape = array_ops.shape(y)\n newshape = yshape.eval()\n self.assertAllEqual(single_shape, newshape)\n\n def testTensorArguments(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n new_size = array_ops.placeholder(dtypes.int32, shape=(2))\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for opt in self.OPTIONS:\n with self.test_session(use_gpu=True) as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, new_size, opt)\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a single image must leave the shape unchanged also.\n with self.test_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images(image, new_size, self.OPTIONS[0])\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(single_shape, newshape)\n self.assertAllClose(resized, img_single, atol=1e-5)\n\n # Incorrect shape.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant(4)\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([4])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([1, 2, 3])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n\n # Incorrect dtypes.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([6.0, 4])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [6, 4.0],\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [None, 4],\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [6, None],\n image_ops.ResizeMethod.BILINEAR)\n\n def testReturnDtype(self):\n target_shapes = [[6, 4], [3, 2], [\n array_ops.placeholder(dtypes.int32),\n array_ops.placeholder(dtypes.int32)\n ]]\n for nptype in self.TYPES:\n image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])\n for opt in self.OPTIONS:\n for target_shape in target_shapes:\n y = image_ops.resize_images(image, target_shape, opt)\n if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR or\n target_shape == image.shape[1:3]):\n expected_dtype = image.dtype\n else:\n expected_dtype = dtypes.float32\n self.assertEqual(y.dtype, expected_dtype)\n\n def testSumTensor(self):\n img_shape = [1, 6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 
100, 100\n ]\n # Test size where width is specified as a tensor which is a sum\n # of two tensors.\n width_1 = constant_op.constant(1)\n width_2 = constant_op.constant(3)\n width = math_ops.add(width_1, width_2)\n height = constant_op.constant(6)\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for opt in self.OPTIONS:\n with self.cached_session() as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [height, width], opt)\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n def testResizeDown(self):\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n expected_data = [127, 64, 64, 127, 50, 100]\n target_height = 3\n target_width = 2\n\n # Test out 3-D and 4-D image shapes.\n img_shapes = [[1, 6, 4, 1], [6, 4, 1]]\n target_shapes = [[1, target_height, target_width, 1],\n [target_height, target_width, 1]]\n\n for target_shape, img_shape in zip(target_shapes, img_shapes):\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for opt in self.OPTIONS:\n if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):\n with self.test_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n opt)\n expected = np.array(expected_data).reshape(target_shape)\n resized = y.eval()\n self.assertAllClose(resized, expected, atol=1e-5)\n\n def testResizeUpAlignCornersFalse(self):\n img_shape = [1, 3, 2, 1]\n data = [64, 32, 32, 64, 50, 100]\n target_height = 6\n target_width = 4\n expected_data = {}\n expected_data[image_ops.ResizeMethod.BILINEAR] = [\n 64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,\n 41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethod.AREA] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n\n for nptype in self.TYPES:\n for opt in [\n image_ops.ResizeMethod.BILINEAR,\n image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA\n ]:\n with self.test_session(use_gpu=True):\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(\n image, [target_height, target_width], opt, align_corners=False)\n resized = y.eval()\n expected = np.array(expected_data[opt]).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1e-05)\n\n def testResizeUpAlignCornersTrue(self):\n img_shape = [1, 3, 2, 1]\n data = [6, 3, 3, 6, 6, 9]\n target_height = 5\n target_width = 4\n expected_data = {}\n expected_data[image_ops.ResizeMethod.BILINEAR] = [\n 6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,\n 6.5, 7.5, 6.0, 7.0, 8.0, 9.0\n ]\n expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [\n 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,\n 9.0, 
9.0, 6.0, 6.0, 9.0, 9.0\n ]\n # TODO(b/37749740): Improve alignment of ResizeMethod.AREA when\n # align_corners=True.\n expected_data[image_ops.ResizeMethod.AREA] = [\n 6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,\n 3.0, 6.0, 6.0, 6.0, 6.0, 9.0\n ]\n\n for nptype in self.TYPES:\n for opt in [\n image_ops.ResizeMethod.BILINEAR,\n image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA\n ]:\n with self.test_session(use_gpu=True):\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(\n image, [target_height, target_width], opt, align_corners=True)\n resized = y.eval()\n expected = np.array(expected_data[opt]).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1e-05)\n\n def testResizeUpBicubic(self):\n img_shape = [1, 6, 6, 1]\n data = [\n 128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,\n 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,\n 50, 50, 100, 100\n ]\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n target_height = 8\n target_width = 8\n expected_data = [\n 128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,\n 55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,\n 105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,\n 75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105\n ]\n\n with self.test_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n image_ops.ResizeMethod.BICUBIC)\n resized = y.eval()\n expected = np.array(expected_data).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1)\n\n def testResizeDownArea(self):\n img_shape = [1, 6, 6, 1]\n data = [\n 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,\n 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30\n ]\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n target_height = 4\n target_width = 4\n expected_data = [\n 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21\n ]\n\n with self.test_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n image_ops.ResizeMethod.AREA)\n expected = np.array(expected_data).reshape(\n [1, target_height, target_width, 1])\n resized = y.eval()\n self.assertAllClose(resized, expected, atol=1)\n\n def testCompareNearestNeighbor(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n target_width = 12\n for nptype in [np.float32, np.float64]:\n for align_corners in [True, False]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n with self.test_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n gpu_val = out_op.eval()\n with self.test_session(use_gpu=False):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n cpu_val = 
out_op.eval()\n self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)\n\n def testCompareBilinear(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n target_width = 12\n for nptype in [np.float32, np.float64]:\n for align_corners in [True, False]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n value = {}\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethod.BILINEAR,\n align_corners=align_corners)\n value[use_gpu] = out_op.eval()\n self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)\n\n def testShapeInference(self):\n self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])\n\n def testNameScope(self):\n img_shape = [1, 3, 2, 1]\n with self.test_session(use_gpu=True):\n single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])\n y = image_ops.resize_images(single_image, [55, 66])\n self.assertTrue(y.op.name.startswith(\"resize_images\"))\n\n def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_max = ops.convert_to_tensor([max_h, max_w])\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n target_max = [max_h, max_w]\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_images(x_tensor, target_max,\n preserve_aspect_ratio=preserve_aspect_ratio)\n\n with self.test_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertResizeEqual(self, x, x_shape, y, y_shape,\n preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertResizeCheckShape(self, x, x_shape, target_shape,\n y_shape, preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = 
use_tensor_inputs_options or [False, True]\n target_height, target_width = target_shape\n x = np.array(x).reshape(x_shape)\n y = np.zeros(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))\n\n def testPreserveAspectRatioMultipleImages(self):\n x_shape = [10, 100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],\n preserve_aspect_ratio=False)\n\n def testPreserveAspectRatioNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeEqual(x, x_shape, x, x_shape)\n\n def testPreserveAspectRatioSmaller(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])\n\n def testPreserveAspectRatioSmallerMultipleImages(self):\n x_shape = [10, 100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])\n\n def testPreserveAspectRatioLarger(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])\n\n def testPreserveAspectRatioSameRatio(self):\n x_shape = [1920, 1080, 3]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])\n\n\nclass ResizeImageWithPadTest(test_util.TensorFlowTestCase):\n\n def _ResizeImageWithPad(self, x, target_height, target_width,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_image_with_pad(x_tensor, target_height,\n target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.test_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n except Exception as e: # pylint: disable=broad-except\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_image_with_pad(image, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertReturns(x, x_shape, x, 
x_shape)\n\n def testPad(self):\n # Reduce vertical dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 3, 0]\n y_shape = [1, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Reduce horizontal dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [1, 3, 0, 0]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [1, 3]\n y_shape = [1, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n\nclass ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):\n\n def _ResizeImageWithCropOrPad(self, x, target_height, target_width,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,\n target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.test_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._ResizeImageWithCropOrPad(x, target_height, target_width,\n use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_image_with_crop_or_pad(image, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertReturns(x, x_shape, x, x_shape)\n\n def testPad(self):\n # Pad even along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]\n y_shape = [2, 6, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad odd along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]\n y_shape = [2, 7, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad even along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]\n y_shape = [4, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad odd along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]\n y_shape = [5, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n def testCrop(self):\n # Crop even along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [2, 3, 6, 7]\n y_shape = [2, 
2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop odd along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n x_shape = [2, 6, 1]\n\n y = [2, 3, 4, 8, 9, 10]\n y_shape = [2, 3, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop even along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [4, 2, 1]\n\n y = [3, 4, 5, 6]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop odd along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n x_shape = [8, 2, 1]\n\n y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n y_shape = [5, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n def testCropAndPad(self):\n # Pad along row but crop along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 2, 3, 6, 7, 0, 0]\n y_shape = [4, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop along row but pad along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [4, 2, 1]\n\n y = [0, 3, 4, 0, 0, 5, 6, 0]\n y_shape = [2, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n def testShapeInference(self):\n self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n target_height, target_width = [4, 4]\n\n for x_shape in ([3, 5],):\n self._assertRaises(x, x_shape, target_height, target_width,\n \"'image' must have either 3 or 4 dimensions.\")\n\n for x_shape in ([1, 3, 5, 1, 1],):\n self._assertRaises(x, x_shape, target_height, target_width,\n \"'image' must have either 3 or 4 dimensions.\")\n\n def testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n target_height, target_width = [1, 1]\n x = []\n\n for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):\n self._assertRaises(\n x,\n x_shape,\n target_height,\n target_width,\n \"all dims of 'image.shape' must be > 0\",\n use_tensor_inputs_options=[False])\n\n # The original error message does not contain back slashes. However, they\n # are added by either the assert op or the runtime. 
If this behavior\n # changes in the future, the match string will also needs to be changed.\n self._assertRaises(\n x,\n x_shape,\n target_height,\n target_width,\n \"all dims of \\\\'image.shape\\\\' must be > 0\",\n use_tensor_inputs_options=[True])\n\n def testBadParams(self):\n x_shape = [4, 4, 1]\n x = np.zeros(x_shape)\n\n # target_height <= 0\n target_height, target_width = [0, 5]\n self._assertRaises(x, x_shape, target_height, target_width,\n \"target_height must be > 0\")\n\n # target_width <= 0\n target_height, target_width = [5, 0]\n self._assertRaises(x, x_shape, target_height, target_width,\n \"target_width must be > 0\")\n\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])\n y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)\n self.assertTrue(y.op.name.startswith(\"resize_image_with_crop_or_pad\"))\n\n\ndef _SimpleColorRamp():\n \"\"\"Build a simple color ramp RGB image.\"\"\"\n w, h = 256, 200\n i = np.arange(h)[:, None]\n j = np.arange(w)\n image = np.empty((h, w, 3), dtype=np.uint8)\n image[:, :, 0] = i\n image[:, :, 1] = j\n image[:, :, 2] = (i + j) >> 1\n return image\n\n\nclass JpegTest(test_util.TensorFlowTestCase):\n\n # TODO(irving): Add self.assertAverageLess or similar to test_util\n def averageError(self, image0, image1):\n self.assertEqual(image0.shape, image1.shape)\n image0 = image0.astype(int) # Avoid overflow\n return np.abs(image0 - image1).sum() / np.prod(image0.shape)\n\n def testExisting(self):\n # Read a real jpeg and verify shape\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1.jpg\")\n with self.test_session(use_gpu=True) as sess:\n jpeg0 = io_ops.read_file(path)\n image0 = image_ops.decode_jpeg(jpeg0)\n image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))\n jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])\n self.assertEqual(len(jpeg0), 3771)\n self.assertEqual(image0.shape, (256, 128, 3))\n self.assertLess(self.averageError(image0, image1), 1.4)\n\n def testCmyk(self):\n # Confirm that CMYK reads in as RGB\n base = \"tensorflow/core/lib/jpeg/testdata\"\n rgb_path = os.path.join(base, \"jpeg_merge_test1.jpg\")\n cmyk_path = os.path.join(base, \"jpeg_merge_test1_cmyk.jpg\")\n shape = 256, 128, 3\n for channels in 3, 0:\n with self.test_session(use_gpu=True) as sess:\n rgb = image_ops.decode_jpeg(\n io_ops.read_file(rgb_path), channels=channels)\n cmyk = image_ops.decode_jpeg(\n io_ops.read_file(cmyk_path), channels=channels)\n rgb, cmyk = sess.run([rgb, cmyk])\n self.assertEqual(rgb.shape, shape)\n self.assertEqual(cmyk.shape, shape)\n error = self.averageError(rgb, cmyk)\n self.assertLess(error, 4)\n\n def testCropAndDecodeJpeg(self):\n with self.cached_session() as sess:\n # Encode it, then decode it, then encode it\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n\n h, w, _ = 256, 128, 3\n crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],\n [h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]\n for crop_window in crop_windows:\n # Explicit two stages: decode + crop.\n image1 = image_ops.decode_jpeg(jpeg0)\n y, x, h, w = crop_window\n image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)\n\n # Combined decode+crop.\n image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)\n\n # Combined decode+crop should have the same shape inference\n self.assertAllEqual(image1_crop.get_shape().as_list(),\n image2.get_shape().as_list())\n\n # CropAndDecode should be equal to 
DecodeJpeg+Crop.\n image1_crop, image2 = sess.run([image1_crop, image2])\n self.assertAllEqual(image1_crop, image2)\n\n def testCropAndDecodeJpegWithInvalidCropWindow(self):\n with self.cached_session() as sess:\n # Encode it, then decode it, then encode it\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n\n h, w, _ = 256, 128, 3\n # Invalid crop windows.\n crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],\n [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],\n [0, 0, h + 1, w], [0, 0, h, w + 1]]\n for crop_window in crop_windows:\n result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n lambda e: \"Invalid JPEG data or crop window\" in str(e)):\n sess.run(result)\n\n def testSynthetic(self):\n with self.test_session(use_gpu=True) as sess:\n # Encode it, then decode it, then encode it\n image0 = constant_op.constant(_SimpleColorRamp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_ACCURATE\")\n image2 = image_ops.decode_jpeg(\n image_ops.encode_jpeg(image1), dct_method=\"INTEGER_ACCURATE\")\n jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])\n\n # The decoded-encoded image should be similar to the input\n self.assertLess(self.averageError(image0, image1), 0.6)\n\n # We should be very close to a fixpoint\n self.assertLess(self.averageError(image1, image2), 0.02)\n\n # Smooth ramps compress well (input size is 153600)\n self.assertGreaterEqual(len(jpeg0), 5000)\n self.assertLessEqual(len(jpeg0), 6000)\n\n def testSyntheticFasterAlgorithm(self):\n with self.test_session(use_gpu=True) as sess:\n # Encode it, then decode it, then encode it\n image0 = constant_op.constant(_SimpleColorRamp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_FAST\")\n image2 = image_ops.decode_jpeg(\n image_ops.encode_jpeg(image1), dct_method=\"INTEGER_FAST\")\n jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])\n\n # The decoded-encoded image should be similar to the input, but\n # note this is worse than the slower algorithm because it is\n # less accurate.\n self.assertLess(self.averageError(image0, image1), 0.95)\n\n # Repeated compression / decompression will have a higher error\n # with a lossier algorithm.\n self.assertLess(self.averageError(image1, image2), 1.05)\n\n # Smooth ramps compress well (input size is 153600)\n self.assertGreaterEqual(len(jpeg0), 5000)\n self.assertLessEqual(len(jpeg0), 6000)\n\n def testDefaultDCTMethodIsIntegerFast(self):\n with self.test_session(use_gpu=True) as sess:\n # Compare decoding with both dct_option=INTEGER_FAST and\n # default. 
They should be the same.\n image0 = constant_op.constant(_SimpleColorRamp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_FAST\")\n image2 = image_ops.decode_jpeg(jpeg0)\n image1, image2 = sess.run([image1, image2])\n\n # The images should be the same.\n self.assertAllClose(image1, image2)\n\n def testShape(self):\n with self.test_session(use_gpu=True) as sess:\n jpeg = constant_op.constant(\"nonsense\")\n for channels in 0, 1, 3:\n image = image_ops.decode_jpeg(jpeg, channels=channels)\n self.assertEqual(image.get_shape().as_list(),\n [None, None, channels or None])\n\n def testExtractJpegShape(self):\n # Read a real jpeg and verify shape.\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1.jpg\")\n with self.test_session(use_gpu=True) as sess:\n jpeg = io_ops.read_file(path)\n # Extract shape without decoding.\n [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])\n self.assertEqual(image_shape.tolist(), [256, 128, 3])\n\n def testExtractJpegShapeforCmyk(self):\n # Read a cmyk jpeg image, and verify its shape.\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1_cmyk.jpg\")\n with self.test_session(use_gpu=True) as sess:\n jpeg = io_ops.read_file(path)\n [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])\n # Cmyk jpeg image has 4 channels.\n self.assertEqual(image_shape.tolist(), [256, 128, 4])\n\n\nclass PngTest(test_util.TensorFlowTestCase):\n\n def testExisting(self):\n # Read some real PNGs, converting to different channel numbers\n prefix = \"tensorflow/core/lib/png/testdata/\"\n inputs = ((1, \"lena_gray.png\"), (4, \"lena_rgba.png\"),\n (3, \"lena_palette.png\"), (4, \"lena_palette_trns.png\"))\n for channels_in, filename in inputs:\n for channels in 0, 1, 3, 4:\n with self.test_session(use_gpu=True) as sess:\n png0 = io_ops.read_file(prefix + filename)\n image0 = image_ops.decode_png(png0, channels=channels)\n png0, image0 = sess.run([png0, image0])\n self.assertEqual(image0.shape, (26, 51, channels or channels_in))\n if channels == channels_in:\n image1 = image_ops.decode_png(image_ops.encode_png(image0))\n self.assertAllEqual(image0, image1.eval())\n\n def testSynthetic(self):\n with self.test_session(use_gpu=True) as sess:\n # Encode it, then decode it\n image0 = constant_op.constant(_SimpleColorRamp())\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0)\n png0, image0, image1 = sess.run([png0, image0, image1])\n\n # PNG is lossless\n self.assertAllEqual(image0, image1)\n\n # Smooth ramps compress well, but not too well\n self.assertGreaterEqual(len(png0), 400)\n self.assertLessEqual(len(png0), 750)\n\n def testSyntheticUint16(self):\n with self.test_session(use_gpu=True) as sess:\n # Encode it, then decode it\n image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)\n png0, image0, image1 = sess.run([png0, image0, image1])\n\n # PNG is lossless\n self.assertAllEqual(image0, image1)\n\n # Smooth ramps compress well, but not too well\n self.assertGreaterEqual(len(png0), 800)\n self.assertLessEqual(len(png0), 1500)\n\n def testSyntheticTwoChannel(self):\n with self.test_session(use_gpu=True) as sess:\n # Strip the b channel from an rgb image to get a two-channel image.\n gray_alpha = _SimpleColorRamp()[:, :, 0:2]\n image0 = constant_op.constant(gray_alpha)\n png0 = image_ops.encode_png(image0, 
compression=7)\n image1 = image_ops.decode_png(png0)\n png0, image0, image1 = sess.run([png0, image0, image1])\n self.assertEqual(2, image0.shape[-1])\n self.assertAllEqual(image0, image1)\n\n def testSyntheticTwoChannelUint16(self):\n with self.test_session(use_gpu=True) as sess:\n # Strip the b channel from an rgb image to get a two-channel image.\n gray_alpha = _SimpleColorRamp()[:, :, 0:2]\n image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)\n png0, image0, image1 = sess.run([png0, image0, image1])\n self.assertEqual(2, image0.shape[-1])\n self.assertAllEqual(image0, image1)\n\n def testShape(self):\n with self.test_session(use_gpu=True):\n png = constant_op.constant(\"nonsense\")\n for channels in 0, 1, 3:\n image = image_ops.decode_png(png, channels=channels)\n self.assertEqual(image.get_shape().as_list(),\n [None, None, channels or None])\n\n\nclass GifTest(test_util.TensorFlowTestCase):\n\n def _testValid(self, filename):\n # Read some real GIFs\n prefix = \"tensorflow/core/lib/gif/testdata/\"\n WIDTH = 20\n HEIGHT = 40\n STRIDE = 5\n shape = (12, HEIGHT, WIDTH, 3)\n\n with self.test_session(use_gpu=True) as sess:\n gif0 = io_ops.read_file(prefix + filename)\n image0 = image_ops.decode_gif(gif0)\n gif0, image0 = sess.run([gif0, image0])\n\n self.assertEqual(image0.shape, shape)\n\n for frame_idx, frame in enumerate(image0):\n gt = np.zeros(shape[1:], dtype=np.uint8)\n start = frame_idx * STRIDE\n end = (frame_idx + 1) * STRIDE\n print(frame_idx)\n if end <= WIDTH:\n gt[:, start:end, :] = 255\n else:\n start -= WIDTH\n end -= WIDTH\n gt[start:end, :, :] = 255\n\n self.assertAllClose(frame, gt)\n\n def testValid(self):\n self._testValid(\"scan.gif\")\n self._testValid(\"optimized.gif\")\n\n def testShape(self):\n with self.test_session(use_gpu=True) as sess:\n gif = constant_op.constant(\"nonsense\")\n image = image_ops.decode_gif(gif)\n self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])\n\n\nclass ConvertImageTest(test_util.TensorFlowTestCase):\n\n def _convert(self, original, original_dtype, output_dtype, expected):\n x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())\n y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())\n\n with self.test_session(use_gpu=True):\n image = constant_op.constant(x_np)\n y = image_ops.convert_image_dtype(image, output_dtype)\n self.assertTrue(y.dtype == output_dtype)\n self.assertAllClose(y.eval(), y_np, atol=1e-5)\n if output_dtype in [\n dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64\n ]:\n y_saturate = image_ops.convert_image_dtype(\n image, output_dtype, saturate=True)\n self.assertTrue(y_saturate.dtype == output_dtype)\n self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)\n\n def testNoConvert(self):\n # Make sure converting to the same data type creates only an identity op\n with self.test_session(use_gpu=True):\n image = constant_op.constant([1], dtype=dtypes.uint8)\n image_ops.convert_image_dtype(image, dtypes.uint8)\n y = image_ops.convert_image_dtype(image, dtypes.uint8)\n self.assertEquals(y.op.type, \"Identity\")\n self.assertEquals(y.op.inputs[0], image)\n\n def testConvertBetweenInteger(self):\n # Make sure converting to between integer types scales appropriately\n with self.test_session(use_gpu=True):\n self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])\n self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])\n self._convert([0, 
2**32], dtypes.int64, dtypes.int32, [0, 1])\n self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])\n\n def testConvertBetweenFloat(self):\n # Make sure converting to between float types does nothing interesting\n with self.test_session(use_gpu=True):\n self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,\n [-1.0, 0, 1.0, 200000])\n self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,\n [-1.0, 0, 1.0, 200000])\n\n def testConvertBetweenIntegerAndFloat(self):\n # Make sure converting from and to a float type scales appropriately\n with self.test_session(use_gpu=True):\n self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,\n [0, 1.0 / 255.0, 1])\n self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,\n [0, 1, 255])\n\n def testConvertBetweenInt16AndInt8(self):\n with self.test_session(use_gpu=True):\n # uint8, uint16\n self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])\n self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])\n # int8, uint16\n self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])\n self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])\n # int16, uint16\n self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])\n self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])\n\n\nclass TotalVariationTest(test_util.TensorFlowTestCase):\n \"\"\"Tests the function total_variation() in image_ops.\n\n We test a few small handmade examples, as well as\n some larger examples using an equivalent numpy\n implementation of the total_variation() function.\n\n We do NOT test for overflows and invalid / edge-case arguments.\n \"\"\"\n\n def _test(self, x_np, y_np):\n \"\"\"Test that the TensorFlow implementation of\n total_variation(x_np) calculates the values in y_np.\n\n Note that these may be float-numbers so we only test\n for approximate equality within some narrow error-bound.\n \"\"\"\n\n # Create a TensorFlow session.\n with self.test_session(use_gpu=True):\n # Add a constant to the TensorFlow graph that holds the input.\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n # Add ops for calculating the total variation using TensorFlow.\n y = image_ops.total_variation(images=x_tf)\n\n # Run the TensorFlow session to calculate the result.\n y_tf = y.eval()\n\n # Assert that the results are as expected within\n # some small error-bound in case they are float-values.\n self.assertAllClose(y_tf, y_np)\n\n def _total_variation_np(self, x_np):\n \"\"\"Calculate the total variation of x_np using numpy.\n This implements the same function as TensorFlow but\n using numpy instead.\n\n Args:\n x_np: Numpy array with 3 or 4 dimensions.\n \"\"\"\n\n dim = len(x_np.shape)\n\n if dim == 3:\n # Calculate differences for neighboring pixel-values using slices.\n dif1 = x_np[1:, :, :] - x_np[:-1, :, :]\n dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]\n\n # Sum for all axis.\n sum_axis = None\n elif dim == 4:\n # Calculate differences for neighboring pixel-values using slices.\n dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]\n dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]\n\n # Only sum for the last 3 axis.\n sum_axis = (1, 2, 3)\n else:\n # This should not occur in this test-code.\n pass\n\n tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \\\n np.sum(np.abs(dif2), axis=sum_axis)\n\n return tot_var\n\n def _test_tensorflow_vs_numpy(self, x_np):\n \"\"\"Test the TensorFlow implementation against a numpy implementation.\n\n Args:\n x_np: Numpy array with 
3 or 4 dimensions.\n \"\"\"\n\n # Calculate the y-values using the numpy implementation.\n y_np = self._total_variation_np(x_np)\n\n self._test(x_np, y_np)\n\n def _generateArray(self, shape):\n \"\"\"Generate an array of the given shape for use in testing.\n The numbers are calculated as the cumulative sum, which\n causes the difference between neighboring numbers to vary.\"\"\"\n\n # Flattened length of the array.\n flat_len = np.prod(shape)\n\n a = np.array(range(flat_len), dtype=int)\n a = np.cumsum(a)\n a = a.reshape(shape)\n\n return a\n\n def testTotalVariationNumpy(self):\n \"\"\"Test the TensorFlow implementation against a numpy implementation.\n The two implementations are very similar so it is possible that both\n have the same bug, which would not be detected by this test. It is\n therefore necessary to test with manually crafted data as well.\"\"\"\n\n # Generate a test-array.\n # This is an 'image' with 100x80 pixels and 3 color channels.\n a = self._generateArray(shape=(100, 80, 3))\n\n # Test the TensorFlow implementation vs. numpy implementation.\n # We use a numpy implementation to check the results that are\n # calculated using TensorFlow are correct.\n self._test_tensorflow_vs_numpy(a)\n self._test_tensorflow_vs_numpy(a + 1)\n self._test_tensorflow_vs_numpy(-a)\n self._test_tensorflow_vs_numpy(1.1 * a)\n\n # Expand to a 4-dim array.\n b = a[np.newaxis, :]\n\n # Combine several variations of the image into a single 4-dim array.\n multi = np.vstack((b, b + 1, -b, 1.1 * b))\n\n # Test that the TensorFlow function can also handle 4-dim arrays.\n self._test_tensorflow_vs_numpy(multi)\n\n def testTotalVariationHandmade(self):\n \"\"\"Test the total variation for a few handmade examples.\"\"\"\n\n # We create an image that is 2x2 pixels with 3 color channels.\n # The image is very small so we can check the result by hand.\n\n # Red color channel.\n # The following are the sum of absolute differences between the pixels.\n # sum row dif = (4-1) + (7-2) = 3 + 5 = 8\n # sum col dif = (2-1) + (7-4) = 1 + 3 = 4\n r = [[1, 2], [4, 7]]\n\n # Blue color channel.\n # sum row dif = 18 + 29 = 47\n # sum col dif = 7 + 18 = 25\n g = [[11, 18], [29, 47]]\n\n # Green color channel.\n # sum row dif = 120 + 193 = 313\n # sum col dif = 47 + 120 = 167\n b = [[73, 120], [193, 313]]\n\n # Combine the 3 color channels into a single 3-dim array.\n # The shape is (2, 2, 3) corresponding to (height, width and color).\n a = np.dstack((r, g, b))\n\n # Total variation for this image.\n # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564\n tot_var = 564\n\n # Calculate the total variation using TensorFlow and assert it is correct.\n self._test(a, tot_var)\n\n # If we add 1 to all pixel-values then the total variation is unchanged.\n self._test(a + 1, tot_var)\n\n # If we negate all pixel-values then the total variation is unchanged.\n self._test(-a, tot_var)\n\n # Scale the pixel-values by a float. 
This scales the total variation as well.\n b = 1.1 * a\n self._test(b, 1.1 * tot_var)\n\n # Scale by another float.\n c = 1.2 * a\n self._test(c, 1.2 * tot_var)\n\n # Combine these 3 images into a single array of shape (3, 2, 2, 3)\n # where the first dimension is for the image-number.\n multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))\n\n # Check that TensorFlow correctly calculates the total variation\n # for each image individually and returns the correct array.\n self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))\n\n\nclass FormatTest(test_util.TensorFlowTestCase):\n\n def testFormats(self):\n prefix = \"tensorflow/core/lib\"\n paths = (\"png/testdata/lena_gray.png\", \"jpeg/testdata/jpeg_merge_test1.jpg\",\n \"gif/testdata/lena.gif\")\n decoders = {\n \"jpeg\": functools.partial(image_ops.decode_jpeg, channels=3),\n \"png\": functools.partial(image_ops.decode_png, channels=3),\n \"gif\": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),\n }\n with self.cached_session():\n for path in paths:\n contents = io_ops.read_file(os.path.join(prefix, path)).eval()\n images = {}\n for name, decode in decoders.items():\n image = decode(contents).eval()\n self.assertEqual(image.ndim, 3)\n for prev_name, prev in images.items():\n print(\"path %s, names %s %s, shapes %s %s\" %\n (path, name, prev_name, image.shape, prev.shape))\n self.assertAllEqual(image, prev)\n images[name] = image\n\n def testError(self):\n path = \"tensorflow/core/lib/gif/testdata/scan.gif\"\n with self.cached_session():\n for decode in image_ops.decode_jpeg, image_ops.decode_png:\n with self.assertRaisesOpError(r\"Got 12 frames\"):\n decode(io_ops.read_file(path)).eval()\n\n\nclass NonMaxSuppressionTest(test_util.TensorFlowTestCase):\n\n def testSelectFromThreeClusters(self):\n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n max_output_size_np = 3\n iou_threshold_np = 0.5\n with self.cached_session():\n boxes = constant_op.constant(boxes_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np)\n selected_indices = image_ops.non_max_suppression(\n boxes, scores, max_output_size, iou_threshold).eval()\n self.assertAllClose(selected_indices, [3, 0, 5])\n\n def testInvalidShape(self):\n # The boxes should be 2D of shape [num_boxes, 4].\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be rank 2 but is rank 1\"):\n boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n with self.assertRaisesRegexp(ValueError, \"Dimension must be 4 but is 3\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0]])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n # The boxes is of shape [num_boxes, 4], and the scores is\n # of shape [num_boxes]. 
So an error will be thrown.\n with self.assertRaisesRegexp(ValueError,\n \"Dimensions must be equal, but are 1 and 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9, 0.75])\n selected_indices = image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n # The scores should be 1D of shape [num_boxes].\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be rank 1 but is rank 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([[0.9]])\n image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n # The max_output_size should be a scalar (0-D).\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be rank 0 but is rank 1\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, [3], 0.5)\n\n # The iou_threshold should be a scalar (0-D).\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be rank 0 but is rank 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])\n\n\nclass NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):\n\n def testSelectFromThreeClusters(self):\n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n max_output_size_np = 5\n iou_threshold_np = 0.5\n boxes = constant_op.constant(boxes_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np)\n selected_indices_padded, num_valid_padded = \\\n image_ops.non_max_suppression_padded(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n pad_to_max_output_size=True)\n selected_indices, num_valid = image_ops.non_max_suppression_padded(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n pad_to_max_output_size=False)\n # The output shape of the padded operation must be fully defined.\n self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)\n self.assertEqual(selected_indices.shape.is_fully_defined(), False)\n with self.cached_session():\n self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])\n self.assertEqual(num_valid_padded.eval(), 3)\n self.assertAllClose(selected_indices.eval(), [3, 0, 5])\n self.assertEqual(num_valid.eval(), 3)\n\n\nclass VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):\n \"\"\"Tests utility function used by ssim() and psnr().\"\"\"\n\n def testWrongDims(self):\n img = array_ops.placeholder(dtype=dtypes.float32)\n img_np = np.array((2, 2))\n\n with self.test_session(use_gpu=True) as sess:\n _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(checks, {img: img_np})\n\n def testShapeMismatch(self):\n img1 = array_ops.placeholder(dtype=dtypes.float32)\n img2 = array_ops.placeholder(dtype=dtypes.float32)\n\n img1_np = np.array([1, 2, 2, 1])\n img2_np = np.array([1, 3, 3, 1])\n\n with self.test_session(use_gpu=True) as sess:\n _, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(checks, {img1: img1_np, img2: img2_np})\n\n\nclass PSNRTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for PSNR.\"\"\"\n\n def _LoadTestImage(self, sess, filename):\n content = 
io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/psnr/testdata\", filename))\n im = image_ops.decode_jpeg(content, dct_method=\"INTEGER_ACCURATE\")\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = sess.run([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.test_session(use_gpu=True) as sess:\n q20 = self._LoadTestImage(sess, \"cat_q20.jpg\")\n q72 = self._LoadTestImage(sess, \"cat_q72.jpg\")\n q95 = self._LoadTestImage(sess, \"cat_q95.jpg\")\n return q20, q72, q95\n\n def _PSNR_NumPy(self, orig, target, max_value):\n \"\"\"Numpy implementation of PSNR.\"\"\"\n mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))\n return 20 * np.log10(max_value) - 10 * np.log10(mse)\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n def testPSNRSingleImage(self):\n image1 = self._RandomImage((8, 8, 1), 1)\n image2 = self._RandomImage((8, 8, 1), 1)\n psnr = self._PSNR_NumPy(image1, image2, 1)\n\n with self.test_session(use_gpu=True):\n tf_image1 = constant_op.constant(image1, shape=image1.shape,\n dtype=dtypes.float32)\n tf_image2 = constant_op.constant(image2, shape=image2.shape,\n dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n def testPSNRMultiImage(self):\n image1 = self._RandomImage((10, 8, 8, 1), 1)\n image2 = self._RandomImage((10, 8, 8, 1), 1)\n psnr = self._PSNR_NumPy(image1, image2, 1)\n\n with self.test_session(use_gpu=True):\n tf_image1 = constant_op.constant(image1, shape=image1.shape,\n dtype=dtypes.float32)\n tf_image2 = constant_op.constant(image2, shape=image2.shape,\n dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n def testGoldenPSNR(self):\n q20, q72, q95 = self._LoadTestImages()\n\n # Verify NumPy implementation first.\n # Golden values are generated using GNU Octave's psnr() function.\n psnr1 = self._PSNR_NumPy(q20, q72, 1)\n self.assertNear(30.321, psnr1, 0.001, msg=\"q20.dtype=\" + str(q20.dtype))\n psnr2 = self._PSNR_NumPy(q20, q95, 1)\n self.assertNear(29.994, psnr2, 0.001)\n psnr3 = self._PSNR_NumPy(q72, q95, 1)\n self.assertNear(35.302, psnr3, 0.001)\n\n # Test TensorFlow implementation.\n with self.test_session(use_gpu=True):\n tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)\n tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)\n tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)\n tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, \"psnr1\").eval()\n tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, \"psnr2\").eval()\n tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, \"psnr3\").eval()\n self.assertAllClose(psnr1, tf_psnr1, atol=0.001)\n self.assertAllClose(psnr2, tf_psnr2, atol=0.001)\n self.assertAllClose(psnr3, tf_psnr3, atol=0.001)\n\n def testInfinity(self):\n q20, _, _ = self._LoadTestImages()\n psnr = self._PSNR_NumPy(q20, q20, 1)\n with self.test_session(use_gpu=True):\n tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n def testInt(self):\n img1 = self._RandomImage((10, 8, 8, 1), 255)\n img2 = self._RandomImage((10, 8, 8, 1), 255)\n img1 = constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, 
dtypes.uint8)\n psnr_uint8 = image_ops.psnr(img1, img2, 255)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n psnr_float32 = image_ops.psnr(img1, img2, 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(psnr_uint8.eval(), psnr_float32.eval(), atol=0.001)\n\n\nclass SSIMTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for SSIM.\"\"\"\n\n _filenames = [\"checkerboard1.png\",\n \"checkerboard2.png\",\n \"checkerboard3.png\",]\n\n _ssim = np.asarray([[1.000000, 0.230880, 0.231153],\n [0.230880, 1.000000, 0.996828],\n [0.231153, 0.996828, 1.000000]])\n\n def _LoadTestImage(self, sess, filename):\n content = io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/ssim/testdata\", filename))\n im = image_ops.decode_png(content)\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = sess.run([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.test_session(use_gpu=True) as sess:\n return [self._LoadTestImage(sess, f) for f in self._filenames]\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n def testAgainstMatlab(self):\n \"\"\"Tests against values produced by Matlab.\"\"\"\n img = self._LoadTestImages()\n expected = self._ssim[np.triu_indices(3)]\n\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n ssim = image_ops.ssim(*ph, max_val=1.0)\n with self.test_session(use_gpu=True):\n scores = [ssim.eval(dict(zip(ph, t)))\n for t in itertools.combinations_with_replacement(img, 2)]\n self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)\n\n def testBatch(self):\n img = self._LoadTestImages()\n expected = self._ssim[np.triu_indices(3, k=1)]\n\n img1, img2 = zip(*itertools.combinations(img, 2))\n img1 = np.concatenate(img1)\n img2 = np.concatenate(img2)\n\n ssim = image_ops.ssim(constant_op.constant(img1),\n constant_op.constant(img2), 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(expected, ssim.eval(), atol=1e-4)\n\n def testBroadcast(self):\n img = self._LoadTestImages()[:2]\n expected = self._ssim[:2, :2]\n\n img = constant_op.constant(np.concatenate(img))\n img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.\n img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.\n\n ssim = image_ops.ssim(img1, img2, 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(expected, ssim.eval(), atol=1e-4)\n\n def testNegative(self):\n \"\"\"Tests against negative SSIM index.\"\"\"\n step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)\n img1 = np.tile(step, (16, 1))\n img2 = np.fliplr(img1)\n\n img1 = img1.reshape((1, 16, 16, 1))\n img2 = img2.reshape((1, 16, 16, 1))\n\n ssim = image_ops.ssim(constant_op.constant(img1),\n constant_op.constant(img2), 255)\n with self.test_session(use_gpu=True):\n self.assertLess(ssim.eval(), 0)\n\n def testInt(self):\n img1 = self._RandomImage((1, 16, 16, 3), 255)\n img2 = self._RandomImage((1, 16, 16, 3), 255)\n img1 = constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, dtypes.uint8)\n ssim_uint8 = image_ops.ssim(img1, img2, 255)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n ssim_float32 = image_ops.ssim(img1, img2, 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(ssim_uint8.eval(), ssim_float32.eval(), 
atol=0.001)\n\n\nclass MultiscaleSSIMTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for MS-SSIM.\"\"\"\n\n _filenames = [\"checkerboard1.png\",\n \"checkerboard2.png\",\n \"checkerboard3.png\",]\n\n _msssim = np.asarray([[1.000000, 0.091016, 0.091025],\n [0.091016, 1.000000, 0.999567],\n [0.091025, 0.999567, 1.000000]])\n\n def _LoadTestImage(self, sess, filename):\n content = io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/ssim/testdata\", filename))\n im = image_ops.decode_png(content)\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = sess.run([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.test_session(use_gpu=True) as sess:\n return [self._LoadTestImage(sess, f) for f in self._filenames]\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n def testAgainstMatlab(self):\n \"\"\"Tests against MS-SSIM computed with Matlab implementation.\n\n For color images, MS-SSIM scores are averaged over color channels.\n \"\"\"\n img = self._LoadTestImages()\n expected = self._msssim[np.triu_indices(3)]\n\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n msssim = image_ops.ssim_multiscale(*ph, max_val=1.0)\n with self.test_session(use_gpu=True):\n scores = [msssim.eval(dict(zip(ph, t)))\n for t in itertools.combinations_with_replacement(img, 2)]\n\n self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)\n\n def testUnweightedIsDifferentiable(self):\n img = self._LoadTestImages()\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n scalar = constant_op.constant(1.0, dtype=dtypes.float32)\n scaled_ph = [x * scalar for x in ph]\n msssim = image_ops.ssim_multiscale(*scaled_ph, max_val=1.0,\n power_factors=(1, 1, 1, 1, 1))\n grads = gradients.gradients(msssim, scalar)\n with self.test_session(use_gpu=True) as sess:\n np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})\n self.assertTrue(np.isfinite(np_grads).all())\n\n def testBatch(self):\n \"\"\"Tests MS-SSIM computed in batch.\"\"\"\n img = self._LoadTestImages()\n expected = self._msssim[np.triu_indices(3, k=1)]\n\n img1, img2 = zip(*itertools.combinations(img, 2))\n img1 = np.concatenate(img1)\n img2 = np.concatenate(img2)\n\n msssim = image_ops.ssim_multiscale(constant_op.constant(img1),\n constant_op.constant(img2), 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(expected, msssim.eval(), 1e-4)\n\n def testBroadcast(self):\n \"\"\"Tests MS-SSIM broadcasting.\"\"\"\n img = self._LoadTestImages()[:2]\n expected = self._msssim[:2, :2]\n\n img = constant_op.constant(np.concatenate(img))\n img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.\n img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.\n\n score_tensor = image_ops.ssim_multiscale(img1, img2, 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(expected, score_tensor.eval(), 1e-4)\n\n def testRange(self):\n \"\"\"Tests against low MS-SSIM score.\n\n MS-SSIM is a geometric mean of SSIM and CS scores of various scales.\n If any of the value is negative so that the geometric mean is not\n well-defined, then treat the MS-SSIM score as zero.\n \"\"\"\n with self.test_session(use_gpu=True) as sess:\n img1 = self._LoadTestImage(sess, \"checkerboard1.png\")\n img2 = self._LoadTestImage(sess, \"checkerboard3.png\")\n images = [img1, img2, np.zeros_like(img1),\n np.full_like(img1, fill_value=255)]\n\n images = 
[ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]\n msssim_ops = [image_ops.ssim_multiscale(x, y, 1.0)\n for x, y in itertools.combinations(images, 2)]\n msssim = sess.run(msssim_ops)\n msssim = np.squeeze(msssim)\n\n self.assertTrue(np.all(msssim >= 0.0))\n self.assertTrue(np.all(msssim <= 1.0))\n\n def testInt(self):\n img1 = self._RandomImage((1, 180, 240, 3), 255)\n img2 = self._RandomImage((1, 180, 240, 3), 255)\n img1 = constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, dtypes.uint8)\n ssim_uint8 = image_ops.ssim_multiscale(img1, img2, 255)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n ssim_float32 = image_ops.ssim_multiscale(img1, img2, 1.0)\n with self.test_session(use_gpu=True):\n self.assertAllClose(ssim_uint8.eval(), ssim_float32.eval(), atol=0.001)\n\n\nclass ImageGradientsTest(test_util.TensorFlowTestCase):\n\n def testImageGradients(self):\n shape = [1, 2, 4, 1]\n img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])\n img = array_ops.reshape(img, shape)\n\n expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)\n expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)\n\n dy, dx = image_ops.image_gradients(img)\n with self.cached_session():\n actual_dy = dy.eval()\n actual_dx = dx.eval()\n self.assertAllClose(expected_dy, actual_dy)\n self.assertAllClose(expected_dx, actual_dx)\n\n def testImageGradientsMultiChannelBatch(self):\n batch = [[[[1, 2], [2, 5], [3, 3]],\n [[8, 4], [5, 1], [9, 8]]],\n [[[5, 3], [7, 9], [1, 6]],\n [[1, 2], [6, 3], [6, 3]]]]\n\n expected_dy = [[[[7, 2], [3, -4], [6, 5]],\n [[0, 0], [0, 0], [0, 0]]],\n [[[-4, -1], [-1, -6], [5, -3]],\n [[0, 0], [0, 0], [0, 0]]]]\n\n expected_dx = [[[[1, 3], [1, -2], [0, 0]],\n [[-3, -3], [4, 7], [0, 0]]],\n [[[2, 6], [-6, -3], [0, 0]],\n [[5, 1], [0, 0], [0, 0]]]]\n\n batch = constant_op.constant(batch)\n assert batch.get_shape().as_list() == [2, 2, 3, 2]\n dy, dx = image_ops.image_gradients(batch)\n with self.test_session(use_gpu=True):\n actual_dy = dy.eval()\n actual_dx = dx.eval()\n self.assertAllClose(expected_dy, actual_dy)\n self.assertAllClose(expected_dx, actual_dx)\n\n def testImageGradientsBadShape(self):\n # [2 x 4] image but missing batch and depth dimensions.\n img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])\n with self.assertRaises(ValueError):\n image_ops.image_gradients(img)\n\n\nclass SobelEdgesTest(test_util.TensorFlowTestCase):\n\n def testSobelEdges1x2x3x1(self):\n img = constant_op.constant([[1, 3, 6], [4, 1, 5]],\n dtype=dtypes.float32, shape=[1, 2, 3, 1])\n expected = np.reshape([[[0, 0], [0, 12], [0, 0]],\n [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])\n sobel = image_ops.sobel_edges(img)\n with self.test_session(use_gpu=True):\n actual_sobel = sobel.eval()\n self.assertAllClose(expected, actual_sobel)\n\n def testSobelEdges5x3x4x2(self):\n batch_size = 5\n plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],\n [1, 3, 4, 1])\n two_channel = np.concatenate([plane, plane], axis=3)\n batch = np.concatenate([two_channel] * batch_size, axis=0)\n img = constant_op.constant(batch, dtype=dtypes.float32,\n shape=[batch_size, 3, 4, 2])\n\n expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],\n [[6, 0], [0, 6], [-6, 10], [-6, 0]],\n [[0, 0], [0, 0], [0, 10], [0, 0]]],\n [1, 3, 4, 1, 2])\n expected_two_channel = np.concatenate(\n [expected_plane, expected_plane], axis=3)\n expected_batch = np.concatenate([expected_two_channel] * 
batch_size, axis=0)\n\n sobel = image_ops.sobel_edges(img)\n with self.test_session(use_gpu=True):\n actual_sobel = sobel.eval()\n self.assertAllClose(expected_batch, actual_sobel)\n\n\nclass DecodeImageTest(test_util.TensorFlowTestCase):\n\n def testJpegUint16(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),\n dtypes.uint16)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testPngUint16(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/png/testdata\"\n png0 = io_ops.read_file(os.path.join(base, \"lena_rgba.png\"))\n image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(\n image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testGifUint16(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/gif/testdata\"\n gif0 = io_ops.read_file(os.path.join(base, \"scan.gif\"))\n image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),\n dtypes.uint16)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testBmpUint16(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/bmp/testdata\"\n bmp0 = io_ops.read_file(os.path.join(base, \"lena.bmp\"))\n image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),\n dtypes.uint16)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testJpegFloat32(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),\n dtypes.float32)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testPngFloat32(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/png/testdata\"\n png0 = io_ops.read_file(os.path.join(base, \"lena_rgba.png\"))\n image0 = image_ops.decode_image(png0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(\n image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testGifFloat32(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/gif/testdata\"\n gif0 = io_ops.read_file(os.path.join(base, \"scan.gif\"))\n image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),\n dtypes.float32)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testBmpFloat32(self):\n with self.test_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/bmp/testdata\"\n bmp0 = io_ops.read_file(os.path.join(base, \"lena.bmp\"))\n image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),\n 
dtypes.float32)\n image0, image1 = sess.run([image0, image1])\n self.assertAllEqual(image0, image1)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Momentum.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import momentum as momentum_lib\n\n\nclass MomentumOptimizerTest(xla_test.XLATestCase):\n\n def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):\n var += accum * lr * momentum\n accum = accum * momentum + g\n var -= lr * accum\n var -= accum * lr * momentum\n return var, accum\n\n def testBasic(self):\n for dtype in self.float_types:\n with self.cached_session(), self.test_scope():\n var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)\n grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n mom_opt = momentum_lib.MomentumOptimizer(\n learning_rate=2.0, momentum=0.9)\n mom_update = mom_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n # Check we have slots\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n self.assertFalse(slot0 in variables.trainable_variables())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n self.assertFalse(slot1 in variables.trainable_variables())\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Step 1: the momentum accumulators where 0. 
So we should see a normal\n # update: v -= grad * learning_rate\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())\n self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllCloseAccordingToType(\n np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())\n # Step 2: the momentum accumulators contain the previous update.\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllCloseAccordingToType(\n np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())\n self.assertAllCloseAccordingToType(\n np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllCloseAccordingToType(\n np.array([\n 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)\n ]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([\n 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (\n (0.9 * 0.01 + 0.01) * 2.0)\n ]), var1.eval())\n\n def testNesterovMomentum(self):\n for dtype in self.float_types:\n with self.cached_session(), self.test_scope():\n var0 = resource_variable_ops.ResourceVariable([0.1, 0.2], dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable([0.3, 0.4], dtype=dtype)\n var0_np = np.array([0.1, 0.2], dtype=dtype)\n var1_np = np.array([0.3, 0.4], dtype=dtype)\n accum0_np = np.array([0.0, 0.0], dtype=dtype)\n accum1_np = np.array([0.0, 0.0], dtype=dtype)\n cost = 0.4 * var0 * var0 + 0.9 * var1\n global_step = resource_variable_ops.ResourceVariable(\n array_ops.zeros([], dtypes.int32), name=\"global_step\")\n mom_op = momentum_lib.MomentumOptimizer(\n learning_rate=0.1, momentum=0.9, use_nesterov=True)\n opt_op = mom_op.minimize(cost, global_step, [var0, var1])\n variables.global_variables_initializer().run()\n for _ in range(1, 5):\n opt_op.run()\n var0_np, accum0_np = self._update_nesterov_momentum_numpy(\n var0_np, accum0_np, var0_np * 0.8, 0.1, 0.9)\n var1_np, accum1_np = self._update_nesterov_momentum_numpy(\n var1_np, accum1_np, 0.9, 0.1, 0.9)\n self.assertAllCloseAccordingToType(var0_np, var0.eval())\n self.assertAllCloseAccordingToType(var1_np, var1.eval())\n\n def testTensorLearningRateAndMomentum(self):\n for dtype in self.float_types:\n with self.cached_session(), self.test_scope():\n var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)\n grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n mom_opt = momentum_lib.MomentumOptimizer(\n learning_rate=constant_op.constant(2.0),\n momentum=constant_op.constant(0.9))\n mom_update = mom_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n # Check we have slots\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n self.assertFalse(slot0 in variables.trainable_variables())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n self.assertFalse(slot1 in variables.trainable_variables())\n\n # Fetch params to validate initial values\n 
self.assertAllClose([1.0, 2.0], var0.eval())\n        self.assertAllClose([3.0, 4.0], var1.eval())\n        # Step 1: the momentum accumulators were 0. So we should see a normal\n        # update: v -= grad * learning_rate\n        mom_update.run()\n        # Check that the momentum accumulators have been updated.\n        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())\n        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())\n        # Check that the parameters have been updated.\n        self.assertAllCloseAccordingToType(\n            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())\n        self.assertAllCloseAccordingToType(\n            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())\n        # Step 2: the momentum accumulators contain the previous update.\n        mom_update.run()\n        # Check that the momentum accumulators have been updated.\n        self.assertAllCloseAccordingToType(\n            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())\n        self.assertAllCloseAccordingToType(\n            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())\n        # Check that the parameters have been updated.\n        self.assertAllCloseAccordingToType(\n            np.array([\n                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)\n            ]), var0.eval())\n        self.assertAllCloseAccordingToType(\n            np.array([\n                2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (\n                    (0.9 * 0.01 + 0.01) * 2.0)\n            ]), var1.eval())\n\n\nif __name__ == \"__main__\":\n  test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ops for boosted_trees.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_boosted_trees_ops\nfrom tensorflow.python.ops import resources\n\n# Re-exporting ops used by other modules.\n# pylint: disable=unused-import\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble\n# pylint: enable=unused-import\n\nfrom tensorflow.python.training import saver\n\n\nclass PruningMode(object):\n  \"\"\"Class for working with Pruning modes.\"\"\"\n  NO_PRUNING, PRE_PRUNING, 
POST_PRUNING = range(0, 3)\n\n  _map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING}\n\n  @classmethod\n  def from_str(cls, mode):\n    if mode in cls._map:\n      return cls._map[mode]\n    else:\n      raise ValueError('pruning_mode must be one of: {}'.format(', '.join(\n          sorted(cls._map))))\n\n\nclass _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject):\n  \"\"\"SaveableObject implementation for TreeEnsemble.\"\"\"\n\n  def __init__(self, resource_handle, create_op, name):\n    \"\"\"Creates a _TreeEnsembleSavable object.\n\n    Args:\n      resource_handle: handle to the decision tree ensemble variable.\n      create_op: the op to initialize the variable.\n      name: the name to save the tree ensemble variable under.\n    \"\"\"\n    stamp_token, serialized = (\n        gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle))\n    # slice_spec is useful for saving a slice from a variable.\n    # It's not meaningful for the tree ensemble variable. So we just pass an\n    # empty value.\n    slice_spec = ''\n    specs = [\n        saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,\n                                        name + '_stamp'),\n        saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec,\n                                        name + '_serialized'),\n    ]\n    super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)\n    self._resource_handle = resource_handle\n    self._create_op = create_op\n\n  def restore(self, restored_tensors, unused_restored_shapes):\n    \"\"\"Restores the associated tree ensemble from 'restored_tensors'.\n\n    Args:\n      restored_tensors: the tensors that were loaded from a checkpoint.\n      unused_restored_shapes: the shapes this object should conform to after\n        restore. Not meaningful for trees.\n\n    Returns:\n      The operation that restores the state of the tree ensemble variable.\n    \"\"\"\n    with ops.control_dependencies([self._create_op]):\n      return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(\n          self._resource_handle,\n          stamp_token=restored_tensors[0],\n          tree_ensemble_serialized=restored_tensors[1])\n\n\nclass TreeEnsemble(object):\n  \"\"\"Creates TreeEnsemble resource.\"\"\"\n\n  def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''):\n    with ops.name_scope(name, 'TreeEnsemble') as name:\n      self._resource_handle = (\n          gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(\n              container='', shared_name=name, name=name))\n      create_op = gen_boosted_trees_ops.boosted_trees_create_ensemble(\n          self.resource_handle,\n          stamp_token,\n          tree_ensemble_serialized=serialized_proto)\n      is_initialized_op = (\n          gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized(\n              self._resource_handle))\n      # Adds the variable to the savable list.\n      if not is_local:\n        saveable = _TreeEnsembleSavable(self.resource_handle, create_op,\n                                        self.resource_handle.name)\n        ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)\n      resources.register_resource(\n          self.resource_handle,\n          create_op,\n          is_initialized_op,\n          is_shared=not is_local)\n\n  @property\n  def resource_handle(self):\n    return self._resource_handle\n\n  def get_stamp_token(self):\n    \"\"\"Returns the current stamp token of the resource.\"\"\"\n    stamp_token, _, _, _, _ = (\n        gen_boosted_trees_ops.boosted_trees_get_ensemble_states(\n            self.resource_handle))\n    return stamp_token\n\n  def get_states(self):\n    \"\"\"Returns states of the tree ensemble.\n\n    Returns:\n      stamp_token, num_trees, num_finalized_trees, num_attempted_layers and\n      range of the nodes in the latest layer.\n    \"\"\"\n    (stamp_token, num_trees, num_finalized_trees, num_attempted_layers,\n     nodes_range) = (\n         
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(\n self.resource_handle))\n # Use identity to give names.\n return (array_ops.identity(stamp_token, name='stamp_token'),\n array_ops.identity(num_trees, name='num_trees'),\n array_ops.identity(num_finalized_trees, name='num_finalized_trees'),\n array_ops.identity(\n num_attempted_layers, name='num_attempted_layers'),\n array_ops.identity(nodes_range, name='last_layer_nodes_range'))\n\n def serialize(self):\n \"\"\"Serializes the ensemble into proto and returns the serialized proto.\n\n Returns:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n \"\"\"\n return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(\n self.resource_handle)\n\n def deserialize(self, stamp_token, serialized_proto):\n \"\"\"Deserialize the input proto and resets the ensemble from it.\n\n Args:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n\n Returns:\n Operation (for dependencies).\n \"\"\"\n return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(\n self.resource_handle, stamp_token, serialized_proto)\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the sort wrapper.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.framework.python.ops import sort_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import test\n\n\nclass SortTest(test.TestCase):\n\n def testRandom_lowDimensionality(self):\n self._testRandom_lowDimensionality(negative_axis=False)\n\n def testRandom_lowDimensionality_negative(self):\n self._testRandom_lowDimensionality(negative_axis=True)\n\n def _testRandom_lowDimensionality(self, negative_axis):\n np.random.seed(42)\n for _ in range(20):\n rank = np.random.randint(1, 3)\n shape = [np.random.randint(0, 20) for _ in range(rank)]\n arr = np.random.random(shape)\n sort_axis = np.random.choice(rank)\n if negative_axis:\n sort_axis = -1 - sort_axis\n with self.cached_session():\n self.assertAllEqual(\n np.sort(arr, axis=sort_axis),\n sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())\n\n def testRandom_highDimensionality(self):\n np.random.seed(100)\n for _ in range(20):\n rank = np.random.randint(5, 15)\n shape = [np.random.randint(1, 4) for _ in range(rank)]\n arr = np.random.random(shape)\n sort_axis = np.random.choice(rank)\n with 
self.cached_session():\n self.assertAllEqual(\n np.sort(arr, axis=sort_axis),\n sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())\n\n def testScalar(self):\n # Create an empty scalar where the static shape is unknown.\n zeros_length_1 = array_ops.zeros(\n random_ops.random_uniform([1], minval=0, maxval=1, dtype=dtypes.int32),\n dtype=dtypes.int32)\n scalar = array_ops.zeros(zeros_length_1)\n\n sort = sort_ops.sort(scalar)\n with self.cached_session():\n with self.assertRaises(errors.InvalidArgumentError):\n sort.eval()\n\n def testNegativeOutOfBounds_staticShape(self):\n arr = constant_op.constant([3, 4, 5])\n with self.assertRaises(ValueError):\n sort_ops.sort(arr, axis=-4)\n\n def testDescending(self):\n arr = np.random.random((10, 5, 5))\n with self.cached_session():\n self.assertAllEqual(\n np.sort(arr, axis=0)[::-1],\n sort_ops.sort(\n constant_op.constant(arr),\n axis=0,\n direction='DESCENDING').eval())\n\n def testSort_staticallyKnownRank_constantTransposition(self):\n # The transposition array should be a constant if the rank of \"values\" is\n # statically known.\n tensor = random_ops.random_uniform(\n # Rank is statically known to be 5, but the dimension lengths are not\n # known.\n random_ops.random_uniform(\n shape=(5,), minval=0, maxval=10, dtype=dtypes.int32))\n sort_ops.sort(tensor, axis=1)\n transposition = (\n ops.get_default_graph().get_tensor_by_name('sort/transposition:0'))\n self.assertFalse(tensor_util.constant_value(transposition) is None)\n self.assertAllEqual(\n # Swaps \"1\" and \"4\" to put \"1\" at the end.\n tensor_util.constant_value(transposition),\n [0, 4, 2, 3, 1])\n\n def testArgsort_1d(self):\n arr = np.random.random(42)\n with self.cached_session():\n self.assertAllEqual(\n np.sort(arr),\n array_ops.gather(arr, sort_ops.argsort(arr)).eval())\n\n def testArgsort(self):\n arr = np.random.random((5, 6, 7, 8))\n for axis in range(4):\n with self.cached_session():\n self.assertAllEqual(\n np.argsort(arr, axis=axis),\n sort_ops.argsort(arr, axis=axis).eval())\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.linalg.linalg_impl.matrix_exponential.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.linalg import linalg_impl\nfrom tensorflow.python.platform import test\n\n\ndef np_expm(x): # pylint: disable=invalid-name\n \"\"\"Slow but accurate Taylor series matrix exponential.\"\"\"\n y = np.zeros(x.shape, dtype=x.dtype)\n xn = np.eye(x.shape[0], dtype=x.dtype)\n for n in range(40):\n if n > 0:\n xn /= float(n)\n y += xn\n xn = np.dot(xn, x)\n return y\n\n\nclass ExponentialOpTest(test.TestCase):\n\n def _verifyExponential(self, x, np_type):\n inp = x.astype(np_type)\n with self.test_session(use_gpu=True):\n tf_ans = linalg_impl.matrix_exponential(inp)\n if x.size == 0:\n np_ans = np.empty(x.shape, dtype=np_type)\n else:\n if x.ndim > 2:\n np_ans = np.zeros(inp.shape, dtype=np_type)\n for i in itertools.product(*[range(x) for x in inp.shape[:-2]]):\n np_ans[i] = np_expm(inp[i])\n else:\n np_ans = np_expm(inp)\n out = tf_ans.eval()\n self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)\n\n def _verifyExponentialReal(self, x):\n for np_type in [np.float32, np.float64]:\n self._verifyExponential(x, np_type)\n\n def _verifyExponentialComplex(self, x):\n for np_type in [np.complex64, np.complex128]:\n self._verifyExponential(x, np_type)\n\n def _makeBatch(self, matrix1, matrix2):\n matrix_batch = np.concatenate(\n [np.expand_dims(matrix1, 0),\n np.expand_dims(matrix2, 0)])\n matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])\n return matrix_batch\n\n def testNonsymmetricReal(self):\n # 2x2 matrices\n matrix1 = np.array([[1., 2.], [3., 4.]])\n matrix2 = np.array([[1., 3.], [3., 5.]])\n self._verifyExponentialReal(matrix1)\n self._verifyExponentialReal(matrix2)\n # A multidimensional batch of 2x2 matrices\n self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))\n\n def testNonsymmetricComplex(self):\n matrix1 = np.array([[1., 2.], [3., 4.]])\n matrix2 = np.array([[1., 3.], [3., 5.]])\n matrix1 = matrix1.astype(np.complex64)\n matrix1 += 1j * matrix1\n matrix2 = matrix2.astype(np.complex64)\n matrix2 += 1j * matrix2\n self._verifyExponentialComplex(matrix1)\n self._verifyExponentialComplex(matrix2)\n # Complex batch\n self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))\n\n def testSymmetricPositiveDefiniteReal(self):\n # 2x2 matrices\n matrix1 = np.array([[2., 1.], [1., 2.]])\n matrix2 = np.array([[3., -1.], [-1., 3.]])\n 
self._verifyExponentialReal(matrix1)\n self._verifyExponentialReal(matrix2)\n # A multidimensional batch of 2x2 matrices\n self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))\n\n def testSymmetricPositiveDefiniteComplex(self):\n matrix1 = np.array([[2., 1.], [1., 2.]])\n matrix2 = np.array([[3., -1.], [-1., 3.]])\n matrix1 = matrix1.astype(np.complex64)\n matrix1 += 1j * matrix1\n matrix2 = matrix2.astype(np.complex64)\n matrix2 += 1j * matrix2\n self._verifyExponentialComplex(matrix1)\n self._verifyExponentialComplex(matrix2)\n # Complex batch\n self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))\n\n def testNonSquareMatrix(self):\n # When the exponential of a non-square matrix is attempted we should return\n # an error\n with self.assertRaises(ValueError):\n linalg_impl.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))\n\n def testWrongDimensions(self):\n # The input to the exponential should be at least a 2-dimensional tensor.\n tensor3 = constant_op.constant([1., 2.])\n with self.assertRaises(ValueError):\n linalg_impl.matrix_exponential(tensor3)\n\n def testEmpty(self):\n self._verifyExponentialReal(np.empty([0, 2, 2]))\n self._verifyExponentialReal(np.empty([2, 0, 0]))\n\n def testDynamic(self):\n with self.test_session(use_gpu=True) as sess:\n inp = array_ops.placeholder(ops.dtypes.float32)\n expm = linalg_impl.matrix_exponential(inp)\n matrix = np.array([[1., 2.], [3., 4.]])\n sess.run(expm, feed_dict={inp: matrix})\n\n def testConcurrentExecutesWithoutError(self):\n with self.test_session(use_gpu=True) as sess:\n matrix1 = random_ops.random_normal([5, 5], seed=42)\n matrix2 = random_ops.random_normal([5, 5], seed=42)\n expm1 = linalg_impl.matrix_exponential(matrix1)\n expm2 = linalg_impl.matrix_exponential(matrix2)\n expm = sess.run([expm1, expm2])\n self.assertAllEqual(expm[0], expm[1])\n\n\nclass MatrixExponentialBenchmark(test.Benchmark):\n\n shapes = [\n (4, 4),\n (10, 10),\n (16, 16),\n (101, 101),\n (256, 256),\n (1000, 1000),\n (1024, 1024),\n (2048, 2048),\n (513, 4, 4),\n (513, 16, 16),\n (513, 256, 256),\n ]\n\n def _GenerateMatrix(self, shape):\n batch_shape = shape[:-2]\n shape = shape[-2:]\n assert shape[0] == shape[1]\n n = shape[0]\n matrix = np.ones(shape).astype(np.float32) / (\n 2.0 * n) + np.diag(np.ones(n).astype(np.float32))\n return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))\n\n def benchmarkMatrixExponentialOp(self):\n for shape in self.shapes:\n with ops.Graph().as_default(), \\\n session.Session() as sess, \\\n ops.device(\"/cpu:0\"):\n matrix = self._GenerateMatrix(shape)\n expm = linalg_impl.matrix_exponential(matrix)\n variables.global_variables_initializer().run()\n self.run_op_benchmark(\n sess,\n control_flow_ops.group(expm),\n min_iters=25,\n name=\"matrix_exponential_cpu_{shape}\".format(\n shape=shape))\n\n if test.is_gpu_available(True):\n with ops.Graph().as_default(), \\\n session.Session() as sess, \\\n ops.device(\"/gpu:0\"):\n matrix = self._GenerateMatrix(shape)\n expm = linalg_impl.matrix_exponential(matrix)\n variables.global_variables_initializer().run()\n self.run_op_benchmark(\n sess,\n control_flow_ops.group(expm),\n min_iters=25,\n name=\"matrix_exponential_gpu_{shape}\".format(\n shape=shape))\n\n\ndef _TestRandomSmall(dtype, batch_dims, size):\n\n def Test(self):\n np.random.seed(42)\n shape = batch_dims + (size, size)\n matrix = np.random.uniform(\n low=-1.0, high=1.0,\n size=shape).astype(dtype)\n self._verifyExponentialReal(matrix)\n\n return Test\n\n\ndef _TestL1Norms(dtype, 
shape, scale):\n\n def Test(self):\n np.random.seed(42)\n matrix = np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape)).reshape(shape).astype(dtype)\n print(dtype, shape, scale, matrix)\n l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim-2))\n matrix /= l1_norm\n self._verifyExponentialReal(scale * matrix)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:\n for batch_ in [(), (2,), (2, 2)]:\n for size_ in [4, 7]:\n name = \"%s_%d_%d\" % (dtype_.__name__, len(batch_), size_)\n setattr(ExponentialOpTest, \"testL1Norms_\" + name,\n _TestRandomSmall(dtype_, batch_, size_))\n\n for shape_ in [(3, 3), (2, 3, 3)]:\n for dtype_ in [np.float32, np.complex64]:\n for scale_ in [0.1, 1.5, 5.0, 20.0]:\n name = \"%s_%d_%d\" % (dtype_.__name__, len(shape_), int(scale_*10))\n setattr(ExponentialOpTest, \"testL1Norms_\" + name,\n _TestL1Norms(dtype_, shape_, scale_))\n for dtype_ in [np.float64, np.complex128]:\n for scale_ in [0.01, 0.2, 0.5, 1.5, 6.0, 25.0]:\n name = \"%s_%d_%d\" % (dtype_.__name__, len(shape_), int(scale_*100))\n setattr(ExponentialOpTest, \"testL1Norms_\" + name,\n _TestL1Norms(dtype_, shape_, scale_))\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"System for specifying garbage collection (GC) of path based data.\n\nThis framework allows for GC of data specified by path names, for example files\non disk. gc.Path objects each represent a single item stored at a path and may\nbe a base directory,\n /tmp/exports/0/...\n /tmp/exports/1/...\n ...\nor a fully qualified file,\n /tmp/train-1.ckpt\n /tmp/train-2.ckpt\n ...\n\nA gc filter function takes and returns a list of gc.Path items. 
Filter\nfunctions are responsible for selecting Path items for preservation or deletion.\nNote that functions should always return a sorted list.\n\nFor example,\n base_dir = \"/tmp\"\n # Create the directories.\n for e in xrange(10):\n os.mkdir(\"%s/%d\" % (base_dir, e), 0o755)\n\n # Create a simple parser that pulls the export_version from the directory.\n path_regex = \"^\" + re.escape(base_dir) + \"/(\\\\d+)$\"\n def parser(path):\n match = re.match(path_regex, path.path)\n if not match:\n return None\n return path._replace(export_version=int(match.group(1)))\n\n path_list = gc._get_paths(\"/tmp\", parser) # contains all ten Paths\n\n every_fifth = gc._mod_export_version(5)\n print(every_fifth(path_list)) # shows [\"/tmp/0\", \"/tmp/5\"]\n\n largest_three = gc.largest_export_versions(3)\n print(largest_three(all_paths)) # shows [\"/tmp/7\", \"/tmp/8\", \"/tmp/9\"]\n\n both = gc._union(every_fifth, largest_three)\n print(both(all_paths)) # shows [\"/tmp/0\", \"/tmp/5\",\n # \"/tmp/7\", \"/tmp/8\", \"/tmp/9\"]\n # Delete everything not in 'both'.\n to_delete = gc._negation(both)\n for p in to_delete(all_paths):\n gfile.DeleteRecursively(p.path) # deletes: \"/tmp/1\", \"/tmp/2\",\n # \"/tmp/3\", \"/tmp/4\", \"/tmp/6\",\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport heapq\nimport math\nimport os\n\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nPath = collections.namedtuple('Path', 'path export_version')\n\n\ndef _largest_export_versions(n):\n \"\"\"Creates a filter that keeps the largest n export versions.\n\n Args:\n n: number of versions to keep.\n\n Returns:\n A filter function that keeps the n largest paths.\n \"\"\"\n def keep(paths):\n heap = []\n for idx, path in enumerate(paths):\n if path.export_version is not None:\n heapq.heappush(heap, (path.export_version, idx))\n keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]\n return sorted(keepers)\n\n return keep\n\n\ndef _one_of_every_n_export_versions(n):\n \"\"\"Creates a filter that keeps one of every n export versions.\n\n Args:\n n: interval size.\n\n Returns:\n A filter function that keeps exactly one path from each interval\n [0, n], (n, 2n], (2n, 3n], etc... 
If more than one path exists in an\n    interval the largest is kept.\n  \"\"\"\n  def keep(paths):\n    \"\"\"A filter function that keeps exactly one out of every n paths.\"\"\"\n\n    keeper_map = {}  # map from interval to largest path seen in that interval\n    for p in paths:\n      if p.export_version is None:\n        # Skip missing export_versions.\n        continue\n      # Find the interval (with a special case to map export_version = 0 to\n      # interval 0).\n      interval = math.floor(\n          (p.export_version - 1) / n) if p.export_version else 0\n      existing = keeper_map.get(interval, None)\n      if (not existing) or (existing.export_version < p.export_version):\n        keeper_map[interval] = p\n    return sorted(keeper_map.values())\n\n  return keep\n\n\ndef _mod_export_version(n):\n  \"\"\"Creates a filter that keeps every export that is a multiple of n.\n\n  Args:\n    n: step size.\n\n  Returns:\n    A filter function that keeps paths where export_version % n == 0.\n  \"\"\"\n  def keep(paths):\n    keepers = []\n    for p in paths:\n      if p.export_version % n == 0:\n        keepers.append(p)\n    return sorted(keepers)\n  return keep\n\n\ndef _union(lf, rf):\n  \"\"\"Creates a filter that keeps the union of two filters.\n\n  Args:\n    lf: first filter\n    rf: second filter\n\n  Returns:\n    A filter function that keeps the union of the paths kept by lf and rf.\n  \"\"\"\n  def keep(paths):\n    l = set(lf(paths))\n    r = set(rf(paths))\n    return sorted(list(l|r))\n  return keep\n\n\ndef _negation(f):\n  \"\"\"Negate a filter.\n\n  Args:\n    f: filter function to invert\n\n  Returns:\n    A filter function that returns the negation of f.\n  \"\"\"\n  def keep(paths):\n    l = set(paths)\n    r = set(f(paths))\n    return sorted(list(l-r))\n  return keep\n\n\ndef _get_paths(base_dir, parser):\n  \"\"\"Gets a list of Paths in a given directory.\n\n  Args:\n    base_dir: directory.\n    parser: a function which gets the raw Path and can augment it with\n      information such as the export_version, or ignore the path by returning\n      None. An example parser may extract the export version from a path\n      such as \"/tmp/exports/100\" and another may extract from a full file\n      name such as \"/tmp/checkpoint-99.out\".\n\n  Returns:\n    A list of Paths contained in the base directory with the parsing function\n    applied.\n    By default the following fields are populated,\n      - Path.path\n    The parsing function is responsible for populating,\n      - Path.export_version\n  \"\"\"\n  raw_paths = gfile.ListDirectory(base_dir)\n  paths = []\n  for r in raw_paths:\n    # ListDirectory() returns paths with a trailing \"/\" if base_dir is a GCS URL\n    r = compat.as_str_any(r)\n    if r[-1] == '/':\n      r = r[0:len(r)-1]\n    p = parser(Path(os.path.join(compat.as_str_any(base_dir), r), None))\n    if p:\n      paths.append(p)\n  return sorted(paths)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for xla.reduce_window.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.compiler.tf2xla.python import xla\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import googletest\n\n\nclass ReduceWindowTest(xla_test.XLATestCase):\n \"\"\"Test cases for xla.reduce_window.\"\"\"\n\n def _reduce_window(self, operand, init, reducer, **kwargs):\n with self.cached_session():\n placeholder = array_ops.placeholder(operand.dtype)\n with self.test_scope():\n output = xla.reduce_window(placeholder, init, reducer, **kwargs)\n return output.eval(feed_dict={placeholder: operand})\n\n def testReduceWindow(self):\n\n # TODO(b/77644762): float16 and float64 ReduceWindow are unimplemented.\n for dtype in set(self.numeric_types).intersection(\n set([dtypes.bfloat16.as_numpy_dtype, np.float32])):\n\n @function.Defun(dtype, dtype)\n def sum_reducer(x, y):\n return x + y\n\n @function.Defun(dtype, dtype)\n def mul_reducer(x, y):\n return x * y\n\n self.assertAllClose(\n np.array([3, 5, 7, 9, 11, 13], dtype=dtype),\n self._reduce_window(\n np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),\n 0.0,\n sum_reducer,\n window_dimensions=[2]))\n\n self.assertAllClose(\n np.array([3, 7, 11], dtype=dtype),\n self._reduce_window(\n np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),\n 0.0,\n sum_reducer,\n window_dimensions=[2],\n window_strides=[2]))\n\n self.assertAllClose(\n np.array([1, 4, 7], dtype=dtype),\n self._reduce_window(\n np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),\n 0.0,\n sum_reducer,\n window_dimensions=[1],\n window_strides=[3]))\n\n self.assertAllClose(\n np.array([[24, 36, 24], [96, 0, 0]], dtype=dtype),\n self._reduce_window(\n np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),\n 1.0,\n mul_reducer,\n window_dimensions=[2, 2],\n window_strides=[1, 1]))\n\n self.assertAllClose(\n np.array([[0, 0, 0], [5, 10, 5], [2, 4, 1], [0, 0, 0]], dtype=dtype),\n self._reduce_window(\n np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),\n 0.0,\n sum_reducer,\n window_dimensions=[2, 2],\n window_strides=[2, 2],\n padding=[[2, 3], [1, 2]]))\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for monitored_session.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport glob\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nfrom tensorflow.contrib.framework.python.ops import variables as variables_lib\nfrom tensorflow.contrib.testing.python.framework import util_test\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import debug_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.distribute import distribute_coordinator\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import session_run_hook\n\n\nclass ScaffoldTest(test.TestCase):\n \"\"\"Scaffold tests.\"\"\"\n\n def test_nothing_created_before_finalize(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n self.assertEqual(None, scaffold.init_op)\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertEqual(None, scaffold.ready_op)\n self.assertEqual(None, scaffold.ready_for_local_init_op)\n self.assertEqual(None, scaffold.local_init_op)\n self.assertEqual(None, scaffold.saver)\n\n def test_defaults_empty_graph(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n variables.Variable(1, name='my_var')\n variables.Variable(\n 2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])\n scaffold.finalize()\n self.assertTrue(isinstance(scaffold.init_op, ops.Operation))\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))\n self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))\n with self.test_session() as sess:\n self.assertItemsEqual([b'my_var', b'my_local_var'],\n sess.run(scaffold.ready_op))\n 
self.assertItemsEqual([b'my_var'],\n sess.run(scaffold.ready_for_local_init_op))\n sess.run(scaffold.init_op)\n self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))\n sess.run(scaffold.local_init_op)\n self.assertEqual(0, len(sess.run(scaffold.ready_op)))\n\n def test_defaults_no_variables(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n constant_op.constant(1, name='my_const')\n scaffold.finalize()\n self.assertTrue(isinstance(scaffold.init_op, ops.Operation))\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))\n self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))\n\n def test_caches_values(self):\n with ops.Graph().as_default():\n variables.Variable([1])\n scaffold1 = monitored_session.Scaffold()\n scaffold1.finalize()\n scaffold2 = monitored_session.Scaffold()\n scaffold2.finalize()\n self.assertEqual(scaffold1.init_op, scaffold2.init_op)\n self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)\n self.assertEqual(scaffold1.ready_for_local_init_op,\n scaffold2.ready_for_local_init_op)\n self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)\n self.assertEqual(scaffold1.saver, scaffold2.saver)\n\n def test_raise_error_if_more_than_one_cached_item(self):\n with ops.Graph().as_default():\n variables.Variable([1])\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())\n with self.assertRaisesRegexp(RuntimeError, 'More than one item'):\n monitored_session.Scaffold().finalize()\n\n def test_uses_passed_values(self):\n with ops.Graph().as_default():\n variables.Variable([1])\n saver = saver_lib.Saver()\n scaffold = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver)\n scaffold.finalize()\n self.assertEqual(2, scaffold.init_op)\n self.assertEqual(3, scaffold.init_feed_dict)\n self.assertTrue(callable(scaffold.init_fn))\n self.assertEqual(5, scaffold.ready_op)\n self.assertEqual(6, scaffold.ready_for_local_init_op)\n self.assertEqual(7, scaffold.local_init_op)\n self.assertEqual(saver, scaffold.saver)\n\n def test_graph_is_finalized(self):\n with ops.Graph().as_default():\n variables.Variable([1])\n monitored_session.Scaffold().finalize()\n with self.assertRaisesRegexp(RuntimeError,\n 'Graph is finalized and cannot be modified'):\n constant_op.constant([0])\n\n def test_new_scaffold_from_default_scaffold(self):\n scaffold1 = monitored_session.Scaffold()\n with ops.Graph().as_default():\n variables.Variable([1])\n saver = saver_lib.Saver()\n scaffold2 = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver,\n copy_from_scaffold=scaffold1)\n\n scaffold2.finalize()\n self.assertEqual(2, scaffold2.init_op)\n self.assertEqual(3, scaffold2.init_feed_dict)\n self.assertTrue(callable(scaffold2.init_fn))\n self.assertEqual(5, scaffold2.ready_op)\n self.assertEqual(6, scaffold2.ready_for_local_init_op)\n self.assertEqual(7, scaffold2.local_init_op)\n self.assertEqual(saver, scaffold2.saver)\n\n def test_new_scaffold_from_existing_scaffold(self):\n with ops.Graph().as_default():\n 
variables.Variable([1])\n saver = saver_lib.Saver()\n scaffold1 = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver)\n\n scaffold2 = monitored_session.Scaffold(\n init_op=4,\n init_feed_dict=6,\n init_fn=lambda scaffold, sess: 8,\n ready_op=10,\n ready_for_local_init_op=12,\n local_init_op=14,\n saver=saver,\n copy_from_scaffold=scaffold1)\n\n scaffold2.finalize()\n self.assertEqual(4, scaffold2.init_op)\n self.assertEqual(6, scaffold2.init_feed_dict)\n self.assertTrue(callable(scaffold2.init_fn))\n self.assertEqual(10, scaffold2.ready_op)\n self.assertEqual(12, scaffold2.ready_for_local_init_op)\n self.assertEqual(14, scaffold2.local_init_op)\n self.assertEqual(saver, scaffold2.saver)\n\n def test_copy_from_scaffold_is_scaffold(self):\n with ops.Graph().as_default():\n with self.assertRaisesRegexp(\n TypeError, 'copy_from_scaffold is not a Scaffold instance'):\n monitored_session.Scaffold(copy_from_scaffold=1)\n\n\ndef _test_dir(temp_dir, test_name):\n \"\"\"Create an empty dir to use for tests.\n\n Args:\n temp_dir: Tmp directory path.\n test_name: Name of the test.\n\n Returns:\n Absolute path to the test directory.\n \"\"\"\n test_dir = os.path.join(temp_dir, test_name)\n if os.path.isdir(test_dir):\n for f in glob.glob('%s/*' % test_dir):\n os.remove(f)\n else:\n os.makedirs(test_dir)\n return test_dir\n\n\nclass FakeHook(session_run_hook.SessionRunHook):\n\n def __init__(self):\n self.should_stop = False\n self.request = None\n self.call_counter = collections.Counter()\n self.last_run_context = None\n self.last_run_values = None\n\n def begin(self):\n self.call_counter['begin'] += 1\n\n def after_create_session(self, session, coord): # pylint: disable=unused-argument\n self.call_counter['after_create_session'] += 1\n\n def before_run(self, run_context):\n self.call_counter['before_run'] += 1\n self.last_run_context = run_context\n return self.request\n\n def after_run(self, run_context, run_values):\n self.call_counter['after_run'] += 1\n self.last_run_values = run_values\n if self.should_stop:\n run_context.request_stop()\n\n def end(self, session):\n self.call_counter['end'] += 1\n\n\nclass MonitoredTrainingSessionTest(test.TestCase):\n \"\"\"Tests MonitoredTrainingSession.\"\"\"\n\n def test_saving_restoring_checkpoint(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(2, session.run(gstep))\n\n def test_save_checkpoint_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n # A restart will find the checkpoint and recover 
automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(100, session.run(gstep))\n\n def test_save_checkpoint_secs(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_checkpoint_secs=0.1,\n log_step_count_steps=10) as session:\n session.run(new_gstep)\n time.sleep(0.2)\n for _ in range(10):\n session.run(new_gstep)\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(11, session.run(gstep))\n\n def test_summaries_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_summaries_secs(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_summaries_steps=None,\n save_summaries_secs=0.1,\n log_step_count_steps=10) as session:\n session.run(new_gstep)\n time.sleep(0.2)\n for _ in range(101):\n session.run(new_gstep)\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_custom_saving(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')\n fake_hook = FakeHook()\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n chief_only_hooks=[fake_hook],\n save_checkpoint_secs=0) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n\n # Check whether custom hook called or not\n self.assertEqual(1, fake_hook.call_counter['begin'])\n # A restart will not find the checkpoint, since we didn't save.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(0, session.run(gstep))\n\n\nclass MockStrategy(object):\n\n def __init__(self,\n between_graph=False,\n should_init=True,\n should_checkpoint=None,\n should_save_summary=None):\n self._between_graph = between_graph\n self._should_init = should_init\n self._should_checkpoint = should_checkpoint\n self._should_save_summary = should_save_summary\n\n @property\n def between_graph(self):\n return self._between_graph\n\n 
@property\n def should_init(self):\n return self._should_init\n\n @property\n def should_checkpoint(self):\n return self._should_checkpoint\n\n @property\n def should_save_summary(self):\n return self._should_save_summary\n\n\nclass MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):\n \"\"\"Test distribute coordinator controls summary saving and checkpointing.\"\"\"\n\n def test_summary_hook_enabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_save_summary=True), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_summary_hook_disabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_save_summary=False), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n\n # No summary is saved.\n summaries = util_test.latest_summaries(logdir)\n self.assertEqual(len(summaries), 0)\n\n def test_checkpoint_hook_enabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_checkpoint=True), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(100, session.run(gstep))\n\n def test_checkpoint_hook_disabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_checkpoint=False), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n\n # No checkpoint is saved.\n checkpoint = checkpoint_management.latest_checkpoint(logdir)\n self.assertIsNone(checkpoint)\n\n\nclass StopAtNSession(monitored_session._WrappedSession):\n \"\"\"A wrapped session that stops at the N-th call to _check_stop.\"\"\"\n\n def __init__(self, sess, n):\n super(StopAtNSession, 
self).__init__(sess)\n self._count = n\n\n def _check_stop(self):\n if self._count == 0:\n return True\n self._count -= 1\n return False\n\n\nclass WrappedSessionTest(test.TestCase):\n \"\"\"_WrappedSession tests.\"\"\"\n\n def test_properties(self):\n with self.test_session() as sess:\n constant_op.constant(0.0)\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertEquals(sess.graph, wrapped_sess.graph)\n self.assertEquals(sess.sess_str, wrapped_sess.sess_str)\n\n def test_should_stop_on_close(self):\n with self.test_session() as sess:\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertFalse(wrapped_sess.should_stop())\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n\n def test_should_stop_uses_check_stop(self):\n with self.test_session() as sess:\n wrapped_sess = StopAtNSession(sess, 3)\n self.assertFalse(wrapped_sess.should_stop())\n self.assertFalse(wrapped_sess.should_stop())\n self.assertFalse(wrapped_sess.should_stop())\n self.assertTrue(wrapped_sess.should_stop())\n\n def test_should_stop_delegates_to_wrapped_session(self):\n with self.test_session() as sess:\n wrapped_sess0 = StopAtNSession(sess, 4)\n wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertTrue(wrapped_sess1.should_stop())\n\n def test_close_twice(self):\n with self.test_session() as sess:\n wrapped_sess = monitored_session._WrappedSession(sess)\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n\n def test_run(self):\n with self.test_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n self.assertEqual(42, sess.run(v, feed_dict={c: 42}))\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))\n\n\ndef busy_wait_for_coord_stop(coord):\n while not coord.should_stop():\n time.sleep(0.001)\n\n\nclass CoordinatedSessionTest(test.TestCase):\n \"\"\"_CoordinatedSession tests.\"\"\"\n\n def test_properties(self):\n with self.test_session() as sess:\n constant_op.constant(0.0)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertEquals(sess.graph, coord_sess.graph)\n self.assertEquals(sess.sess_str, coord_sess.sess_str)\n\n def test_run(self):\n with self.test_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))\n\n def test_should_stop_on_close(self):\n with self.test_session() as sess:\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n coord_sess.close()\n self.assertTrue(coord_sess.should_stop())\n\n def test_should_stop_on_coord_stop(self):\n with self.test_session() as sess:\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n coord.request_stop()\n self.assertTrue(coord_sess.should_stop())\n\n def test_dont_request_stop_on_exception_in_main_thread(self):\n with self.test_session() as sess:\n c = constant_op.constant(0)\n v = 
array_ops.identity(c)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n self.assertEqual(0, coord_sess.run(c))\n self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))\n with self.assertRaisesRegexp(TypeError, 'None has invalid type'):\n coord_sess.run([None], feed_dict={c: 2})\n self.assertFalse(coord.should_stop())\n self.assertFalse(coord_sess.should_stop())\n\n def test_stop_threads_on_close_after_exception(self):\n with self.test_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n coord = coordinator.Coordinator()\n threads = [\n threading.Thread(\n target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)\n ]\n for t in threads:\n coord.register_thread(t)\n t.start()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n for t in threads:\n self.assertTrue(t.is_alive())\n self.assertEqual(0, coord_sess.run(c))\n for t in threads:\n self.assertTrue(t.is_alive())\n self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))\n for t in threads:\n self.assertTrue(t.is_alive())\n with self.assertRaisesRegexp(TypeError, 'None has invalid type'):\n coord_sess.run([None], feed_dict={c: 2})\n coord_sess.close()\n for t in threads:\n self.assertFalse(t.is_alive())\n self.assertTrue(coord.should_stop())\n self.assertTrue(coord_sess.should_stop())\n\n def test_stop_threads_on_close(self):\n with self.test_session() as sess:\n coord = coordinator.Coordinator()\n threads = [\n threading.Thread(\n target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)\n ]\n for t in threads:\n coord.register_thread(t)\n t.start()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n coord_sess.close()\n for t in threads:\n self.assertFalse(t.is_alive())\n self.assertTrue(coord.should_stop())\n self.assertTrue(coord_sess.should_stop())\n\n def test_propagates_exception_trace(self):\n assertion = control_flow_ops.Assert(False, ['This should fail.'])\n with self.test_session() as sess:\n coord = coordinator.Coordinator(clean_stop_exception_types=())\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n try:\n coord_sess.run([assertion])\n self.fail('No exception was raised by assertion.')\n except errors_impl.InvalidArgumentError:\n # Extract the name of the file where the exception was first raised.\n _, _, exc_traceback = sys.exc_info()\n tb = traceback.extract_tb(exc_traceback)\n exc_source_file = tb[-1][0]\n exc_source_basename = os.path.basename(exc_source_file)\n # If it's monitored_session.py then the original stack trace was not\n # correctly propagated.\n self.assertIn(\n exc_source_basename, ['session.py', 'monitored_session.py'],\n 'The exception was raised from an unrecognized file. This unit '\n 'test probably needs to be updated. Traceback:\\n%s\\n' % tb)\n self.assertEqual(\n exc_source_basename, 'session.py',\n 'Original stack trace was not propagated by MonitoredSession. 
'\n 'Traceback:\\n%s' % tb)\n\n\nclass AbortAtNSession(object):\n \"\"\"A mock session that aborts at the N-th run call.\"\"\"\n\n def __init__(self, sess, n):\n self._sess = sess\n self._count = n\n\n def close(self):\n pass\n\n def run(self, *args, **kwargs):\n if self._count == 0:\n raise errors_impl.AbortedError('Aborted at N', None, None)\n self._count -= 1\n return self._sess.run(*args, **kwargs)\n\n\nclass StopCoordinatorWithException(session_run_hook.SessionRunHook):\n \"\"\"With this hook Coordinator throws an exception after N-runs.\"\"\"\n\n def __init__(self, calls_before_stopping, exception_to_raise=None):\n self._started_the_side_thread_already = False\n self._lock = threading.Lock()\n self._stored_exception_event = threading.Event()\n self._calls_before_stopping = calls_before_stopping\n self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(\n None, None, 'Aborted at N'))\n\n def _maybe_stop_with_exception(self, coord):\n while True:\n with self._lock:\n if self._calls_before_stopping == 0:\n try:\n raise self._exception_to_raise\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n self._stored_exception_event.set()\n break\n\n def after_create_session(self, session, coord):\n if self._started_the_side_thread_already:\n return\n\n separate_thread = threading.Thread(\n target=self._maybe_stop_with_exception, args=(coord,))\n\n coord.register_thread(separate_thread)\n separate_thread.start()\n self._started_the_side_thread_already = True\n # Coordinator will take care of joining `separate_thread`.\n\n def after_run(self, run_context, run_values):\n stopping_now = False\n with self._lock:\n self._calls_before_stopping -= 1\n if self._calls_before_stopping == 0:\n stopping_now = True\n\n if stopping_now:\n self._stored_exception_event.wait()\n\n\nclass FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):\n \"\"\"With this hook training encounters an exception after N-runs.\"\"\"\n\n def __init__(self, calls_before_stopping):\n StopCoordinatorWithException.__init__(self, calls_before_stopping)\n self._coord = None\n\n def after_create_session(self, session, coord):\n self._coord = coord\n return StopCoordinatorWithException.after_create_session(\n self, session, coord)\n\n def after_run(self, run_context, run_values):\n StopCoordinatorWithException.after_run(self, run_context, run_values)\n try:\n # After a `run`, an exception could have been stored inside the\n # coordinator.\n self._coord.raise_requested_exception()\n except errors_impl.AbortedError:\n # In real world, the main thread may or may not know about the exception\n # that stopped the coordinator. Because the coordinator has stopped, the\n # main thread could have gotten stuck as well (for example, the\n # coordinator was supposed to execute `FIFOQueue.enqueue` while the main\n # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,\n # the session is going to get garbage collected after some time with:\n raise errors_impl.CancelledError(None, None,\n 'Session got garbage-collected.')\n\n\nclass CountingSessionCreator(object):\n \"\"\"A creator that counts the number of created sessions.\"\"\"\n\n def __init__(self, session):\n self._initial_session = session\n # We only have one session per test case. 
We can't re-create it, thus\n # it shouldn't be closed.\n self._initial_session.close = lambda *args: None\n self._create_session_calls = 0\n\n @property\n def number_of_sessions_created(self):\n return self._create_session_calls\n\n def create_session(self):\n self._create_session_calls += 1\n return self._initial_session\n\n\nclass RecoverableSessionTest(test.TestCase):\n \"\"\"_RecoverableSession tests.\"\"\"\n\n class _SessionReturner(object):\n\n def __init__(self, sess):\n self._sess = sess\n\n def create_session(self):\n return self._sess\n\n def test_properties(self):\n with self.test_session() as sess:\n constant_op.constant(0.0)\n recoverable_sess = monitored_session._RecoverableSession(\n self._SessionReturner(sess))\n self.assertEquals(sess.graph, recoverable_sess.graph)\n self.assertEquals(sess.sess_str, recoverable_sess.sess_str)\n\n def test_run(self):\n with self.test_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n recoverable_sess = monitored_session._RecoverableSession(\n self._SessionReturner(sess))\n self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))\n\n def test_recovery(self):\n with self.test_session() as sess:\n\n class StackSessionCreator(object):\n\n def __init__(self, sess):\n self.sessions_to_use = [\n AbortAtNSession(sess, x + 1) for x in range(3)\n ]\n\n def create_session(self):\n return self.sessions_to_use.pop(0)\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n session_creator = StackSessionCreator(sess)\n # List of 3 sessions to use for recovery. The first one aborts\n # after 1 run() call, the second after 2 run calls, the third\n # after 3 run calls.\n self.assertEqual(3, len(session_creator.sessions_to_use))\n # Make the recoverable session uses these 3 sessions in sequence by\n # passing a factory that pops from the session_to_use list.\n recoverable_sess = monitored_session._RecoverableSession(session_creator)\n self.assertEqual(\n 2, len(session_creator.sessions_to_use)) # One session popped.\n # Using first session.\n self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))\n self.assertEqual(\n 2, len(session_creator.sessions_to_use)) # Still 2 sessions available\n # This will fail and recover by picking up the second session.\n self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))\n self.assertEqual(\n 1, len(session_creator.sessions_to_use)) # Still 1 session available\n self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))\n self.assertEqual(\n 1, len(session_creator.sessions_to_use)) # Still 1 session available\n # This will fail and recover by picking up the last session.\n self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))\n self.assertEqual(\n 0, len(session_creator.sessions_to_use)) # All sessions used.\n self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))\n self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))\n # This will fail and throw a real error as the pop() will fail.\n with self.assertRaisesRegexp(IndexError, 'pop from empty list'):\n recoverable_sess.run(v, feed_dict={c: -12})\n\n def test_recovery_from_coordinator_exception(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [StopCoordinatorWithException(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = 
array_ops.identity(c)\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def test_recovery_from_non_preemption_in_coordinator(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n hook = StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.'))\n session = monitored_session.MonitoredSession(session_creator, [hook])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # The coordinator was asked to stop due to non-redeemable error. Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n def test_recovery_from_session_getting_stuck(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # Even though the coordinator stopped which and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [StopCoordinatorWithException(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, 
session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n hook = StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.'))\n session = monitored_session.MonitoredSession(session_creator, [hook])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # The coordinator was asked to stop due to non-redeemable error. Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n def test_recovery_from_session_getting_stuck_when_run_hooks(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator stopped which and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def create_raw_session_with_failing_coordinator(self, session_creator, hook):\n \"\"\"Return MonitoredSession that triggers coordinator failures.\"\"\"\n session = monitored_session.MonitoredSession(session_creator, [hook])\n # We would like to test a situation where during fetches through the\n # raw session, the coordinator fails with an exception. 
To do that, we\n # are going to use (raw_session + StopCoordinatorWithException) hook\n # combination that is stored in\n # `MonitoredSession._RecoverableSession._CoordinatedSession._sess`\n # at this point:\n session._tf_sess = lambda: session._sess._sess._sess\n # `run()` on such a session is equivalent to `run()` on the raw session\n # with separate coordinator threads independently stopping with an\n # exception.\n return session\n\n def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n StopCoordinatorWithException(calls_before_stopping=2))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.session.run(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.')))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # The coordinator was asked to stop due to non-redeemable error. 
Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n def test_recovery_from_session_getting_stuck_with_raw_session(self):\n with self.test_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator stopped which and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n\nclass FakeSession(monitored_session._WrappedSession):\n\n def __init__(self, sess):\n monitored_session._WrappedSession.__init__(self, sess)\n self.args_called = {}\n\n def run(self, fetches, **kwargs):\n self.args_called = dict(kwargs)\n # Call run only with fetches since we directly pass other arguments.\n return monitored_session._WrappedSession.run(self, fetches)\n\n\nclass HookedSessionTest(test.TestCase):\n \"\"\"Tests of _HookedSession.\"\"\"\n\n def testRunPassesAllArguments(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_run = FakeSession(sess)\n mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])\n a_tensor = constant_op.constant([0], name='a_tensor')\n sess.run(variables.global_variables_initializer())\n output = mon_sess.run(fetches=a_tensor,\n feed_dict='a_feed',\n options='an_option',\n run_metadata='a_metadata')\n self.assertEqual(output, [0])\n self.assertEqual(mock_run.args_called, {\n 'feed_dict': 'a_feed',\n 'options': 'an_option',\n 'run_metadata': 'a_metadata'\n })\n\n def testCallsHooksBeginEnd(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n sess.run(variables.global_variables_initializer())\n mon_sess.run(a_tensor)\n\n for hook in [mock_hook, mock_hook2]:\n self.assertEqual(\n hook.last_run_values,\n session_run_hook.SessionRunValues(\n results=None,\n options=config_pb2.RunOptions(),\n run_metadata=config_pb2.RunMetadata()))\n self.assertEqual(hook.last_run_context.original_args,\n session_run_hook.SessionRunArgs(a_tensor))\n self.assertEqual(hook.last_run_context.session, sess)\n self.assertEqual(hook.call_counter['begin'], 0)\n self.assertEqual(hook.call_counter['after_create_session'], 0)\n self.assertEqual(hook.call_counter['before_run'], 1)\n self.assertEqual(hook.call_counter['after_run'], 1)\n\n def testShouldStop(self):\n with ops.Graph().as_default(), session_lib.Session() as 
sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n constant_op.constant([0], name='a_tensor')\n sess.run(variables.global_variables_initializer())\n\n mon_sess.run(fetches='a_tensor')\n self.assertFalse(mon_sess.should_stop())\n\n mock_hook.should_stop = True\n mon_sess.run(fetches='a_tensor')\n self.assertTrue(mon_sess.should_stop())\n\n def testFetchesHookRequests(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n another_tensor = constant_op.constant([5], name='another_tensor')\n third_tensor = constant_op.constant([10], name='third_tensor')\n mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])\n mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])\n sess.run(variables.global_variables_initializer())\n\n output = mon_sess.run(fetches=a_tensor)\n self.assertEqual(output, [0])\n self.assertEqual(mock_hook.last_run_values.results, [5])\n self.assertEqual(mock_hook2.last_run_values.results, [10])\n\n def testOnlyHooksHaveFeeds(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n sess.run(variables.global_variables_initializer())\n\n self.assertEqual(mon_sess.run(fetches=add_tensor), [15])\n\n def testBothHooksAndUserHaveFeeds(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n c_tensor = constant_op.constant([0], name='c_tensor')\n add_tensor = a_tensor + b_tensor + c_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n sess.run(variables.global_variables_initializer())\n\n feed_dict = {c_tensor: [20]}\n self.assertEqual(\n mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])\n # User feed_dict should not be changed\n self.assertEqual(len(feed_dict), 1)\n\n def testHooksFeedConflicts(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [10]})\n sess.run(variables.global_variables_initializer())\n\n with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):\n 
mon_sess.run(fetches=add_tensor)\n\n def testHooksAndUserFeedConflicts(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n sess.run(variables.global_variables_initializer())\n\n with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):\n mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})\n\n\nclass RaiseOnceAtCountN(session_run_hook.SessionRunHook):\n \"\"\"Hook that raises an Exception at step N.\"\"\"\n\n def __init__(self, n, ex):\n self.n = n\n self.ex = ex\n self.raised = False\n\n def before_run(self, run_context):\n # Raise the first time we reach step N.\n self.n -= 1\n if 0 == self.n and not self.raised:\n self.raised = True\n raise self.ex\n return None\n\n\nclass RunOptionsMetadataHook(session_run_hook.SessionRunHook):\n \"\"\"A hook that observes & optionally modifies RunOptions and RunMetadata.\"\"\"\n\n def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,\n debug_tensor_watch):\n self._trace_level = trace_level\n self._timeout_in_ms = timeout_in_ms\n self._output_partition_graphs = output_partition_graphs\n self._debug_tensor_watch = debug_tensor_watch\n\n self.run_options_list = []\n self.run_metadata_list = []\n\n def before_run(self, run_context):\n options = config_pb2.RunOptions(\n trace_level=self._trace_level,\n timeout_in_ms=self._timeout_in_ms,\n output_partition_graphs=self._output_partition_graphs)\n options.debug_options.debug_tensor_watch_opts.extend(\n [self._debug_tensor_watch])\n return session_run_hook.SessionRunArgs(None, None, options=options)\n\n def after_run(self, run_context, run_values):\n self.run_options_list.append(run_values.options)\n self.run_metadata_list.append(run_values.run_metadata)\n\n\nclass MonitoredSessionTest(test.TestCase):\n \"\"\"MonitoredSession tests.\"\"\"\n\n def test_defaults(self):\n with ops.Graph().as_default():\n a_var = variables.Variable(0)\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n\n def test_last_step(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_last_step')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n # Run till step 3 and save.\n hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]\n with monitored_session.MonitoredSession(hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n self.assertEqual(1, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(3, session.run(do_step))\n self.assertTrue(session.should_stop())\n save_path = saver_lib._get_saver_or_default().save(\n session._coordinated_creator.tf_sess,\n os.path.join(logdir, 'step-3'))\n # Run till step 5 and save.\n def load_ckpt(scaffold, sess):\n scaffold.saver.restore(sess, save_path)\n\n session_creator = monitored_session.ChiefSessionCreator(\n monitored_session.Scaffold(init_fn=load_ckpt))\n hooks = 
[basic_session_run_hooks.StopAtStepHook(last_step=5)]\n with monitored_session.MonitoredSession(\n hooks=hooks, session_creator=session_creator) as session:\n self.assertEqual(3, session.run(gstep))\n self.assertFalse(session.should_stop())\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(5, session.run(do_step))\n self.assertTrue(session.should_stop())\n\n def test_num_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n # Do 3 steps and save.\n hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]\n with monitored_session.MonitoredSession(hooks=hooks) as session:\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertTrue(session.should_stop())\n save_path = saver_lib._get_saver_or_default().save(\n session._coordinated_creator.tf_sess,\n os.path.join(logdir, 'step-3'))\n # Restore and do 4 steps.\n def load_ckpt(scaffold, sess):\n scaffold.saver.restore(sess, save_path)\n\n session_creator = monitored_session.ChiefSessionCreator(\n scaffold=monitored_session.Scaffold(init_fn=load_ckpt))\n hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]\n with monitored_session.MonitoredSession(\n hooks=hooks, session_creator=session_creator) as session:\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertTrue(session.should_stop())\n\n # This set of tests, verifies the supervised session behavior when exceptions\n # are raised next to the innermost session run() call.\n\n def test_recovery(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_recovery')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n scaffold = monitored_session.Scaffold()\n # Use a hook to save the model every 100 steps. 
It also saves it at\n # the end.\n hooks = [\n basic_session_run_hooks.CheckpointSaverHook(\n logdir, save_steps=1, scaffold=scaffold)\n ]\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir),\n hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir)) as session:\n self.assertEqual(2, session.run(gstep))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold,\n checkpoint_filename_with_path=checkpoint_management.\n latest_checkpoint(logdir))) as session:\n self.assertEqual(2, session.run(gstep))\n\n def test_retry_initialization_on_aborted_error(self):\n # Tests that we silently retry on abort during initialization.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n self.init_raised_aborted_error = False\n\n def _init_fn(scaffold, session):\n _, _ = scaffold, session\n if not self.init_raised_aborted_error:\n self.init_raised_aborted_error = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold=monitored_session.Scaffold(\n init_fn=_init_fn))) as session:\n self.assertFalse(session.should_stop())\n self.assertEqual(0, session.run(gstep))\n self.assertTrue(self.init_raised_aborted_error)\n\n def _retry_test(self, ex):\n # Tests that we silently retry on error. Note that this does not test\n # recovery as we do not use a CheckpointSaver in this test.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(4, ex)\n with monitored_session.MonitoredSession(hooks=[hook]) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Here at step 3, the hook triggers and raises AbortedError. The\n # MonitoredSession automatically retries and restart from a freshly\n # initialized session, so the step is back to 0 and running do_step\n # moves it to 1.\n self.assertEqual(1, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertTrue(hook.raised)\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n\n def test_retry_on_aborted_error(self):\n self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))\n\n def test_retry_on_unavailable_error(self):\n self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))\n\n def test_recover_and_retry_on_aborted_error(self):\n # Tests that we silently retry and recover on abort. 
This test uses\n # a CheckpointSaver to have something to recover from.\n logdir = _test_dir(self.get_temp_dir(),\n 'test_recover_and_retry_on_aborted_error')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n scaffold = monitored_session.Scaffold()\n abort_hook = RaiseOnceAtCountN(\n 4, errors_impl.AbortedError(None, None, 'Abort'))\n # Save after each step.\n ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(\n logdir, save_steps=1, scaffold=scaffold)\n hooks = [abort_hook, ckpt_hook]\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir),\n hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Here at step 3, the hook triggers and raises AbortedError. The\n # MonitoredSession automatically restores and retries.\n self.assertEqual(3, session.run(do_step))\n self.assertTrue(abort_hook.raised)\n self.assertFalse(session.should_stop())\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n\n def test_exit_cleanly_on_out_of_range_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,\n 'EOI'))\n session = monitored_session.MonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises OutOfRange. The\n # session should go into should_stop() mode. It should raise the\n # exception. So next step should not be executed.\n session.run(do_step)\n self.assertTrue(False)\n self.assertTrue(session.should_stop())\n\n def test_exit_cleanly_on_stop_iteration_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, StopIteration)\n session = monitored_session.MonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises StopIteration. The\n # session should go into should_stop() mode. It should raise the\n # exception. 
So next step should not be executed.\n session.run(do_step)\n self.assertTrue(False)\n self.assertTrue(session.should_stop())\n\n def test_regular_exception_pass_through_run(self):\n # Tests that regular exceptions just pass through a \"with\n # MonitoredSession\" block and set the session in stop mode.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))\n session = monitored_session.MonitoredSession(hooks=[hook])\n with self.assertRaisesRegexp(RuntimeError, 'regular exception'):\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # This triggers the hook and raises the exception\n session.run(do_step)\n # We should not hit this\n self.assertFalse(True)\n self.assertTrue(hook.raised)\n self.assertTrue(session.should_stop())\n\n def test_regular_exception_reported_to_coord_pass_through_run(self):\n # Tests that regular exceptions reported to the coordinator from a thread\n # passes through a \"run()\" call within a \"with MonitoredSession\" block and\n # set the session in stop mode.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n session = monitored_session.MonitoredSession()\n run_performed_without_error = False\n with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n with session:\n self.assertEqual(0, session.run(gstep))\n # Report an exception through the coordinator.\n try:\n raise RuntimeError('a thread wants to stop')\n except RuntimeError as e:\n session._coordinated_creator.coord.request_stop(e)\n # Call run() which should perform normally.\n self.assertEqual(0, session.run(gstep))\n run_performed_without_error = True\n self.assertTrue(run_performed_without_error)\n\n def test_regular_exception_reported_to_coord_pass_through_return(self):\n # Tests that regular exceptions reported to the coordinator from a thread\n # passes through returning from a \"with MonitoredSession\" block and\n # set the session in stop mode.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n session = monitored_session.MonitoredSession()\n with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n with session:\n self.assertEqual(0, session.run(gstep))\n # Report an exception through the coordinator.\n try:\n raise RuntimeError('a thread wants to stop')\n except RuntimeError as e:\n session._coordinated_creator.coord.request_stop(e)\n self.assertTrue(session.should_stop())\n\n # This set of tests, verifies the session behavior when exceptions are raised\n # from code inside a \"with MonitoredSession:\" context.\n\n def test_stop_cleanly_when_no_exception_in_with_body(self):\n # Tests that regular exceptions pass through\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n session = monitored_session.MonitoredSession()\n with session:\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Should have closed.\n self.assertTrue(session.should_stop())\n self.assertTrue(session._is_closed())\n\n def test_raises_regular_exceptions_in_with_body(self):\n # Tests that regular exceptions in \"with body\" are seen outside.\n with ops.Graph().as_default():\n gstep = 
variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n session = monitored_session.MonitoredSession()\n # We should see that exception.\n with self.assertRaisesRegexp(RuntimeError, 'regular exception'):\n with session:\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Will be visible outside the \"with body\".\n raise RuntimeError('regular exception')\n # Should have closed.\n self.assertTrue(session.should_stop())\n self.assertTrue(session._is_closed())\n\n def test_graph(self):\n with ops.Graph().as_default() as g:\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(g, session.graph)\n\n def test_graph_finalized_during_run_unfinalized_after_exit(self):\n with ops.Graph().as_default() as g:\n a_var = variables.Variable(0)\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n self.assertTrue(g.finalized)\n self.assertFalse(g.finalized)\n\n def test_keep_finalized_graph_as_finalized(self):\n with ops.Graph().as_default() as g:\n a_var = variables.Variable(0)\n monitored_session.Scaffold().finalize()\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n self.assertTrue(g.finalized)\n self.assertTrue(g.finalized)\n\n def test_merge_run_options_from_hooks(self):\n \"\"\"Test for rewriting RunOptions and observing RunMetadata with hooks.\"\"\"\n\n with ops.Graph().as_default():\n my_const = constant_op.constant(42, name='my_const')\n _ = constant_op.constant(24, name='my_const_2')\n\n watch_a = debug_pb2.DebugTensorWatch(\n node_name='my_const',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)\n watch_b = debug_pb2.DebugTensorWatch(\n node_name='my_const_2',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)\n with monitored_session.MonitoredSession(\n hooks=[hook_a, hook_b]) as session:\n self.assertEqual(42, session.run(my_const))\n\n # trace_level=3 should have overridden trace_level=2;\n # timeout_in_ms=60000 should have overridden 30000;\n # output_partition_graphs=True should have overridden False.\n # The two debug tensor watches should have been merged.\n self.assertEqual(\n [\n config_pb2.RunOptions(\n trace_level=3,\n timeout_in_ms=60000,\n output_partition_graphs=True,\n debug_options=debug_pb2.DebugOptions(\n debug_tensor_watch_opts=[watch_a, watch_b]))\n ],\n hook_b.run_options_list)\n self.assertEqual(1, len(hook_b.run_metadata_list))\n self.assertTrue(\n isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))\n self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)\n\n def test_merge_caller_and_hook_run_options(self):\n \"\"\"Test that RunOptions from caller and hooks can be merged properly.\"\"\"\n\n with ops.Graph().as_default():\n my_const = constant_op.constant(42, name='my_const')\n _ = constant_op.constant(24, name='my_const_2')\n\n hook_watch = debug_pb2.DebugTensorWatch(\n node_name='my_const_2',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)\n with monitored_session.MonitoredSession(hooks=[hook]) as session:\n caller_watch = debug_pb2.DebugTensorWatch(\n node_name='my_const',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n caller_options = config_pb2.RunOptions(\n 
trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)\n caller_options.debug_options.debug_tensor_watch_opts.extend(\n [caller_watch])\n self.assertEqual(42, session.run(my_const, options=caller_options))\n\n # trace_level=3 from the caller should override 2 from the hook.\n # timeout_in_ms=60000 from the hook should override from the caller.\n # output_partition_graph=True from the caller should override False\n # from the hook.\n # The two debug watches from the caller and the hook should be merged,\n # in that order.\n self.assertEqual(\n [\n config_pb2.RunOptions(\n trace_level=3,\n timeout_in_ms=60000,\n output_partition_graphs=True,\n debug_options=debug_pb2.DebugOptions(\n debug_tensor_watch_opts=[caller_watch, hook_watch]))\n ],\n hook.run_options_list)\n self.assertEqual(1, len(hook.run_metadata_list))\n self.assertTrue(\n isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))\n self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)\n\n def test_with_statement_and_close(self):\n # Test case for https://github.com/tensorflow/tensorflow/issues/12224\n # where close() inside the with should have a better error message.\n with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):\n with monitored_session.MonitoredSession() as session:\n session.close()\n\n def test_step_fn_example(self):\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n\n def step_fn(step_context):\n value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n return value\n\n with monitored_session.MonitoredSession() as session:\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n\n def test_step_function_stops(self):\n with ops.Graph().as_default():\n\n def step_fn(step_context):\n step_context.request_stop()\n\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(None, session.run_step_fn(step_fn))\n self.assertTrue(session.should_stop())\n\n def test_step_request_stop_without_a_with_block(self):\n with ops.Graph().as_default():\n was_stop_iteration_raised = False\n\n def step_fn(step_context):\n step_context.request_stop()\n\n session = monitored_session.MonitoredSession()\n try:\n self.assertEqual(None, session.run_step_fn(step_fn))\n except StopIteration:\n was_stop_iteration_raised = True\n\n self.assertTrue(was_stop_iteration_raised)\n self.assertFalse(session.should_stop())\n\n def test_step_request_stop_in_a_loop(self):\n with ops.Graph().as_default():\n def step_fn(step_context):\n step_context.request_stop()\n\n with monitored_session.MonitoredSession() as session:\n while not session.should_stop():\n _ = session.run_step_fn(step_fn)\n self.fail('An exception should be raised on the line above.')\n\n def test_step_request_stop_with_returning_a_type(self):\n with ops.Graph().as_default():\n\n def step_fn(step_context):\n del step_context\n return 'a type'\n\n with monitored_session.MonitoredSession() as session:\n self.assertEqual('a type', session.run_step_fn(step_fn))\n\n def test_step_with_extra_arguments(self):\n with ops.Graph().as_default():\n\n def step_fn(step_context, extra_foo):\n del step_context, extra_foo\n\n with monitored_session.MonitoredSession() as session:\n with self.assertRaisesRegexp(\n ValueError,\n '`step_fn` may either have one `step_context` argument'):\n self.assertEqual(None, session.run_step_fn(step_fn))\n\n def test_step_fn_belongs_to_a_class(self):\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = 
array_ops.identity(c)\n\n class Model(object):\n\n def step_fn(self, step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n with monitored_session.MonitoredSession() as session:\n model = Model()\n self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)\n\n def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):\n with ops.Graph().as_default():\n\n class Model(object):\n\n def step_fn(self, step_context, extra_foo):\n del step_context, extra_foo\n\n with monitored_session.MonitoredSession() as session:\n with self.assertRaisesRegexp(\n ValueError,\n '`step_fn` may either have one `step_context` argument'):\n model = Model()\n self.assertEqual(None, session.run_step_fn(model.step_fn))\n\n def test_step_fn_with_hooks(self):\n with ops.Graph().as_default():\n var = resource_variable_ops.ResourceVariable(0.0)\n\n # This test higlights the interaction of hooks with\n # `Monitoredsession.run_step_fn`. The order of execution of operations\n # below is:\n # 0. stage_0\n # 1. stage_1_0 or stage_1_1 in an undefined order\n # 2. stage_2\n\n stage_0 = state_ops.assign_add(var, 0.3)\n stage_1_0 = state_ops.assign_add(var, 0.7)\n # The order of `stage_1_0` and `stage_1_1` is undefined by\n # `MonitoredSession`, but we should be able to assert when both of them\n # are complete. To obtain a consistent result of adding two different\n # constants to `var`, we rely on a control dependency and\n # `ResourceVariable`. Otherwise, it is possible that one of the\n # additions overwites the result of the other addition.\n with ops.control_dependencies([stage_1_0]):\n stage_1_1 = state_ops.assign_add(var, 0.5)\n stage_2 = state_ops.assign_add(var, 1.1)\n\n class Hook(session_run_hook.SessionRunHook):\n\n def __init__(self, testing):\n self._testing = testing\n\n def before_run(self, run_context):\n return session_run_hook.SessionRunArgs(fetches=stage_1_0)\n\n def after_run(self, run_context, run_values):\n self._testing.assertNear(0.3 + 0.5 + 0.7,\n run_context.session.run(var), 0.1)\n self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,\n run_context.session.run(stage_2), 0.1)\n\n def step_fn(step_context):\n self.assertNear(0.3, step_context.session.run(stage_0), 0.1)\n return step_context.run_with_hooks(fetches=stage_1_1)\n\n with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:\n self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))\n\n def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):\n with ops.Graph().as_default():\n var = resource_variable_ops.ResourceVariable(0.0)\n\n stage_0 = state_ops.assign_add(var, 0.3)\n stage_1_0 = state_ops.assign_add(var, 0.7)\n with ops.control_dependencies([stage_1_0]):\n stage_1_1 = state_ops.assign_add(var, 0.5)\n stage_2 = state_ops.assign_add(var, 1.1)\n\n class Hook(session_run_hook.SessionRunHook):\n\n def __init__(self, testing):\n self._testing = testing\n\n def before_run(self, run_context):\n return session_run_hook.SessionRunArgs(fetches=stage_1_0)\n\n def after_run(self, run_context, run_values):\n self._testing.assertNear(0.3 + 0.5 + 0.7,\n run_context.session.run(var), 0.1)\n self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,\n run_context.session.run(stage_2), 0.1)\n\n def step_fn(step_context):\n self.assertNear(0.3, step_context.session.run(stage_0), 0.1)\n return step_context.run_with_hooks(fetches=stage_1_1)\n\n with monitored_session.SingularMonitoredSession(\n hooks=[Hook(self)]) as session:\n self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))\n\n def 
test_step_fn_with_hooks_and_request_stop(self):\n with ops.Graph().as_default():\n trace_the_hook = {'before_run': False, 'after_run': False}\n\n class Hook(session_run_hook.SessionRunHook):\n\n def before_run(self, run_context):\n trace_the_hook['before_run'] = True\n\n def after_run(self, run_context, run_values):\n trace_the_hook['after_run'] = True\n\n def step_fn(step_context):\n step_context.request_stop()\n\n with monitored_session.MonitoredSession(hooks=[Hook()]) as session:\n self.assertEqual(None, session.run_step_fn(step_fn))\n self.assertTrue(session.should_stop())\n # `step_context.request_stop()` in a step_fn interrupts the flow of\n # running the hooks.\n self.assertFalse(trace_the_hook['before_run'])\n self.assertFalse(trace_the_hook['after_run'])\n\n def test_recovers_from_an_exception_in_step_fn(self):\n trace_the_exception = {'run_already': False}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n\n def step_fn(step_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n with monitored_session.MonitoredSession() as session:\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.assertTrue(trace_the_exception['run_already'])\n\n def test_recovers_from_an_exception_in_step_fn_after_hooks(self):\n trace_the_exception = {'run_already': False, 'side_effect_counter': 0}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n graph_state = variables.Variable(0.0)\n graph_side_effect = state_ops.assign_add(graph_state, 0.31)\n\n def step_fn(step_context):\n trace_the_exception['side_effect_counter'] += 1\n step_context.session.run(graph_side_effect)\n\n value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n return value\n\n with self.test_session() as test_session:\n with monitored_session.MonitoredSession(\n CountingSessionCreator(test_session)) as session:\n session.run(variables.global_variables_initializer())\n\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.assertTrue(trace_the_exception['run_already'])\n # Make sure the rest of the body of the step_fn is re-executed upon\n # AbortedError:\n self.assertEqual(2, trace_the_exception['side_effect_counter'])\n self.assertNear(0.62, session.run(graph_state), 0.1)\n\n def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):\n trace_the_exception = {'run_already': False}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n\n def step_fn(step_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n return value\n\n with monitored_session.SingularMonitoredSession() as session:\n with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.fail()\n\n self.assertTrue(trace_the_exception['run_already'])\n\n def test_step_fn_exception_from_before_run(self):\n trace_the_exception = {'run_already': False, 'side_effect_counter': 0}\n\n with ops.Graph().as_default():\n c = 
array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n vv = constant_op.constant(3.2)\n graph_state = variables.Variable(0.0)\n graph_side_effect = state_ops.assign_add(graph_state, 0.31)\n\n class Hook(session_run_hook.SessionRunHook):\n\n def __init__(self, testing):\n self._testing = testing\n\n def before_run(self, run_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n return session_run_hook.SessionRunArgs(fetches=vv)\n\n def after_run(self, run_context, run_values):\n self._testing.assertNear(3.2, run_values.results, 0.1)\n\n def step_fn(step_context):\n trace_the_exception['side_effect_counter'] += 1\n step_context.session.run(graph_side_effect)\n return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})\n\n with self.test_session() as test_session:\n with monitored_session.MonitoredSession(\n CountingSessionCreator(test_session),\n hooks=[Hook(self)]) as session:\n test_session.run(variables.global_variables_initializer())\n self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)\n self.assertEqual(2, trace_the_exception['side_effect_counter'])\n self.assertNear(0.62, session.run(graph_state), 0.1)\n\n\nclass SingularMonitoredSessionTest(test.TestCase):\n \"\"\"Tests SingularMonitoredSession.\"\"\"\n\n def test_handles_initialization(self):\n with ops.Graph().as_default():\n a_var = variables.Variable(0)\n with monitored_session.SingularMonitoredSession() as session:\n # If it's not initialized, following statement raises an error.\n self.assertEqual(0, session.run(a_var))\n\n def test_do_not_handle_aborted_error(self):\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n\n class _RaiseAbortedHook(session_run_hook.SessionRunHook):\n\n def before_run(self, run_context):\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n with monitored_session.SingularMonitoredSession(\n hooks=[_RaiseAbortedHook()]) as session:\n with self.assertRaises(errors_impl.AbortedError):\n self.assertEqual(0, session.run(gstep))\n\n with self.assertRaises(errors_impl.AbortedError):\n with monitored_session.SingularMonitoredSession(\n hooks=[_RaiseAbortedHook()]) as session:\n self.assertEqual(0, session.run(gstep))\n\n def test_exit_cleanly_on_out_of_range_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,\n 'EOI'))\n session = monitored_session.SingularMonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises OutOfRange. The\n # session should go into should_stop() mode. It should raise the\n # exception. 
So next step should not be executed.\n session.run(do_step)\n self.assertTrue(False)\n self.assertTrue(session.should_stop())\n\n def test_regular_exception_reported_to_coord_pass_through_run(self):\n # Tests that regular exceptions reported to the coordinator from a thread\n # passes through a \"run()\" call within a \"with MonitoredSession\" block and\n # set the session in stop mode.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n session = monitored_session.SingularMonitoredSession()\n run_performed_without_error = False\n with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n with session:\n self.assertEqual(0, session.run(gstep))\n # Report an exception through the coordinator.\n try:\n raise RuntimeError('a thread wants to stop')\n except RuntimeError as e:\n session._coordinated_creator.coord.request_stop(e)\n # Call run() which should perform normally.\n self.assertEqual(0, session.run(gstep))\n run_performed_without_error = True\n self.assertTrue(run_performed_without_error)\n\n def test_stop_cleanly_when_no_exception_in_with_body(self):\n # Tests that regular exceptions pass through\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n session = monitored_session.SingularMonitoredSession()\n with session:\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Should have closed.\n self.assertTrue(session.should_stop())\n self.assertEqual(None, session.raw_session())\n\n def test_graph(self):\n with ops.Graph().as_default() as g:\n with monitored_session.SingularMonitoredSession() as session:\n self.assertEqual(g, session.graph)\n\n def test_raw_session(self):\n with ops.Graph().as_default():\n with monitored_session.SingularMonitoredSession() as session:\n self.assertTrue(isinstance(session.raw_session(), session_lib.Session))\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests boosted_trees estimators and model_fn.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.kernels.boosted_trees import boosted_trees_pb2\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator import run_config\nfrom tensorflow.python.estimator.canned import boosted_trees\nfrom tensorflow.python.estimator.inputs import numpy_io\nfrom tensorflow.python.feature_column import feature_column\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gen_boosted_trees_ops\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.training import session_run_hook\n\nNUM_FEATURES = 3\n\nBUCKET_BOUNDARIES = [-2., .5, 12.] 
# Boundaries for all the features.\nINPUT_FEATURES = np.array(\n [\n [12.5, 1.0, -2.001, -2.0001, -1.999], # feature_0 quantized:[3,2,0,0,1]\n [2.0, -3.0, 0.5, 0.0, 0.4995], # feature_1 quantized:[2,0,2,1,1]\n [3.0, 20.0, 50.0, -100.0, 102.75], # feature_2 quantized:[2,3,3,0,3]\n ],\n dtype=np.float32)\n\nCLASSIFICATION_LABELS = [[0.], [1.], [1.], [0.], [0.]]\nREGRESSION_LABELS = [[1.5], [0.3], [0.2], [2.], [5.]]\nFEATURES_DICT = {'f_%d' % i: INPUT_FEATURES[i] for i in range(NUM_FEATURES)}\n\n# EXAMPLE_ID is not exposed to Estimator yet, but supported at model_fn level.\nEXAMPLE_IDS = np.array([0, 1, 2, 3, 4], dtype=np.int64)\nEXAMPLE_ID_COLUMN = '__example_id__'\n\n\ndef _make_train_input_fn(is_classification):\n \"\"\"Makes train input_fn for classification/regression.\"\"\"\n\n def _input_fn():\n features_dict = dict(FEATURES_DICT) # copies the dict to add an entry.\n features_dict[EXAMPLE_ID_COLUMN] = constant_op.constant(EXAMPLE_IDS)\n labels = CLASSIFICATION_LABELS if is_classification else REGRESSION_LABELS\n return features_dict, labels\n\n return _input_fn\n\n\ndef _make_train_input_fn_dataset(is_classification, batch=None, repeat=None):\n \"\"\"Makes input_fn using Dataset.\"\"\"\n\n def _input_fn():\n features_dict = dict(FEATURES_DICT) # copies the dict to add an entry.\n features_dict[EXAMPLE_ID_COLUMN] = constant_op.constant(EXAMPLE_IDS)\n labels = CLASSIFICATION_LABELS if is_classification else REGRESSION_LABELS\n if batch:\n ds = dataset_ops.Dataset.zip(\n (dataset_ops.Dataset.from_tensor_slices(features_dict),\n dataset_ops.Dataset.from_tensor_slices(labels))).batch(batch)\n else:\n ds = dataset_ops.Dataset.zip(\n (dataset_ops.Dataset.from_tensors(features_dict),\n dataset_ops.Dataset.from_tensors(labels)))\n # repeat indefinitely by default, or stop at the given step.\n ds = ds.repeat(repeat)\n return ds\n\n return _input_fn\n\n\nclass BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._feature_columns = {\n feature_column.bucketized_column(\n feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),\n BUCKET_BOUNDARIES)\n for i in range(NUM_FEATURES)\n }\n\n def _assert_checkpoint(self, model_dir, global_step, finalized_trees,\n attempted_layers):\n self._assert_checkpoint_and_return_model(model_dir, global_step,\n finalized_trees, attempted_layers)\n\n def _assert_checkpoint_and_return_model(self, model_dir, global_step,\n finalized_trees, attempted_layers):\n reader = checkpoint_utils.load_checkpoint(model_dir)\n self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))\n serialized = reader.get_tensor('boosted_trees:0_serialized')\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n\n self.assertEqual(\n finalized_trees,\n sum([1 for t in ensemble_proto.tree_metadata if t.is_finalized]))\n self.assertEqual(attempted_layers,\n ensemble_proto.growing_metadata.num_layers_attempted)\n\n return ensemble_proto\n\n def testFirstCheckpointWorksFine(self):\n \"\"\"Tests that eval/pred doesn't crash with the very first checkpoint.\n\n The step-0 checkpoint will have only an empty ensemble, and a separate eval\n job might read from it and crash.\n This test ensures that prediction/evaluation works fine with it.\n \"\"\"\n input_fn = _make_train_input_fn(is_classification=True)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesClassifier(\n 
feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n class BailOutWithoutTraining(session_run_hook.SessionRunHook):\n\n def before_run(self, run_context):\n raise StopIteration('to bail out.')\n\n est.train(input_fn, steps=100, # must stop at 0 anyway.\n hooks=[BailOutWithoutTraining()])\n self._assert_checkpoint(\n est.model_dir, global_step=0, finalized_trees=0, attempted_layers=0)\n # Empty ensemble returns 0 logits, so that all output labels are 0.\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['accuracy'], 0.6)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose([[0], [0], [0], [0], [0]],\n [pred['class_ids'] for pred in predictions])\n\n def testTrainAndEvaluateBinaryClassifier(self):\n input_fn = _make_train_input_fn(is_classification=True)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n # It will stop after 5 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(input_fn, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n\n def testTrainTwiceAndEvaluateBinaryClassifier(self):\n input_fn = _make_train_input_fn(is_classification=True)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=5,\n max_depth=10)\n\n num_steps = 2\n # Train for a few steps, and validate final checkpoint.\n est.train(input_fn, steps=num_steps)\n est.train(input_fn, steps=num_steps)\n\n self._assert_checkpoint(\n est.model_dir, global_step=num_steps * 2,\n finalized_trees=0, attempted_layers=4)\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n\n def testInferBinaryClassifier(self):\n train_input_fn = _make_train_input_fn(is_classification=True)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n # It will stop after 5 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(train_input_fn, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose([[0], [1], [1], [0], [0]],\n [pred['class_ids'] for pred in predictions])\n\n def testTrainClassifierWithRankOneLabel(self):\n \"\"\"Tests that label with rank-1 tensor is also accepted by classifier.\"\"\"\n def _input_fn_with_rank_one_label():\n return FEATURES_DICT, [0., 1., 1., 0., 0.]\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n # It will stop after 5 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(_input_fn_with_rank_one_label, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = 
est.evaluate(input_fn=_input_fn_with_rank_one_label, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n\n def testTrainClassifierWithLabelVocabulary(self):\n apple, banana = 'apple', 'banana'\n def _input_fn_with_label_vocab():\n return FEATURES_DICT, [[apple], [banana], [banana], [apple], [apple]]\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5,\n label_vocabulary=[apple, banana])\n est.train(input_fn=_input_fn_with_label_vocab, steps=5)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=_input_fn_with_label_vocab, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose([[0], [1], [1], [0], [0]],\n [pred['class_ids'] for pred in predictions])\n\n def testTrainClassifierWithIntegerLabel(self):\n def _input_fn_with_integer_label():\n return (FEATURES_DICT,\n constant_op.constant([[0], [1], [1], [0], [0]], dtypes.int32))\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n est.train(input_fn=_input_fn_with_integer_label, steps=5)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=_input_fn_with_integer_label, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose([[0], [1], [1], [0], [0]],\n [pred['class_ids'] for pred in predictions])\n\n def testTrainClassifierWithDataset(self):\n train_input_fn = _make_train_input_fn_dataset(is_classification=True)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesClassifier(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n est.train(train_input_fn, steps=100) # will stop after 5 steps anyway.\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=train_input_fn, steps=1)\n self.assertAllClose(eval_res['accuracy'], 1.0)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose([[0], [1], [1], [0], [0]],\n [pred['class_ids'] for pred in predictions])\n\n def testTrainAndEvaluateRegressor(self):\n input_fn = _make_train_input_fn(is_classification=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=2,\n max_depth=5)\n\n # It will stop after 10 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(input_fn, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=10, finalized_trees=2, attempted_layers=10)\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['average_loss'], 1.008551)\n\n def testInferRegressor(self):\n train_input_fn = _make_train_input_fn(is_classification=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, 
batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n # It will stop after 5 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(train_input_fn, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainRegressorWithRankOneLabel(self):\n \"\"\"Tests that label with rank-1 tensor is also accepted by regressor.\"\"\"\n def _input_fn_with_rank_one_label():\n return FEATURES_DICT, [1.5, 0.3, 0.2, 2., 5.]\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n\n # It will stop after 5 steps because of the max depth and num trees.\n num_steps = 100\n # Train for a few steps, and validate final checkpoint.\n est.train(_input_fn_with_rank_one_label, steps=num_steps)\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=_input_fn_with_rank_one_label, steps=1)\n self.assertAllClose(eval_res['average_loss'], 2.478283)\n\n def testTrainRegressorWithDataset(self):\n train_input_fn = _make_train_input_fn_dataset(is_classification=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n est.train(train_input_fn, steps=100) # will stop after 5 steps anyway.\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=train_input_fn, steps=1)\n self.assertAllClose(eval_res['average_loss'], 2.478283)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainRegressorWithDatasetBatch(self):\n # The batch_size as the entire data size should yield the same result as\n # dataset without batching.\n train_input_fn = _make_train_input_fn_dataset(\n is_classification=False, batch=5)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n est.train(train_input_fn, steps=100) # will stop after 5 steps anyway.\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=train_input_fn, steps=1)\n self.assertAllClose(eval_res['average_loss'], 2.478283)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainRegressorWithDatasetLargerBatch(self):\n # The batch_size as the multiple of the entire data size should still yield\n # the same result.\n train_input_fn = _make_train_input_fn_dataset(\n is_classification=False, 
batch=15)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n est.train(train_input_fn, steps=100) # will stop after 5 steps anyway.\n self._assert_checkpoint(\n est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)\n eval_res = est.evaluate(input_fn=train_input_fn, steps=1)\n self.assertAllClose(eval_res['average_loss'], 2.478283)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainRegressorWithDatasetSmallerBatch(self):\n # Even when using small batches, if (n_batches_per_layer * batch_size) makes\n # the same entire data size, the result should be the same.\n train_input_fn = _make_train_input_fn_dataset(\n is_classification=False, batch=1)\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=5,\n n_trees=1,\n max_depth=5)\n # Train stops after (n_batches_per_layer * n_trees * max_depth) steps.\n est.train(train_input_fn, steps=100)\n self._assert_checkpoint(\n est.model_dir, global_step=25, finalized_trees=1, attempted_layers=5)\n # 5 batches = one epoch.\n eval_res = est.evaluate(input_fn=train_input_fn, steps=5)\n self.assertAllClose(eval_res['average_loss'], 2.478283)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainRegressorWithDatasetWhenInputIsOverEarlier(self):\n train_input_fn = _make_train_input_fn_dataset(\n is_classification=False, repeat=3) # to stop input after 3 steps.\n predict_input_fn = numpy_io.numpy_input_fn(\n x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)\n\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=self._feature_columns,\n n_batches_per_layer=1,\n n_trees=1,\n max_depth=5)\n # Note that training will stop when input exhausts.\n # This might not be a typical pattern, but dataset.repeat(3) causes\n # the input stream to cease after 3 steps.\n est.train(train_input_fn, steps=100)\n self._assert_checkpoint(\n est.model_dir, global_step=3, finalized_trees=0, attempted_layers=3)\n eval_res = est.evaluate(input_fn=train_input_fn, steps=1)\n self.assertAllClose(eval_res['average_loss'], 3.777295)\n predictions = list(est.predict(input_fn=predict_input_fn))\n self.assertAllClose(\n [[0.353850], [0.254100], [0.106850], [0.712100], [1.012100]],\n [pred['predictions'] for pred in predictions])\n\n def testTrainEvaluateAndPredictWithIndicatorColumn(self):\n categorical = feature_column.categorical_column_with_vocabulary_list(\n key='categorical', vocabulary_list=('bad', 'good', 'ok'))\n feature_indicator = feature_column.indicator_column(categorical)\n bucketized_col = feature_column.bucketized_column(\n feature_column.numeric_column(\n 'an_uninformative_feature', dtype=dtypes.float32),\n BUCKET_BOUNDARIES)\n\n labels = np.array([[0.], [5.7], [5.7], [0.], [0.]], dtype=np.float32)\n # Our categorical feature defines the labels perfectly\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'an_uninformative_feature': np.array([1, 1, 1, 
1, 1]),\n 'categorical': np.array(['bad', 'good', 'good', 'ok', 'bad']),\n },\n y=labels,\n batch_size=5,\n shuffle=False)\n\n # Train depth 1 tree.\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=[bucketized_col, feature_indicator],\n n_batches_per_layer=1,\n n_trees=1,\n learning_rate=1.0,\n max_depth=1)\n\n num_steps = 1\n est.train(input_fn, steps=num_steps)\n ensemble = self._assert_checkpoint_and_return_model(\n est.model_dir, global_step=1, finalized_trees=1, attempted_layers=1)\n\n # We learnt perfectly.\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['loss'], 0)\n\n predictions = list(est.predict(input_fn))\n self.assertAllClose(\n labels,\n [pred['predictions'] for pred in predictions])\n\n self.assertEqual(3, len(ensemble.trees[0].nodes))\n\n # Check that the split happened on 'good' value, which will be encoded as\n # feature with index 2 (0-numeric, 1 - 'bad')\n self.assertEqual(2, ensemble.trees[0].nodes[0].bucketized_split.feature_id)\n self.assertEqual(0, ensemble.trees[0].nodes[0].bucketized_split.threshold)\n\n def testTrainEvaluateAndPredictWithOnlyIndicatorColumn(self):\n categorical = feature_column.categorical_column_with_vocabulary_list(\n key='categorical', vocabulary_list=('bad', 'good', 'ok'))\n feature_indicator = feature_column.indicator_column(categorical)\n\n labels = np.array([[0.], [5.7], [5.7], [0.], [0.]], dtype=np.float32)\n # Our categorical feature defines the labels perfectly\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'categorical': np.array(['bad', 'good', 'good', 'ok', 'bad']),\n },\n y=labels,\n batch_size=5,\n shuffle=False)\n\n # Train depth 1 tree.\n est = boosted_trees.BoostedTreesRegressor(\n feature_columns=[feature_indicator],\n n_batches_per_layer=1,\n n_trees=1,\n learning_rate=1.0,\n max_depth=1)\n\n num_steps = 1\n est.train(input_fn, steps=num_steps)\n ensemble = self._assert_checkpoint_and_return_model(\n est.model_dir, global_step=1, finalized_trees=1, attempted_layers=1)\n\n # We learnt perfectly.\n eval_res = est.evaluate(input_fn=input_fn, steps=1)\n self.assertAllClose(eval_res['loss'], 0)\n\n predictions = list(est.predict(input_fn))\n self.assertAllClose(\n labels,\n [pred['predictions'] for pred in predictions])\n\n self.assertEqual(3, len(ensemble.trees[0].nodes))\n\n # Check that the split happened on 'good' value, which will be encoded as\n # feature with index 1 (0 - 'bad', 2 - 'ok')\n self.assertEqual(1, ensemble.trees[0].nodes[0].bucketized_split.feature_id)\n self.assertEqual(0, ensemble.trees[0].nodes[0].bucketized_split.threshold)\n\n\nclass ModelFnTests(test_util.TensorFlowTestCase):\n \"\"\"Tests bt_model_fn including unexposed internal functionalities.\"\"\"\n\n def setUp(self):\n self._feature_columns = {\n feature_column.bucketized_column(\n feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),\n BUCKET_BOUNDARIES) for i in range(NUM_FEATURES)\n }\n\n def _get_expected_ensembles_for_classification(self):\n first_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.387675\n }\n }\n nodes {\n leaf {\n scalar: -0.181818\n }\n }\n nodes {\n leaf {\n scalar: 0.0625\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 1\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n second_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n 
feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.387675\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 3\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 0.0\n original_leaf {\n scalar: -0.181818\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.105518\n original_leaf {\n scalar: 0.0625\n }\n }\n }\n nodes {\n leaf {\n scalar: -0.348397\n }\n }\n nodes {\n leaf {\n scalar: -0.181818\n }\n }\n nodes {\n leaf {\n scalar: 0.224091\n }\n }\n nodes {\n leaf {\n scalar: 0.056815\n }\n }\n }\n trees {\n nodes {\n leaf {\n scalar: 0.0\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 0\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 2\n last_layer_node_start: 0\n last_layer_node_end: 1\n }\n \"\"\"\n third_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.387675\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 3\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 0.0\n original_leaf {\n scalar: -0.181818\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.105518\n original_leaf {\n scalar: 0.0625\n }\n }\n }\n nodes {\n leaf {\n scalar: -0.348397\n }\n }\n nodes {\n leaf {\n scalar: -0.181818\n }\n }\n nodes {\n leaf {\n scalar: 0.224091\n }\n }\n nodes {\n leaf {\n scalar: 0.056815\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 0\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.287131\n }\n }\n nodes {\n leaf {\n scalar: 0.162042\n }\n }\n nodes {\n leaf {\n scalar: -0.086986\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 2\n num_layers_attempted: 3\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n return (first_round, second_round, third_round)\n\n def _get_expected_ensembles_for_classification_with_bias(self):\n first_round = \"\"\"\n trees {\n nodes {\n leaf {\n scalar: -0.405086\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n }\n \"\"\"\n second_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.407711\n original_leaf {\n scalar: -0.405086\n }\n }\n }\n nodes {\n leaf {\n scalar: -0.556054\n }\n }\n nodes {\n leaf {\n scalar: -0.301233\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 1\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n third_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.407711\n original_leaf {\n scalar: -0.405086\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 3\n left_id: 3\n right_id: 4\n }\n metadata {\n original_leaf {\n scalar: -0.556054\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.09876\n original_leaf {\n scalar: -0.301233\n }\n }\n }\n nodes {\n 
leaf {\n scalar: -0.698072\n }\n }\n nodes {\n leaf {\n scalar: -0.556054\n }\n }\n nodes {\n leaf {\n scalar: -0.106016\n }\n }\n nodes {\n leaf {\n scalar: -0.27349\n }\n }\n }\n trees {\n nodes {\n leaf {\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 2\n last_layer_node_end: 1\n }\n \"\"\"\n forth_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.4077113\n original_leaf {\n scalar: -0.405086\n }\n }\n }\n nodes {\n bucketized_split {\n threshold: 3\n left_id: 3\n right_id: 4\n }\n metadata {\n original_leaf {\n scalar: -0.556054\n }\n }\n }\n nodes {\n bucketized_split {\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.09876\n original_leaf {\n scalar: -0.301233\n }\n }\n }\n nodes {\n leaf {\n scalar: -0.698072\n }\n }\n nodes {\n leaf {\n scalar: -0.556054\n }\n }\n nodes {\n leaf {\n scalar: -0.106016\n }\n }\n nodes {\n leaf {\n scalar: -0.27349\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 2\n threshold: 2\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.289927\n }\n }\n nodes {\n leaf {\n scalar: -0.134588\n }\n }\n nodes {\n leaf {\n scalar: 0.083838 \n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 1\n }\n growing_metadata {\n num_trees_attempted: 2\n num_layers_attempted: 3\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n return (first_round, second_round, third_round, forth_round)\n\n def _get_expected_ensembles_for_regression(self):\n first_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.169714\n }\n }\n nodes {\n leaf {\n scalar: 0.241322\n }\n }\n nodes {\n leaf {\n scalar: 0.083951\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 1\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n second_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.169714\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 1\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 2.673407\n original_leaf {\n scalar: 0.241322\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.324102\n original_leaf {\n scalar: 0.083951\n }\n }\n }\n nodes {\n leaf {\n scalar: 0.563167\n }\n }\n nodes {\n leaf {\n scalar: 0.247047\n }\n }\n nodes {\n leaf {\n scalar: 0.095273\n }\n }\n nodes {\n leaf {\n scalar: 0.222102\n }\n }\n }\n trees {\n nodes {\n leaf {\n scalar: 0.0\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 0\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 2\n last_layer_node_start: 0\n last_layer_node_end: 1\n }\n \"\"\"\n third_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.169714\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n 
threshold: 1\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 2.673407\n original_leaf {\n scalar: 0.241322\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.324102\n original_leaf {\n scalar: 0.083951\n }\n }\n }\n nodes {\n leaf {\n scalar: 0.563167\n }\n }\n nodes {\n leaf {\n scalar: 0.247047\n }\n }\n nodes {\n leaf {\n scalar: 0.095273\n }\n }\n nodes {\n leaf {\n scalar: 0.222102\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 0\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.981026\n }\n }\n nodes {\n leaf {\n scalar: 0.005166\n }\n }\n nodes {\n leaf {\n scalar: 0.180281\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 2\n num_layers_attempted: 3\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n return (first_round, second_round, third_round)\n\n def _get_expected_ensembles_for_regression_with_bias(self):\n first_round = \"\"\"\n trees {\n nodes {\n leaf {\n scalar: 1.799974\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n }\n \"\"\"\n second_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.190442\n original_leaf {\n scalar: 1.799974\n }\n }\n }\n nodes {\n leaf {\n scalar: 1.862786\n }\n }\n nodes {\n leaf {\n scalar: 1.706149\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 1\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 1\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n third_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.190442\n original_leaf {\n scalar: 1.799974\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 1\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 2.683594\n original_leaf {\n scalar: 1.862786\n }\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 0\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.322693\n original_leaf {\n scalar: 1.706149\n }\n }\n }\n nodes {\n leaf {\n scalar: 2.024487\n }\n }\n nodes {\n leaf {\n scalar: 1.710319\n }\n }\n nodes {\n leaf {\n scalar: 1.559208\n }\n }\n nodes {\n leaf {\n scalar: 1.686037\n }\n }\n }\n trees {\n nodes {\n leaf {\n scalar: 0.0\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 0\n is_finalized: false\n }\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 2\n last_layer_node_start: 0\n last_layer_node_end: 1\n }\n \"\"\"\n forth_round = \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 1.190442\n original_leaf {\n scalar: 1.799974\n }\n }\n }\n nodes {\n bucketized_split {\n threshold: 1\n left_id: 3\n right_id: 4\n }\n metadata {\n gain: 2.683594\n original_leaf {\n scalar: 1.8627863\n }\n }\n }\n nodes {\n bucketized_split {\n left_id: 5\n right_id: 6\n }\n metadata {\n gain: 0.322693\n original_leaf {\n scalar: 1.706149\n }\n }\n }\n nodes {\n leaf {\n scalar: 2.024487\n }\n }\n nodes {\n leaf {\n scalar: 1.710319\n }\n }\n nodes {\n leaf {\n scalar: 1.5592078\n }\n }\n nodes {\n 
leaf {\n scalar: 1.686037\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 0.972589\n }\n }\n nodes {\n leaf {\n scalar: -0.137592\n }\n }\n nodes {\n leaf {\n scalar: 0.034926\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_layers_grown: 2\n is_finalized: true\n }\n tree_metadata {\n num_layers_grown: 1\n }\n growing_metadata {\n num_trees_attempted: 2\n num_layers_attempted: 3\n last_layer_node_start: 1\n last_layer_node_end: 3\n }\n \"\"\"\n return (first_round, second_round, third_round, forth_round)\n\n def _get_train_op_and_ensemble(self,\n head,\n config,\n is_classification,\n train_in_memory,\n center_bias=False):\n \"\"\"Calls bt_model_fn() and returns the train_op and ensemble_serialzed.\"\"\"\n features, labels = _make_train_input_fn(is_classification)()\n\n tree_hparams = boosted_trees._TreeHParams( # pylint:disable=protected-access\n n_trees=2,\n max_depth=2,\n learning_rate=0.1,\n l1=0.,\n l2=0.01,\n tree_complexity=0.,\n min_node_weight=0.,\n center_bias=center_bias,\n pruning_mode='none')\n\n estimator_spec = boosted_trees._bt_model_fn( # pylint:disable=protected-access\n features=features,\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n head=head,\n feature_columns=self._feature_columns,\n tree_hparams=tree_hparams,\n example_id_column_name=EXAMPLE_ID_COLUMN,\n n_batches_per_layer=1,\n config=config,\n train_in_memory=train_in_memory)\n resources.initialize_resources(resources.shared_resources()).run()\n variables.global_variables_initializer().run()\n variables.local_variables_initializer().run()\n\n # Gets the train_op and serialized proto of the ensemble.\n shared_resources = resources.shared_resources()\n self.assertEqual(1, len(shared_resources))\n train_op = estimator_spec.train_op\n with ops.control_dependencies([train_op]):\n _, ensemble_serialized = (\n gen_boosted_trees_ops.boosted_trees_serialize_ensemble(\n shared_resources[0].handle))\n return train_op, ensemble_serialized\n\n def testTrainClassifierInMemory(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third = (\n self._get_expected_ensembles_for_classification())\n with self.test_session() as sess:\n # Train with train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_classification_head(n_classes=2),\n run_config.RunConfig(),\n is_classification=True,\n train_in_memory=True)\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n def testTrainClassifierWithCenterBiasInMemory(self):\n ops.reset_default_graph()\n\n # When bias centering is on, we expect the very first node to have the\n expected_first, expected_second, expected_third, expected_forth = (\n 
self._get_expected_ensembles_for_classification_with_bias())\n\n with self.test_session() as sess:\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_classification_head(n_classes=2),\n run_config.RunConfig(),\n is_classification=True,\n train_in_memory=True,\n center_bias=True)\n\n # 4 iterations to center bias.\n for _ in range(4):\n _, serialized = sess.run([train_op, ensemble_serialized])\n\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n # Forth round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n\n self.assertProtoEquals(expected_forth, ensemble_proto)\n\n def testTrainClassifierNonInMemory(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third = (\n self._get_expected_ensembles_for_classification())\n with self.test_session() as sess:\n # Train without train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_classification_head(n_classes=2),\n run_config.RunConfig(),\n is_classification=True,\n train_in_memory=False)\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n def testTrainClassifierWithCenterBiasNonInMemory(self):\n ops.reset_default_graph()\n\n # When bias centering is on, we expect the very first node to have the\n expected_first, expected_second, expected_third, expected_forth = (\n self._get_expected_ensembles_for_classification_with_bias())\n\n with self.test_session() as sess:\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_classification_head(n_classes=2),\n run_config.RunConfig(),\n is_classification=True,\n train_in_memory=False,\n center_bias=True)\n # 4 iterations to center bias.\n for _ in range(4):\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, 
ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n # Forth round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_forth, ensemble_proto)\n\n def testTrainRegressorInMemory(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third = (\n self._get_expected_ensembles_for_regression())\n with self.test_session() as sess:\n # Train with train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_regression_head(label_dimension=1),\n run_config.RunConfig(),\n is_classification=False,\n train_in_memory=True)\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n def testTrainRegressorInMemoryWithCenterBias(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third, expected_forth = (\n self._get_expected_ensembles_for_regression_with_bias())\n with self.test_session() as sess:\n # Train with train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_regression_head(label_dimension=1),\n run_config.RunConfig(),\n is_classification=False,\n train_in_memory=True,\n center_bias=True)\n # 3 iterations to center bias.\n for _ in range(3):\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n # Forth round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n 
ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_forth, ensemble_proto)\n\n def testTrainRegressorNonInMemory(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third = (\n self._get_expected_ensembles_for_regression())\n with self.test_session() as sess:\n # Train without train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_regression_head(label_dimension=1),\n run_config.RunConfig(),\n is_classification=False,\n train_in_memory=False)\n _, serialized = sess.run([train_op, ensemble_serialized])\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n def testTrainRegressorNotInMemoryWithCenterBias(self):\n ops.reset_default_graph()\n expected_first, expected_second, expected_third, expected_forth = (\n self._get_expected_ensembles_for_regression_with_bias())\n with self.test_session() as sess:\n # Train without train_in_memory mode.\n with sess.graph.as_default():\n train_op, ensemble_serialized = self._get_train_op_and_ensemble(\n boosted_trees._create_regression_head(label_dimension=1),\n run_config.RunConfig(),\n is_classification=False,\n train_in_memory=False,\n center_bias=True)\n # 3 iterations to center the bias (because we are using regularization).\n for _ in range(3):\n _, serialized = sess.run([train_op, ensemble_serialized])\n\n # Validate the trained ensemble.\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_first, ensemble_proto)\n\n # Run one more time and validate the trained ensemble.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_second, ensemble_proto)\n\n # Third round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_third, ensemble_proto)\n\n # Forth round training and validation.\n _, serialized = sess.run([train_op, ensemble_serialized])\n ensemble_proto = boosted_trees_pb2.TreeEnsemble()\n ensemble_proto.ParseFromString(serialized)\n self.assertProtoEquals(expected_forth, ensemble_proto)\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.contrib.gan.python.features.random_tensor_pool.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import tensor_pool\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass TensorPoolTest(test.TestCase):\n\n def test_pool_unknown_input_shape(self):\n \"\"\"Checks that `input_value` can have unknown shape.\"\"\"\n input_value = array_ops.placeholder(\n dtype=dtypes.int32, shape=[None, None, 3])\n output_value = tensor_pool(input_value, pool_size=10)\n self.assertEqual(output_value.shape.as_list(), [None, None, 3])\n\n with self.test_session(use_gpu=True) as session:\n for i in range(10):\n session.run(output_value, {input_value: [[[i] * 3]]})\n session.run(output_value, {input_value: [[[i] * 3] * 2]})\n session.run(output_value, {input_value: [[[i] * 3] * 5] * 2})\n\n def test_pool_sequence(self):\n \"\"\"Checks that values are pooled and returned maximally twice.\"\"\"\n input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])\n output_value = tensor_pool(input_value, pool_size=10)\n self.assertEqual(output_value.shape.as_list(), [])\n\n with self.test_session(use_gpu=True) as session:\n outs = []\n for i in range(50):\n out = session.run(output_value, {input_value: i})\n outs.append(out)\n self.assertLessEqual(out, i)\n\n _, counts = np.unique(outs, return_counts=True)\n # Check that each value is returned maximally twice.\n self.assertTrue((counts <= 2).all())\n\n def test_never_pool(self):\n \"\"\"Checks that setting `pooling_probability` to zero works.\"\"\"\n input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])\n output_value = tensor_pool(\n input_value, pool_size=10, pooling_probability=0.0)\n self.assertEqual(output_value.shape.as_list(), [])\n\n with self.test_session(use_gpu=True) as session:\n for i in range(50):\n out = session.run(output_value, {input_value: i})\n self.assertEqual(out, i)\n\n def test_pooling_probability(self):\n \"\"\"Checks that `pooling_probability` works.\"\"\"\n input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])\n pool_size = 10\n pooling_probability = 0.2\n output_value = tensor_pool(\n input_value,\n pool_size=pool_size,\n pooling_probability=pooling_probability)\n self.assertEqual(output_value.shape.as_list(), [])\n\n with self.test_session(use_gpu=True) as session:\n not_pooled = 0\n total = 1000\n for i in range(total):\n out = session.run(output_value, {input_value: i})\n if out == i:\n not_pooled += 1\n self.assertAllClose(\n (not_pooled - pool_size) / (total - pool_size),\n 1 - 
pooling_probability,\n atol=0.03)\n\n def test_input_values_tuple(self):\n \"\"\"Checks that `input_values` can be a tuple.\"\"\"\n input_values = (array_ops.placeholder(dtype=dtypes.int32, shape=[]),\n array_ops.placeholder(dtype=dtypes.int32, shape=[]))\n output_values = tensor_pool(input_values, pool_size=3)\n self.assertEqual(len(output_values), len(input_values))\n for output_value in output_values:\n self.assertEqual(output_value.shape.as_list(), [])\n\n with self.test_session(use_gpu=True) as session:\n for i in range(10):\n outs = session.run(output_values, {\n input_values[0]: i,\n input_values[1]: i + 1\n })\n self.assertEqual(len(outs), len(input_values))\n self.assertEqual(outs[1] - outs[0], 1)\n\n def test_pool_preserves_shape(self):\n t = constant_op.constant(1)\n input_values = [[t, t, t], (t, t), t]\n output_values = tensor_pool(input_values, pool_size=5)\n print('stuff: ', output_values)\n # Overall shape.\n self.assertIsInstance(output_values, list)\n self.assertEqual(3, len(output_values))\n # Shape of first element.\n self.assertIsInstance(output_values[0], list)\n self.assertEqual(3, len(output_values[0]))\n # Shape of second element.\n self.assertIsInstance(output_values[1], tuple)\n self.assertEqual(2, len(output_values[1]))\n # Shape of third element.\n self.assertIsInstance(output_values[2], ops.Tensor)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Debugger Wrapper Session Consisting of a Local Curses-based CLI.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.wrappers import dumping_wrapper\nfrom tensorflow.python.debug.wrappers import hooks\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import monitored_session\n\n\nclass DumpingDebugWrapperDiskUsageLimitTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n # For efficient testing, set the disk usage bytes limit to a small\n # number (10).\n os.environ[\"TFDBG_DISK_BYTES_LIMIT\"] = \"10\"\n\n def setUp(self):\n self.session_root = tempfile.mkdtemp()\n\n self.v = variables.Variable(10.0, dtype=dtypes.float32, name=\"v\")\n self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name=\"delta\")\n self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name=\"eta\")\n self.inc_v = state_ops.assign_add(self.v, self.delta, name=\"inc_v\")\n self.dec_v = state_ops.assign_add(self.v, self.eta, name=\"dec_v\")\n\n self.sess = 
session.Session()\n self.sess.run(self.v.initializer)\n\n def testWrapperSessionNotExceedingLimit(self):\n def _watch_fn(fetches, feeds):\n del fetches, feeds\n return \"DebugIdentity\", r\"(.*delta.*|.*inc_v.*)\", r\".*\"\n sess = dumping_wrapper.DumpingDebugWrapperSession(\n self.sess, session_root=self.session_root,\n watch_fn=_watch_fn, log_usage=False)\n sess.run(self.inc_v)\n\n def testWrapperSessionExceedingLimit(self):\n def _watch_fn(fetches, feeds):\n del fetches, feeds\n return \"DebugIdentity\", r\".*delta.*\", r\".*\"\n sess = dumping_wrapper.DumpingDebugWrapperSession(\n self.sess, session_root=self.session_root,\n watch_fn=_watch_fn, log_usage=False)\n # Due to the watch function, each run should dump only 1 tensor,\n # which has a size of 4 bytes, which corresponds to the dumped 'delta:0'\n # tensor of scalar shape and float32 dtype.\n # 1st run should pass, after which the disk usage is at 4 bytes.\n sess.run(self.inc_v)\n # 2nd run should also pass, after which 8 bytes are used.\n sess.run(self.inc_v)\n # 3rd run should fail, because the total byte count (12) exceeds the\n # limit (10)\n with self.assertRaises(ValueError):\n sess.run(self.inc_v)\n\n def testHookNotExceedingLimit(self):\n def _watch_fn(fetches, feeds):\n del fetches, feeds\n return \"DebugIdentity\", r\".*delta.*\", r\".*\"\n dumping_hook = hooks.DumpingDebugHook(\n self.session_root, watch_fn=_watch_fn, log_usage=False)\n mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])\n mon_sess.run(self.inc_v)\n\n def testHookExceedingLimit(self):\n def _watch_fn(fetches, feeds):\n del fetches, feeds\n return \"DebugIdentity\", r\".*delta.*\", r\".*\"\n dumping_hook = hooks.DumpingDebugHook(\n self.session_root, watch_fn=_watch_fn, log_usage=False)\n mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])\n # Like in `testWrapperSessionExceedingLimit`, the first two calls\n # should be within the byte limit, but the third one should error\n # out due to exceeding the limit.\n mon_sess.run(self.inc_v)\n mon_sess.run(self.inc_v)\n with self.assertRaises(ValueError):\n mon_sess.run(self.inc_v)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Rate.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.rate import rate\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass RateTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes()\n def testBuildRate(self):\n m = rate.Rate()\n m.build(\n constant_op.constant([1], dtype=dtypes.float32),\n constant_op.constant([2], dtype=dtypes.float32))\n old_numer = m.numer\n m(\n constant_op.constant([2], dtype=dtypes.float32),\n constant_op.constant([2], dtype=dtypes.float32))\n self.assertTrue(old_numer is m.numer)\n\n @test_util.run_in_graph_and_eager_modes()\n def testBasic(self):\n with self.test_session():\n r_ = rate.Rate()\n a = r_(array_ops.ones([1]), denominator=array_ops.ones([1]))\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(variables.local_variables_initializer())\n self.assertEqual([[1]], self.evaluate(a))\n b = r_(constant_op.constant([2]), denominator=constant_op.constant([2]))\n self.assertEqual([[1]], self.evaluate(b))\n c = r_(constant_op.constant([4]), denominator=constant_op.constant([3]))\n self.assertEqual([[2]], self.evaluate(c))\n d = r_(constant_op.constant([16]), denominator=constant_op.constant([3]))\n self.assertEqual([[0]], self.evaluate(d)) # divide by 0\n\n def testNamesWithSpaces(self):\n m1 = rate.Rate(name=\"has space\")\n m1(array_ops.ones([1]), array_ops.ones([1]))\n self.assertEqual(m1.name, \"has space\")\n self.assertEqual(m1.prev_values.name, \"has_space_1/prev_values:0\")\n\n @test_util.run_in_graph_and_eager_modes()\n def testWhileLoop(self):\n with self.test_session():\n r_ = rate.Rate()\n\n def body(value, denom, i, ret_rate):\n i += 1\n ret_rate = r_(value, denom)\n with ops.control_dependencies([ret_rate]):\n value = math_ops.add(value, 2)\n denom = math_ops.add(denom, 1)\n return [value, denom, i, ret_rate]\n\n def condition(v, d, i, r):\n del v, d, r # unused vars by condition\n return math_ops.less(i, 100)\n\n i = constant_op.constant(0)\n value = constant_op.constant([1], dtype=dtypes.float64)\n denom = constant_op.constant([1], dtype=dtypes.float64)\n ret_rate = r_(value, denom)\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(variables.local_variables_initializer())\n loop = control_flow_ops.while_loop(condition, body,\n [value, denom, i, ret_rate])\n self.assertEqual([[2]], self.evaluate(loop[3]))\n\n\nif __name__ == \"__main__\":\n 
test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for XLA Gather Op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import test\n\nFLAGS = flags.FLAGS\n\n\nclass GatherTest(xla_test.XLATestCase):\n\n def _buildParams(self, data, dtype):\n data = data.astype(dtype.as_numpy_dtype)\n # For complex types, adds an index-dependent imaginary component so we can\n # tell we got the right value.\n if dtype.is_complex:\n return data + 10j * data\n return data\n\n def testScalar1D(self):\n with self.cached_session() as session, self.test_scope():\n data = np.array([0, 1, 2, 3, 7, 5])\n for dtype in self.all_tf_types:\n for indices in 4, [4], [1, 2, 2, 4, 5]:\n params_np = self._buildParams(data, dtype)\n params = array_ops.placeholder(dtype=dtype)\n indices_tf = constant_op.constant(indices)\n gather_t = array_ops.gather(params, indices_tf)\n gather_val = session.run(gather_t, feed_dict={params: params_np})\n np_val = params_np[indices]\n self.assertAllEqual(np_val, gather_val)\n\n def testScalar2D(self):\n with self.cached_session() as session, self.test_scope():\n data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],\n [12, 13, 14]])\n for dtype in self.all_tf_types:\n for axis in 0, 1, -1:\n params_np = self._buildParams(data, dtype)\n params = array_ops.placeholder(dtype=dtype)\n indices = constant_op.constant(2)\n gather_t = array_ops.gather(params, indices, axis=axis)\n gather_val = session.run(gather_t, feed_dict={params: params_np})\n expected = np.take(params_np, 2, axis=axis)\n self.assertAllEqual(expected, gather_val)\n\n def testSimpleTwoD32(self):\n with self.cached_session() as session, self.test_scope():\n data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],\n [12, 13, 14]])\n for dtype in self.all_tf_types:\n for axis in 0, 1, -1:\n params_np = self._buildParams(data, dtype)\n params = array_ops.placeholder(dtype=dtype)\n # The indices must be in bounds for any axis.\n indices = constant_op.constant([0, 1, 0, 2])\n gather_t = array_ops.gather(params, indices, axis=axis)\n gather_val = session.run(gather_t, feed_dict={params: params_np})\n expected = np.take(params_np, [0, 1, 0, 2], axis=axis)\n self.assertAllEqual(expected, gather_val)\n\n def testSimpleTwoD32_Int64Indices(self):\n if np.int64 not in self.int_types:\n return\n\n with self.cached_session() as session, self.test_scope():\n data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],\n [12, 13, 14]])\n # The 
indices must be in bounds for any axis.\n indices_np = np.array([0, 1, 0, 2])\n for dtype in self.all_tf_types:\n for axis in 0, 1, -1:\n params_np = self._buildParams(data, dtype)\n params = array_ops.placeholder(dtype=dtype)\n indices = array_ops.placeholder(dtype=dtypes.int64)\n gather_t = array_ops.gather(params, indices, axis=axis)\n gather_val = session.run(\n gather_t, feed_dict={\n params: params_np,\n indices: indices_np\n })\n expected = np.take(params_np, [0, 1, 0, 2], axis=axis)\n self.assertAllEqual(expected, gather_val)\n\n def testHigherRank(self):\n \"\"\"Check that scalar and empty indices shapes work as well.\"\"\"\n shape = (2, 1, 3, 2)\n for indices_shape in (), (0,), (2, 0), (2, 3):\n for dtype in self.all_tf_types:\n for axis in 0, 1, 2, 3, -1, -2:\n params = self._buildParams(np.random.randn(*shape), dtype)\n indices = np.random.randint(shape[axis], size=indices_shape)\n with self.cached_session() as sess, self.test_scope():\n tf_params = array_ops.placeholder(dtype=dtype)\n tf_indices = constant_op.constant(indices, dtype=dtypes.int32)\n gather = array_ops.gather(tf_params, tf_indices, axis=axis)\n gather_value = sess.run(gather, feed_dict={tf_params: params})\n gather_np = np.take(params, indices, axis=axis)\n self.assertAllEqual(gather_np, gather_value)\n\n def testIndicesWithDifferentDimensions(self):\n with self.cached_session():\n for dtype in self.numeric_tf_types:\n params = array_ops.placeholder(dtype=dtype)\n indices = array_ops.placeholder(dtype=np.int32)\n with self.test_scope():\n gather = array_ops.gather(params, indices)\n self.assertAllEqual(\n 7, gather.eval(feed_dict={params: [4, 7, 2], indices: 1}))\n self.assertAllEqual(\n [7], gather.eval(feed_dict={params: [4, 7, 2], indices: [1]}))\n self.assertAllEqual(\n [[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))\n\n def testGatherPrecision(self):\n with self.cached_session() as session, self.test_scope():\n data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],\n [0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])\n indices = np.array([1, 2, 3, 1])\n dtype = dtypes.float32\n params_np = self._buildParams(data, dtype)\n params = array_ops.placeholder(dtype=dtype)\n indices_tf = constant_op.constant(indices)\n gather_t = array_ops.gather(params, indices_tf)\n gather_val = session.run(gather_t, feed_dict={params: params_np})\n np_val = params_np[indices]\n self.assertAllEqual(np_val, gather_val)\n\n\nclass GatherBenchmark(test.Benchmark):\n \"\"\"Microbenchmarks for the gather op.\"\"\"\n\n def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):\n\n def BuilderFn():\n inputs = variables.Variable(\n array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),\n dtype=dtypes.float32,\n name='input')\n indices = variables.Variable(\n gather_indices, dtype=dtypes.int32, name='indices')\n gather_t = array_ops.gather(inputs, indices, axis=axis)\n return '%s.axis%d' % (name, axis), [gather_t]\n\n xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')\n\n def _benchmarkSliceGather(self, axis, use_xla_jit):\n \"\"\"Benchmarks a gather op that's really a dynamic slice.\"\"\"\n self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)\n\n def _benchmarkNontrivialGather(self, axis, use_xla_jit):\n self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,\n use_xla_jit)\n\n def benchmarkSliceGatherAxis0(self):\n self._benchmarkSliceGather(axis=0, use_xla_jit=False)\n\n def benchmarkSliceGatherAxis0XLA(self):\n 
self._benchmarkSliceGather(axis=0, use_xla_jit=True)\n\n def benchmarkSliceGatherAxis1(self):\n self._benchmarkSliceGather(axis=1, use_xla_jit=False)\n\n def benchmarkSliceGatherAxis1XLA(self):\n self._benchmarkSliceGather(axis=1, use_xla_jit=True)\n\n def benchmarkSliceGatherAxis4(self):\n self._benchmarkSliceGather(axis=4, use_xla_jit=False)\n\n def benchmarkSliceGatherAxis4XLA(self):\n self._benchmarkSliceGather(axis=4, use_xla_jit=True)\n\n def benchmarkNontrivialGatherAxis0(self):\n self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)\n\n def benchmarkNontrivialGatherAxis0XLA(self):\n self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)\n\n def benchmarkNontrivialGatherAxis1(self):\n self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)\n\n def benchmarkNontrivialGatherAxis1XLA(self):\n self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)\n\n def benchmarkNontrivialGatherAxis4(self):\n self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)\n\n def benchmarkNontrivialGatherAxis4XLA(self):\n self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for testing reader datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport zlib\n\nfrom tensorflow.contrib.data.python.ops import readers\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.example import feature_pb2\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.lib.io import python_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\nclass FixedLengthRecordDatasetTestBase(test.TestCase):\n \"\"\"Base class for setting up and testing FixedLengthRecordDataset.\"\"\"\n\n def setUp(self):\n super(FixedLengthRecordDatasetTestBase, self).setUp()\n self._num_files = 2\n self._num_records = 7\n self._header_bytes = 5\n self._record_bytes = 3\n self._footer_bytes = 2\n\n def _record(self, f, r):\n return compat.as_bytes(str(f * 2 + r) * self._record_bytes)\n\n def _createFiles(self):\n filenames = []\n for i in range(self._num_files):\n fn = os.path.join(self.get_temp_dir(), \"fixed_length_record.%d.txt\" % i)\n filenames.append(fn)\n with open(fn, \"wb\") as f:\n f.write(b\"H\" * self._header_bytes)\n for j in range(self._num_records):\n f.write(self._record(i, j))\n f.write(b\"F\" * self._footer_bytes)\n return filenames\n\n\nclass ReadBatchFeaturesTestBase(test.TestCase):\n \"\"\"Base class for setting up and testing 
`make_batched_feature_dataset`.\"\"\"\n\n def setUp(self):\n super(ReadBatchFeaturesTestBase, self).setUp()\n self._num_files = 2\n self._num_records = 7\n self.test_filenames = self._createFiles()\n\n def make_batch_feature(self,\n filenames,\n num_epochs,\n batch_size,\n label_key=None,\n reader_num_threads=1,\n parser_num_threads=1,\n shuffle=False,\n shuffle_seed=None,\n drop_final_batch=False):\n self.filenames = filenames\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n\n return readers.make_batched_features_dataset(\n file_pattern=self.filenames,\n batch_size=self.batch_size,\n features={\n \"file\": parsing_ops.FixedLenFeature([], dtypes.int64),\n \"record\": parsing_ops.FixedLenFeature([], dtypes.int64),\n \"keywords\": parsing_ops.VarLenFeature(dtypes.string),\n \"label\": parsing_ops.FixedLenFeature([], dtypes.string),\n },\n label_key=label_key,\n reader=core_readers.TFRecordDataset,\n num_epochs=self.num_epochs,\n shuffle=shuffle,\n shuffle_seed=shuffle_seed,\n reader_num_threads=reader_num_threads,\n parser_num_threads=parser_num_threads,\n drop_final_batch=drop_final_batch)\n\n def _record(self, f, r, l):\n example = example_pb2.Example(\n features=feature_pb2.Features(\n feature={\n \"file\":\n feature_pb2.Feature(\n int64_list=feature_pb2.Int64List(value=[f])),\n \"record\":\n feature_pb2.Feature(\n int64_list=feature_pb2.Int64List(value=[r])),\n \"keywords\":\n feature_pb2.Feature(\n bytes_list=feature_pb2.BytesList(\n value=self._get_keywords(f, r))),\n \"label\":\n feature_pb2.Feature(\n bytes_list=feature_pb2.BytesList(\n value=[compat.as_bytes(l)]))\n }))\n return example.SerializeToString()\n\n def _get_keywords(self, f, r):\n num_keywords = 1 + (f + r) % 2\n keywords = []\n for index in range(num_keywords):\n keywords.append(compat.as_bytes(\"keyword%d\" % index))\n return keywords\n\n def _sum_keywords(self, num_files):\n sum_keywords = 0\n for i in range(num_files):\n for j in range(self._num_records):\n sum_keywords += 1 + (i + j) % 2\n return sum_keywords\n\n def _createFiles(self):\n filenames = []\n for i in range(self._num_files):\n fn = os.path.join(self.get_temp_dir(), \"tf_record.%d.txt\" % i)\n filenames.append(fn)\n writer = python_io.TFRecordWriter(fn)\n for j in range(self._num_records):\n writer.write(self._record(i, j, \"fake-label\"))\n writer.close()\n return filenames\n\n def _run_actual_batch(self, outputs, sess, label_key_provided=False):\n if label_key_provided:\n # outputs would be a tuple of (feature dict, label)\n label_op = outputs[1]\n features_op = outputs[0]\n else:\n features_op = outputs\n label_op = features_op[\"label\"]\n file_op = features_op[\"file\"]\n keywords_indices_op = features_op[\"keywords\"].indices\n keywords_values_op = features_op[\"keywords\"].values\n keywords_dense_shape_op = features_op[\"keywords\"].dense_shape\n record_op = features_op[\"record\"]\n return sess.run([\n file_op, keywords_indices_op, keywords_values_op,\n keywords_dense_shape_op, record_op, label_op\n ])\n\n def _next_actual_batch(self, sess, label_key_provided=False):\n return self._run_actual_batch(self.outputs, sess, label_key_provided)\n\n def _interleave(self, iterators, cycle_length):\n pending_iterators = iterators\n open_iterators = []\n num_open = 0\n for i in range(cycle_length):\n if pending_iterators:\n open_iterators.append(pending_iterators.pop(0))\n num_open += 1\n\n while num_open:\n for i in range(min(cycle_length, len(open_iterators))):\n if open_iterators[i] is None:\n continue\n try:\n yield 
next(open_iterators[i])\n except StopIteration:\n if pending_iterators:\n open_iterators[i] = pending_iterators.pop(0)\n else:\n open_iterators[i] = None\n num_open -= 1\n\n def _next_expected_batch(self,\n file_indices,\n batch_size,\n num_epochs,\n cycle_length=1):\n\n def _next_record(file_indices):\n for j in file_indices:\n for i in range(self._num_records):\n yield j, i, compat.as_bytes(\"fake-label\")\n\n def _next_record_interleaved(file_indices, cycle_length):\n return self._interleave([_next_record([i]) for i in file_indices],\n cycle_length)\n\n file_batch = []\n keywords_batch_indices = []\n keywords_batch_values = []\n keywords_batch_max_len = 0\n record_batch = []\n batch_index = 0\n label_batch = []\n for _ in range(num_epochs):\n if cycle_length == 1:\n next_records = _next_record(file_indices)\n else:\n next_records = _next_record_interleaved(file_indices, cycle_length)\n for record in next_records:\n f = record[0]\n r = record[1]\n label_batch.append(record[2])\n file_batch.append(f)\n record_batch.append(r)\n keywords = self._get_keywords(f, r)\n keywords_batch_values.extend(keywords)\n keywords_batch_indices.extend(\n [[batch_index, i] for i in range(len(keywords))])\n batch_index += 1\n keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))\n if len(file_batch) == batch_size:\n yield [\n file_batch, keywords_batch_indices, keywords_batch_values,\n [batch_size, keywords_batch_max_len], record_batch, label_batch\n ]\n file_batch = []\n keywords_batch_indices = []\n keywords_batch_values = []\n keywords_batch_max_len = 0\n record_batch = []\n batch_index = 0\n label_batch = []\n if file_batch:\n yield [\n file_batch, keywords_batch_indices, keywords_batch_values,\n [len(file_batch), keywords_batch_max_len], record_batch, label_batch\n ]\n\n def verify_records(self,\n sess,\n batch_size,\n file_index=None,\n num_epochs=1,\n label_key_provided=False,\n interleave_cycle_length=1):\n if file_index is not None:\n file_indices = [file_index]\n else:\n file_indices = range(self._num_files)\n\n for expected_batch in self._next_expected_batch(\n file_indices,\n batch_size,\n num_epochs,\n cycle_length=interleave_cycle_length):\n actual_batch = self._next_actual_batch(\n sess, label_key_provided=label_key_provided)\n for i in range(len(expected_batch)):\n self.assertAllEqual(expected_batch[i], actual_batch[i])\n\n\nclass TextLineDatasetTestBase(test.TestCase):\n \"\"\"Base class for setting up and testing TextLineDataset.\"\"\"\n\n def _lineText(self, f, l):\n return compat.as_bytes(\"%d: %d\" % (f, l))\n\n def _createFiles(self,\n num_files,\n num_lines,\n crlf=False,\n compression_type=None):\n filenames = []\n for i in range(num_files):\n fn = os.path.join(self.get_temp_dir(), \"text_line.%d.txt\" % i)\n filenames.append(fn)\n contents = []\n for j in range(num_lines):\n contents.append(self._lineText(i, j))\n # Always include a newline after the record unless it is\n # at the end of the file, in which case we include it\n if j + 1 != num_lines or i == 0:\n contents.append(b\"\\r\\n\" if crlf else b\"\\n\")\n contents = b\"\".join(contents)\n\n if not compression_type:\n with open(fn, \"wb\") as f:\n f.write(contents)\n elif compression_type == \"GZIP\":\n with gzip.GzipFile(fn, \"wb\") as f:\n f.write(contents)\n elif compression_type == \"ZLIB\":\n contents = zlib.compress(contents)\n with open(fn, \"wb\") as f:\n f.write(contents)\n else:\n raise ValueError(\"Unsupported compression_type\", compression_type)\n\n return filenames\n\n\nclass 
TFRecordDatasetTestBase(test.TestCase):\n \"\"\"Base class for setting up and testing TFRecordDataset.\"\"\"\n\n def setUp(self):\n super(TFRecordDatasetTestBase, self).setUp()\n self._num_files = 2\n self._num_records = 7\n\n self.test_filenames = self._createFiles()\n\n self.filenames = array_ops.placeholder(dtypes.string, shape=[None])\n self.num_epochs = array_ops.placeholder_with_default(\n constant_op.constant(1, dtypes.int64), shape=[])\n self.compression_type = array_ops.placeholder_with_default(\"\", shape=[])\n self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])\n\n repeat_dataset = core_readers.TFRecordDataset(\n self.filenames, self.compression_type).repeat(self.num_epochs)\n batch_dataset = repeat_dataset.batch(self.batch_size)\n\n iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)\n self.init_op = iterator.make_initializer(repeat_dataset)\n self.init_batch_op = iterator.make_initializer(batch_dataset)\n self.get_next = iterator.get_next()\n\n def _record(self, f, r):\n return compat.as_bytes(\"Record %d of file %d\" % (r, f))\n\n def _createFiles(self):\n filenames = []\n for i in range(self._num_files):\n fn = os.path.join(self.get_temp_dir(), \"tf_record.%d.txt\" % i)\n filenames.append(fn)\n writer = python_io.TFRecordWriter(fn)\n for j in range(self._num_records):\n writer.write(self._record(i, j))\n writer.close()\n return filenames\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Run Config (deprecated, use tf.estimator.RunConfig instead).\n\nThis module and all its submodules are deprecated. See\n[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)\nfor migration instructions.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport os\n\nimport six\n\nfrom tensorflow.contrib.framework.python.framework import experimental\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.estimator import run_config as core_run_config\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.util.deprecation import deprecated\n\n\n# A list of the property names in RunConfig user allows to change. 
They will\n# not affect the execution framework, so when execution framework checks the\n# `uid` of the RunConfig, it should be ignored.\n_DEFAULT_UID_WHITE_LIST = [\n 'tf_random_seed',\n 'save_summary_steps',\n 'save_checkpoints_steps',\n 'save_checkpoints_secs',\n 'session_config',\n 'keep_checkpoint_max',\n 'keep_checkpoint_every_n_hours',\n 'log_step_count_steps',\n]\n\n\nclass Environment(object):\n \"\"\"DEPRECATED CLASS.\"\"\"\n # For running general distributed training.\n CLOUD = 'cloud'\n # For running Google-internal distributed training.\n GOOGLE = 'google'\n # For running on local desktop.\n LOCAL = 'local'\n\n\nclass TaskType(object):\n \"\"\"DEPRECATED CLASS.\"\"\"\n MASTER = 'master'\n PS = 'ps'\n WORKER = 'worker'\n\n\nclass ClusterConfig(object):\n \"\"\"This class specifies the configurations for a distributed run.\n\n THIS CLASS IS DEPRECATED. Use tf.estimator.RunConfig instead.\n\n If you're using an `Estimator`, you should probably use the subclass\n RunConfig instead.\n \"\"\"\n\n def __init__(self, master=None, evaluation_master=None):\n \"\"\"Constructor.\n\n Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the\n args), `num_ps_replicas`, `task_id`, and `task_type` based on the\n `TF_CONFIG` environment variable, if the pertinent information is\n present. The `TF_CONFIG` environment variable is a JSON object with\n attributes: `cluster`, `environment`, and `task`.\n\n `cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from\n `server_lib.py`, mapping task types (usually one of the TaskType enums) to a\n list of task addresses.\n\n `environment` specifies the runtime environment for the job (usually one of\n the `Environment` enums). Defaults to `LOCAL`.\n\n `task` has two attributes: `type` and `index`, where `type` can be any of\n the task types in `cluster`. When `TF_CONFIG` contains said information, the\n following properties are set on this class:\n\n * `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.\n * `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.\n * `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.\n * `master` is determined by looking up `task_type` and `task_id` in the\n `cluster_spec`. Defaults to ''.\n * `num_ps_replicas` is set by counting the number of nodes listed\n in the `ps` attribute of `cluster_spec`. Defaults to 0.\n * `num_worker_replicas` is set by counting the number of nodes listed\n in the `worker` attribute of `cluster_spec`. Defaults to 0.\n * `is_chief` is deteremined based on `task_type`, `type_id`, and\n `environment`.\n\n Example:\n ```\n cluster = {'ps': ['host1:2222', 'host2:2222'],\n 'worker': ['host3:2222', 'host4:2222', 'host5:2222']}\n os.environ['TF_CONFIG'] = json.dumps(\n {'cluster': cluster,\n 'task': {'type': 'worker', 'index': 1}})\n config = ClusterConfig()\n assert config.master == 'host4:2222'\n assert config.task_id == 1\n assert config.num_ps_replicas == 2\n assert config.num_worker_replicas == 3\n assert config.cluster_spec == server_lib.ClusterSpec(cluster)\n assert config.task_type == 'worker'\n assert not config.is_chief\n ```\n\n Args:\n master: TensorFlow master. 
Defaults to empty string for local.\n evaluation_master: The master on which to perform evaluation.\n \"\"\"\n # If not explicitly specified in the constructor and the TF_CONFIG\n # environment variable is present, load cluster_spec from TF_CONFIG.\n config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n\n # Set task_type and task_id if the TF_CONFIG environment variable is\n # present. Otherwise, use the respective default (None / 0).\n task_env = config.get('task', {})\n self._task_type = task_env.get('type', None)\n self._task_id = self.get_task_id()\n\n self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))\n self._master = (master if master is not None else\n _get_master(self._cluster_spec, self._task_type,\n self._task_id) or '')\n self._num_ps_replicas = _count_ps(self._cluster_spec) or 0\n self._num_worker_replicas = _count_worker(self._cluster_spec) or 0\n\n # Set is_chief.\n self._environment = config.get('environment', Environment.LOCAL)\n self._is_chief = None\n if self._task_type is None:\n self._is_chief = (self._task_id == 0)\n elif self._environment == Environment.CLOUD:\n # When the TF_CONFIG environment variable is set, we can set the\n # default of is_chief to 0 when task_type is \"master\" and task_id is 0.\n self._is_chief = (self._task_type == TaskType.MASTER and\n self._task_id == 0)\n else:\n # Legacy behavior is that is_chief is None if task_id == 0.\n self._is_chief = (self._task_type == TaskType.WORKER and\n self._task_id == 0)\n\n self._evaluation_master = evaluation_master or ''\n\n @property\n def cluster_spec(self):\n return self._cluster_spec\n\n @property\n def environment(self):\n return self._environment\n\n @property\n def evaluation_master(self):\n return self._evaluation_master\n\n @property\n def is_chief(self):\n return self._is_chief\n\n @property\n def master(self):\n return self._master\n\n @property\n def num_ps_replicas(self):\n return self._num_ps_replicas\n\n @property\n def num_worker_replicas(self):\n return self._num_worker_replicas\n\n @property\n def task_id(self):\n return self._task_id\n\n @property\n def task_type(self):\n return self._task_type\n\n @staticmethod\n def get_task_id():\n \"\"\"Returns task index from `TF_CONFIG` environmental variable.\n\n If you have a ClusterConfig instance, you can just access its task_id\n property instead of calling this function and re-parsing the environmental\n variable.\n\n Returns:\n `TF_CONFIG['task']['index']`. 
Defaults to 0.\n \"\"\"\n config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n task_env = config.get('task', {})\n task_index = task_env.get('index')\n return int(task_index) if task_index else 0\n\n\nclass RunConfig(ClusterConfig, core_run_config.RunConfig):\n \"\"\"This class specifies the configurations for an `Estimator` run.\n\n This class is a deprecated implementation of `tf.estimator.RunConfig`\n interface.\n \"\"\"\n _USE_DEFAULT = 0\n\n @deprecated(None, 'When switching to tf.estimator.Estimator, use'\n ' tf.estimator.RunConfig instead.')\n def __init__(self,\n master=None,\n num_cores=0,\n log_device_placement=False,\n gpu_memory_fraction=1,\n tf_random_seed=None,\n save_summary_steps=100,\n save_checkpoints_secs=_USE_DEFAULT,\n save_checkpoints_steps=None,\n keep_checkpoint_max=5,\n keep_checkpoint_every_n_hours=10000,\n log_step_count_steps=100,\n protocol=None,\n evaluation_master='',\n model_dir=None,\n session_config=None):\n \"\"\"Constructor.\n\n The superclass `ClusterConfig` may set properties like `cluster_spec`,\n `is_chief`, `master` (if `None` in the args), `num_ps_replicas`, `task_id`,\n and `task_type` based on the `TF_CONFIG` environment variable. See\n `ClusterConfig` for more details.\n\n N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,\n `keep_checkpoint_max` might need to be adjusted accordingly, especially in\n distributed training. For example, setting `save_checkpoints_secs` as 60\n without adjusting `keep_checkpoint_max` (defaults to 5) leads to situation\n that checkpoint would be garbage collected after 5 minutes. In distributed\n training, the evaluation job starts asynchronously and might fail to load or\n find the checkpoint due to race condition.\n\n Args:\n master: TensorFlow master. Defaults to empty string for local.\n num_cores: Number of cores to be used. If 0, the system picks an\n appropriate number (default: 0).\n log_device_placement: Log the op placement to devices (default: False).\n gpu_memory_fraction: Fraction of GPU memory used by the process on\n each GPU uniformly on the same machine.\n tf_random_seed: Random seed for TensorFlow initializers.\n Setting this value allows consistency between reruns.\n save_summary_steps: Save summaries every this many steps.\n save_checkpoints_secs: Save checkpoints every this many seconds. Can not\n be specified with `save_checkpoints_steps`.\n save_checkpoints_steps: Save checkpoints every this many steps. Can not be\n specified with `save_checkpoints_secs`.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent\n checkpoint files are kept.)\n keep_checkpoint_every_n_hours: Number of hours between each checkpoint\n to be saved. The default value of 10,000 hours effectively disables\n the feature.\n log_step_count_steps: The frequency, in number of global steps, that the\n global step/sec will be logged during training.\n evaluation_master: the master on which to perform evaluation.\n model_dir: directory where model parameters, graph etc are saved. If\n `None`, will use `model_dir` property in `TF_CONFIG` environment\n variable. If both are set, must have same value. 
If both are `None`, see\n `Estimator` about where the model will be saved.\n session_config: a ConfigProto used to set session parameters, or None.\n Note - using this argument, it is easy to provide settings which break\n otherwise perfectly good models. Use with care.\n protocol: An optional argument which specifies the protocol used when\n starting server. None means default to grpc.\n \"\"\"\n # Neither parent class calls super().__init__(), so here we have to\n # manually call their __init__() methods.\n ClusterConfig.__init__(\n self, master=master, evaluation_master=evaluation_master)\n # For too long this code didn't call:\n # core_run_config.RunConfig.__init__(self)\n # so instead of breaking compatibility with that assumption, we\n # just manually initialize this field:\n self._train_distribute = None\n self._eval_distribute = None\n self._device_fn = None\n\n gpu_options = config_pb2.GPUOptions(\n per_process_gpu_memory_fraction=gpu_memory_fraction)\n self._tf_config = config_pb2.ConfigProto(\n log_device_placement=log_device_placement,\n inter_op_parallelism_threads=num_cores,\n intra_op_parallelism_threads=num_cores,\n gpu_options=gpu_options)\n\n self._tf_random_seed = tf_random_seed\n self._save_summary_steps = save_summary_steps\n self._save_checkpoints_secs = save_checkpoints_secs\n self._log_step_count_steps = log_step_count_steps\n self._protocol = protocol\n self._session_config = session_config\n if save_checkpoints_secs == RunConfig._USE_DEFAULT:\n if save_checkpoints_steps is None:\n self._save_checkpoints_secs = 600\n else:\n self._save_checkpoints_secs = None\n self._save_checkpoints_steps = save_checkpoints_steps\n\n # TODO(weiho): Remove these after ModelFn refactoring, when users can\n # create Scaffold and Saver in their model_fn to set these.\n self._keep_checkpoint_max = keep_checkpoint_max\n self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\n self._model_dir = _get_model_dir(model_dir)\n\n @experimental\n def uid(self, whitelist=None):\n \"\"\"Generates a 'Unique Identifier' based on all internal fields.\n\n Caller should use the uid string to check `RunConfig` instance integrity\n in one session use, but should not rely on the implementation details, which\n is subject to change.\n\n Args:\n whitelist: A list of the string names of the properties uid should not\n include. 
If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which\n includes most properties user allowes to change.\n\n Returns:\n A uid string.\n \"\"\"\n if whitelist is None:\n whitelist = _DEFAULT_UID_WHITE_LIST\n\n state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}\n # Pop out the keys in whitelist.\n for k in whitelist:\n state.pop('_' + k, None)\n\n ordered_state = collections.OrderedDict(\n sorted(state.items(), key=lambda t: t[0]))\n # For class instance without __repr__, some special cares are required.\n # Otherwise, the object address will be used.\n if '_cluster_spec' in ordered_state:\n ordered_state['_cluster_spec'] = collections.OrderedDict(\n sorted(ordered_state['_cluster_spec'].as_dict().items(),\n key=lambda t: t[0]))\n return ', '.join(\n '%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))\n\n @property\n def model_dir(self):\n return self._model_dir\n\n @property\n def tf_config(self):\n return self._tf_config\n\n @property\n def tf_random_seed(self):\n return self._tf_random_seed\n\n @property\n def save_summary_steps(self):\n return self._save_summary_steps\n\n @property\n def save_checkpoints_secs(self):\n return self._save_checkpoints_secs\n\n @property\n def save_checkpoints_steps(self):\n return self._save_checkpoints_steps\n\n @property\n def session_config(self):\n return self._session_config\n\n @property\n def keep_checkpoint_max(self):\n return self._keep_checkpoint_max\n\n @property\n def keep_checkpoint_every_n_hours(self):\n return self._keep_checkpoint_every_n_hours\n\n @property\n def log_step_count_steps(self):\n return self._log_step_count_steps\n\n\ndef _count_ps(cluster_spec):\n \"\"\"Counts the number of parameter servers in cluster_spec.\"\"\"\n return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0\n\n\ndef _count_worker(cluster_spec):\n \"\"\"Counts the number of workers in cluster_spec.\n\n Workers with TaskType.WORKER and TaskType.MASTER are included in the return\n value.\n\n Args:\n cluster_spec: a ClusterSpec instance that describes current deployment.\n\n Returns:\n The total number of eligible workers.\n\n If 'cluster_spec' was None, then 0 is returned.\n \"\"\"\n return (len(cluster_spec.as_dict().get('worker', [])) +\n len(cluster_spec.as_dict().get('master', []))) if cluster_spec else 0\n\n\ndef _get_master(cluster_spec, task_type, task_id):\n \"\"\"Returns the appropriate string for the TensorFlow master.\"\"\"\n if not cluster_spec:\n return ''\n\n # If there is only one node in the cluster, do things locally.\n jobs = cluster_spec.jobs\n if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:\n return ''\n\n # Lookup the master in cluster_spec using task_type and task_id,\n # if possible.\n if task_type:\n if task_type not in jobs:\n raise ValueError(\n '%s is not a valid task_type in the cluster_spec:\\n'\n '%s\\n\\n'\n 'Note that these values may be coming from the TF_CONFIG environment '\n 'variable.' % (task_type, cluster_spec))\n addresses = cluster_spec.job_tasks(task_type)\n if task_id >= len(addresses) or task_id < 0:\n raise ValueError(\n '%d is not a valid task_id for task_type %s in the '\n 'cluster_spec:\\n'\n '%s\\n\\n'\n 'Note that these value may be coming from the TF_CONFIG environment '\n 'variable.' 
% (task_id, task_type, cluster_spec))\n return 'grpc://' + addresses[task_id]\n\n # For backwards compatibility, we return empty string if task_type was\n # not set (task_type did not previously exist).\n return ''\n\n\ndef _get_model_dir(model_dir):\n \"\"\"Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`.\"\"\"\n\n model_dir_in_tf_config = json.loads(\n os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)\n if model_dir_in_tf_config is not None:\n if model_dir is not None and model_dir_in_tf_config != model_dir:\n raise ValueError(\n '`model_dir` provided in RunConfig construct, if set, '\n 'must have the same value as the model_dir in TF_CONFIG. '\n 'model_dir: {}\\nTF_CONFIG[\"model_dir\"]: {}.\\n'.format(\n model_dir, model_dir_in_tf_config))\n\n logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)\n\n return model_dir or model_dir_in_tf_config\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Base class for RpcOp tests.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.contrib.proto.python.ops import decode_proto_op\nfrom tensorflow.contrib.proto.python.ops import encode_proto_op\nfrom tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2\nfrom tensorflow.contrib.rpc.python.ops import rpc_op\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\n\n__all__ = ['I_WARNED_YOU', 'RpcOpTestBase']\n\nI_WARNED_YOU = 'I warned you!'\n\n\nclass RpcOpTestBase(object):\n # pylint: disable=missing-docstring,invalid-name\n \"\"\"Base class for RpcOp tests.\"\"\"\n\n def get_method_name(self, suffix):\n raise NotImplementedError\n\n def rpc(self, *args, **kwargs):\n return rpc_op.rpc(*args, protocol=self._protocol, **kwargs)\n\n def try_rpc(self, *args, **kwargs):\n return rpc_op.try_rpc(*args, protocol=self._protocol, **kwargs)\n\n def testScalarHostPortRpc(self):\n with self.test_session() as sess:\n request_tensors = (\n test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())\n response_tensors = self.rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n request=request_tensors)\n self.assertEqual(response_tensors.shape, ())\n response_values = sess.run(response_tensors)\n response_message = test_example_pb2.TestCase()\n self.assertTrue(response_message.ParseFromString(response_values))\n self.assertAllEqual([2, 3, 4], response_message.values)\n\n def testScalarHostPortTryRpc(self):\n with self.test_session() as sess:\n request_tensors = (\n test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())\n response_tensors, status_code, status_message = self.try_rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n 
request=request_tensors)\n self.assertEqual(status_code.shape, ())\n self.assertEqual(status_message.shape, ())\n self.assertEqual(response_tensors.shape, ())\n response_values, status_code_values, status_message_values = (\n sess.run((response_tensors, status_code, status_message)))\n response_message = test_example_pb2.TestCase()\n self.assertTrue(response_message.ParseFromString(response_values))\n self.assertAllEqual([2, 3, 4], response_message.values)\n # For the base Rpc op, don't expect to get error status back.\n self.assertEqual(errors.OK, status_code_values)\n self.assertEqual(b'', status_message_values)\n\n def testEmptyHostPortRpc(self):\n with self.test_session() as sess:\n request_tensors = []\n response_tensors = self.rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n request=request_tensors)\n self.assertAllEqual(response_tensors.shape, [0])\n response_values = sess.run(response_tensors)\n self.assertAllEqual(response_values.shape, [0])\n\n def testInvalidMethod(self):\n for method in [\n '/InvalidService.Increment',\n self.get_method_name('InvalidMethodName')\n ]:\n with self.test_session() as sess:\n with self.assertRaisesOpError(self.invalid_method_string):\n sess.run(self.rpc(method=method, address=self._address, request=''))\n\n _, status_code_value, status_message_value = sess.run(\n self.try_rpc(method=method, address=self._address, request=''))\n self.assertEqual(errors.UNIMPLEMENTED, status_code_value)\n self.assertTrue(\n self.invalid_method_string in status_message_value.decode('ascii'))\n\n def testInvalidAddress(self):\n # This covers the case of address='' and address='localhost:293874293874'\n address = 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'\n with self.test_session() as sess:\n with self.assertRaises(errors.UnavailableError):\n sess.run(\n self.rpc(\n method=self.get_method_name('Increment'),\n address=address,\n request=''))\n _, status_code_value, status_message_value = sess.run(\n self.try_rpc(\n method=self.get_method_name('Increment'),\n address=address,\n request=''))\n self.assertEqual(errors.UNAVAILABLE, status_code_value)\n self.assertTrue(\n self.connect_failed_string in status_message_value.decode('ascii'))\n\n def testAlwaysFailingMethod(self):\n with self.test_session() as sess:\n response_tensors = self.rpc(\n method=self.get_method_name('AlwaysFailWithInvalidArgument'),\n address=self._address,\n request='')\n self.assertEqual(response_tensors.shape, ())\n with self.assertRaisesOpError(I_WARNED_YOU):\n sess.run(response_tensors)\n\n response_tensors, status_code, status_message = self.try_rpc(\n method=self.get_method_name('AlwaysFailWithInvalidArgument'),\n address=self._address,\n request='')\n self.assertEqual(response_tensors.shape, ())\n self.assertEqual(status_code.shape, ())\n self.assertEqual(status_message.shape, ())\n status_code_value, status_message_value = sess.run((status_code,\n status_message))\n self.assertEqual(errors.INVALID_ARGUMENT, status_code_value)\n self.assertTrue(I_WARNED_YOU in status_message_value.decode('ascii'))\n\n def testSometimesFailingMethodWithManyRequests(self):\n with self.test_session() as sess:\n # Fail hard by default.\n response_tensors = self.rpc(\n method=self.get_method_name('SometimesFailWithInvalidArgument'),\n address=self._address,\n request=[''] * 20)\n self.assertEqual(response_tensors.shape, (20,))\n with self.assertRaisesOpError(I_WARNED_YOU):\n sess.run(response_tensors)\n\n # Don't fail hard, use TryRpc - return the failing status instead.\n 
response_tensors, status_code, status_message = self.try_rpc(\n method=self.get_method_name('SometimesFailWithInvalidArgument'),\n address=self._address,\n request=[''] * 20)\n self.assertEqual(response_tensors.shape, (20,))\n self.assertEqual(status_code.shape, (20,))\n self.assertEqual(status_message.shape, (20,))\n status_code_values, status_message_values = sess.run((status_code,\n status_message))\n self.assertTrue([\n x in (errors.OK, errors.INVALID_ARGUMENT) for x in status_code_values\n ])\n expected_message_values = np.where(\n status_code_values == errors.INVALID_ARGUMENT,\n I_WARNED_YOU.encode('ascii'), b'')\n self.assertAllEqual(expected_message_values, status_message_values)\n\n def testVecHostPortRpc(self):\n with self.test_session() as sess:\n request_tensors = [\n test_example_pb2.TestCase(\n values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)\n ]\n response_tensors = self.rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n request=request_tensors)\n self.assertEqual(response_tensors.shape, (20,))\n response_values = sess.run(response_tensors)\n self.assertEqual(response_values.shape, (20,))\n for i in range(20):\n response_message = test_example_pb2.TestCase()\n self.assertTrue(response_message.ParseFromString(response_values[i]))\n self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)\n\n def testVecHostPortManyParallelRpcs(self):\n with self.test_session() as sess:\n request_tensors = [\n test_example_pb2.TestCase(\n values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)\n ]\n many_response_tensors = [\n self.rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n request=request_tensors) for _ in range(10)\n ]\n # Launch parallel 10 calls to the RpcOp, each containing 20 rpc requests.\n many_response_values = sess.run(many_response_tensors)\n self.assertEqual(10, len(many_response_values))\n for response_values in many_response_values:\n self.assertEqual(response_values.shape, (20,))\n for i in range(20):\n response_message = test_example_pb2.TestCase()\n self.assertTrue(response_message.ParseFromString(response_values[i]))\n self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)\n\n def testVecHostPortRpcUsingEncodeAndDecodeProto(self):\n with self.test_session() as sess:\n request_tensors = encode_proto_op.encode_proto(\n message_type='tensorflow.contrib.rpc.TestCase',\n field_names=['values'],\n sizes=[[3]] * 20,\n values=[\n [[i, i + 1, i + 2] for i in range(20)],\n ])\n response_tensor_strings = self.rpc(\n method=self.get_method_name('Increment'),\n address=self._address,\n request=request_tensors)\n _, (response_shape,) = decode_proto_op.decode_proto(\n bytes=response_tensor_strings,\n message_type='tensorflow.contrib.rpc.TestCase',\n field_names=['values'],\n output_types=[dtypes.int32])\n response_shape_values = sess.run(response_shape)\n self.assertAllEqual([[i + 1, i + 2, i + 3]\n for i in range(20)], response_shape_values)\n\n def testVecHostPortRpcCancelsUponSessionTimeOutWhenSleepingForever(self):\n with self.test_session() as sess:\n request_tensors = [''] * 25 # This will launch 25 RPC requests.\n response_tensors = self.rpc(\n method=self.get_method_name('SleepForever'),\n address=self._address,\n request=request_tensors)\n for timeout_ms in [1, 500, 1000]:\n options = config_pb2.RunOptions(timeout_in_ms=timeout_ms)\n with self.assertRaises((errors.UnavailableError,\n errors.DeadlineExceededError)):\n sess.run(response_tensors, options=options)\n\n def 
testVecHostPortRpcCancelsUponConfiguredTimeOutWhenSleepingForever(self):\n with self.test_session() as sess:\n request_tensors = [''] * 25 # This will launch 25 RPC requests.\n response_tensors = self.rpc(\n method=self.get_method_name('SleepForever'),\n address=self._address,\n timeout_in_ms=1000,\n request=request_tensors)\n with self.assertRaises(errors.DeadlineExceededError):\n sess.run(response_tensors)\n\n def testTryRpcPropagatesDeadlineErrorWithSometimesTimingOutRequests(self):\n with self.test_session() as sess:\n response_tensors, status_code, status_message = self.try_rpc(\n method=self.get_method_name('SometimesSleepForever'),\n timeout_in_ms=1000,\n address=self._address,\n request=[''] * 20)\n self.assertEqual(response_tensors.shape, (20,))\n self.assertEqual(status_code.shape, (20,))\n self.assertEqual(status_message.shape, (20,))\n status_code_values = sess.run(status_code)\n self.assertTrue([\n x in (errors.OK, errors.DEADLINE_EXCEEDED) for x in status_code_values\n ])\n\n def testTryRpcWithMultipleAddressesSingleRequest(self):\n flatten = lambda x: list(itertools.chain.from_iterable(x))\n with self.test_session() as sess:\n addresses = flatten([[\n self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'\n ] for _ in range(10)])\n request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()\n response_tensors, status_code, _ = self.try_rpc(\n method=self.get_method_name('Increment'),\n address=addresses,\n request=request)\n response_tensors_values, status_code_values = sess.run((response_tensors,\n status_code))\n self.assertAllEqual(\n flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),\n status_code_values)\n for i in range(10):\n self.assertTrue(response_tensors_values[2 * i])\n self.assertFalse(response_tensors_values[2 * i + 1])\n\n def testTryRpcWithMultipleMethodsSingleRequest(self):\n flatten = lambda x: list(itertools.chain.from_iterable(x))\n with self.test_session() as sess:\n methods = flatten(\n [[self.get_method_name('Increment'), 'InvalidMethodName']\n for _ in range(10)])\n request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()\n response_tensors, status_code, _ = self.try_rpc(\n method=methods, address=self._address, request=request)\n response_tensors_values, status_code_values = sess.run((response_tensors,\n status_code))\n self.assertAllEqual(\n flatten([errors.OK, errors.UNIMPLEMENTED] for _ in range(10)),\n status_code_values)\n for i in range(10):\n self.assertTrue(response_tensors_values[2 * i])\n self.assertFalse(response_tensors_values[2 * i + 1])\n\n def testTryRpcWithMultipleAddressesAndRequests(self):\n flatten = lambda x: list(itertools.chain.from_iterable(x))\n with self.test_session() as sess:\n addresses = flatten([[\n self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'\n ] for _ in range(10)])\n requests = [\n test_example_pb2.TestCase(\n values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)\n ]\n response_tensors, status_code, _ = self.try_rpc(\n method=self.get_method_name('Increment'),\n address=addresses,\n request=requests)\n response_tensors_values, status_code_values = sess.run((response_tensors,\n status_code))\n self.assertAllEqual(\n flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),\n status_code_values)\n for i in range(20):\n if i % 2 == 1:\n self.assertFalse(response_tensors_values[i])\n else:\n response_message = test_example_pb2.TestCase()\n self.assertTrue(\n response_message.ParseFromString(response_tensors_values[i]))\n 
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nimport numpy as np\n\nfrom tensorflow.contrib.lite.python import lite\nfrom tensorflow.contrib.lite.python import lite_constants\nfrom tensorflow.contrib.lite.python.interpreter import Interpreter\nfrom tensorflow.python import keras\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.training.training_util import write_graph\n\n\nclass FromConstructor(test_util.TensorFlowTestCase):\n\n # Tests invalid constructors using a dummy value for the GraphDef.\n def testInvalidConstructor(self):\n message = ('If input_tensors and output_tensors are None, both '\n 'input_arrays_with_shape and output_arrays must be defined.')\n\n # `output_arrays` is not defined.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter(\n None, None, [], input_arrays_with_shape=[('input', [3, 9])])\n self.assertEqual(message, str(error.exception))\n\n # `input_arrays_with_shape` is not defined.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter(None, [], None, output_arrays=['output'])\n self.assertEqual(message, str(error.exception))\n\n # Tests valid constructors using a dummy value for the GraphDef.\n def testValidConstructor(self):\n converter = lite.TocoConverter(\n None,\n None,\n None,\n input_arrays_with_shape=[('input', [3, 9])],\n output_arrays=['output'])\n self.assertFalse(converter._has_valid_tensors())\n self.assertEqual(converter.get_input_arrays(), ['input'])\n\n with self.assertRaises(ValueError) as error:\n converter._set_batch_size(1)\n self.assertEqual(\n 'The batch size cannot be set for this model. 
Please use '\n 'input_shapes parameter.', str(error.exception))\n\n converter = lite.TocoConverter(None, ['input_tensor'], ['output_tensor'])\n self.assertTrue(converter._has_valid_tensors())\n\n\nclass FromSessionTest(test_util.TensorFlowTestCase):\n\n def testFloat(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testQuantization(self):\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n out_tensor = array_ops.fake_quant_with_min_max_args(\n in_tensor_1 + in_tensor_2, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(\n sess, [in_tensor_1, in_tensor_2], [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {\n 'inputA': (0., 1.),\n 'inputB': (0., 1.)\n } # mean, std_dev\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.),\n input_details[0]['quantization']) # scale, zero_point\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.uint8, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((1., 0.),\n input_details[1]['quantization']) # scale, zero_point\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('output', output_details[0]['name'])\n self.assertEqual(np.uint8, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertTrue(output_details[0]['quantization'][0] > 0) # scale\n\n def testQuantizationInvalid(self):\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n out_tensor 
= array_ops.fake_quant_with_min_max_args(\n in_tensor_1 + in_tensor_2, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(\n sess, [in_tensor_1, in_tensor_2], [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'Quantization input stats are not available for input tensors '\n '\\'inputB\\'.', str(error.exception))\n\n def testSizeNoneInvalid(self):\n in_tensor = array_ops.placeholder(dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Test invalid shape. None after 1st dimension.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual('Provide an input shape for input array \\'Placeholder\\'.',\n str(error.exception))\n\n def testBatchSizeInvalid(self):\n in_tensor = array_ops.placeholder(\n shape=[1, None, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Test invalid shape. None after 1st dimension.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'None is only supported in the 1st dimension. Tensor '\n '\\'Placeholder\\' has invalid shape \\'[1, None, 16, 3]\\'.',\n str(error.exception))\n\n def testBatchSizeValid(self):\n in_tensor = array_ops.placeholder(\n shape=[None, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testFreezeGraph(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n var = variable_scope.get_variable(\n 'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + var\n sess = session.Session()\n sess.run(_global_variables_initializer())\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n 
self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # TODO(nupurgarg): Verify value of contents in GraphViz.\n def testGraphviz(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n converter.output_format = lite_constants.GRAPHVIZ_DOT\n graphviz_output = converter.convert()\n self.assertTrue(graphviz_output)\n\n # TODO(nupurgarg): Verify value of contents in GraphViz.\n def testDumpGraphviz(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n graphviz_dir = self.get_temp_dir()\n converter.dump_graphviz_dir = graphviz_dir\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure interpreter is able to allocate and check graphviz data.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n num_items_graphviz = len(os.listdir(graphviz_dir))\n self.assertTrue(num_items_graphviz)\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n graphviz_dir = self.get_temp_dir()\n converter.dump_graphviz_dir = graphviz_dir\n converter.dump_graphviz_video = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure graphviz folder has more data after using video flag.\n num_items_graphviz_video = len(os.listdir(graphviz_dir))\n self.assertTrue(num_items_graphviz_video > num_items_graphviz)\n\n def testInferenceInputType(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n converter.inference_input_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, 
output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n\n def testDefaultRangesStats(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev\n converter.default_ranges_stats = (0, 6) # min, max\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.uint8, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertTrue(output_details[0]['quantization'][0] > 0) # scale\n\n def testPostTrainingQuantize(self):\n np.random.seed(0)\n # We need the tensor to have more than 1024 elements for quantize_weights\n # to kick in. Thus, the [33, 33] shape.\n in_tensor_1 = array_ops.placeholder(\n shape=[33, 33], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = constant_op.constant(\n np.random.uniform(low=-10., high=10., size=(33, 33)),\n shape=[33, 33],\n dtype=dtypes.float32,\n name='inputB')\n out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')\n sess = session.Session()\n\n # Convert float model.\n float_converter = lite.TocoConverter.from_session(sess, [in_tensor_1],\n [out_tensor])\n float_tflite = float_converter.convert()\n self.assertTrue(float_tflite)\n\n # Convert quantized weights model.\n quantized_converter = lite.TocoConverter.from_session(\n sess, [in_tensor_1], [out_tensor])\n quantized_converter.post_training_quantize = True\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertTrue(len(quantized_tflite) < len(float_tflite))\n\n\nclass FromFrozenGraphFile(test_util.TensorFlowTestCase):\n\n def testFloat(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_frozen_graph(graph_def_file,\n ['Placeholder'], ['add'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n 
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testFloatWithShapesArray(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_frozen_graph(\n graph_def_file, ['Placeholder'], ['add'],\n input_shapes={'Placeholder': [1, 16, 16, 3]})\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n\n def testFreezeGraph(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n var = variable_scope.get_variable(\n 'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + var\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Ensure the graph with variables cannot be converted.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],\n ['add'])\n self.assertEqual('Please freeze the graph using freeze_graph.py.',\n str(error.exception))\n\n def testPbtxt(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')\n write_graph(sess.graph_def, '', graph_def_file, True)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_frozen_graph(graph_def_file,\n ['Placeholder'], ['add'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testInvalidFile(self):\n graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')\n with 
gfile.Open(graph_def_file, 'wb') as temp_file:\n temp_file.write('bad data')\n temp_file.flush()\n\n # Attempts to convert the invalid model.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],\n ['add'])\n self.assertEqual(\n 'Unable to parse input file \\'{}\\'.'.format(graph_def_file),\n str(error.exception))\n\n # TODO(nupurgarg): Test model loading in open source.\n def _initObjectDetectionArgs(self):\n # Initializes the arguments required for the object detection model.\n self._graph_def_file = resource_loader.get_path_to_datafile(\n 'testdata/tflite_graph.pbtxt')\n self._input_arrays = ['normalized_input_image_tensor']\n self._output_arrays = [\n 'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',\n 'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'\n ]\n self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}\n\n def testTFLiteGraphDef(self):\n # Tests the object detection model that cannot be loaded in TensorFlow.\n self._initObjectDetectionArgs()\n\n converter = lite.TocoConverter.from_frozen_graph(\n self._graph_def_file, self._input_arrays, self._output_arrays,\n self._input_shapes)\n converter.allow_custom_ops = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(4, len(output_details))\n self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n self.assertEqual('TFLite_Detection_PostProcess:1',\n output_details[1]['name'])\n self.assertTrue(([1, 10] == output_details[1]['shape']).all())\n self.assertEqual('TFLite_Detection_PostProcess:2',\n output_details[2]['name'])\n self.assertTrue(([1, 10] == output_details[2]['shape']).all())\n self.assertEqual('TFLite_Detection_PostProcess:3',\n output_details[3]['name'])\n self.assertTrue(([1] == output_details[3]['shape']).all())\n\n def testTFLiteGraphDefInvalid(self):\n # Tests invalid cases for the model that cannot be loaded in TensorFlow.\n self._initObjectDetectionArgs()\n\n # Missing `input_shapes`.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_frozen_graph(\n self._graph_def_file, self._input_arrays, self._output_arrays)\n self.assertEqual('input_shapes must be defined for this model.',\n str(error.exception))\n\n # `input_shapes` does not contain the names in `input_arrays`.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_frozen_graph(\n self._graph_def_file,\n self._input_arrays,\n self._output_arrays,\n input_shapes={'invalid-value': [1, 19]})\n self.assertEqual(\n 'input_shapes must contain a value for each item in input_array.',\n str(error.exception))\n\n\nclass FromSavedModelTest(test_util.TensorFlowTestCase):\n\n def _createSavedModel(self, shape):\n \"\"\"Create a 
simple SavedModel.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')\n with session.Session() as sess:\n in_tensor_1 = array_ops.placeholder(\n shape=shape, dtype=dtypes.float32, name='inputB')\n in_tensor_2 = array_ops.placeholder(\n shape=shape, dtype=dtypes.float32, name='inputA')\n out_tensor = in_tensor_1 + in_tensor_2\n inputs = {'x': in_tensor_1, 'y': in_tensor_2}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n def testSimpleModel(self):\n \"\"\"Test a SavedModel.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testNoneBatchSize(self):\n \"\"\"Test a SavedModel, with None in input tensor's shape.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])\n\n converter = lite.TocoConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testOrderInputArrays(self):\n \"\"\"Test a SavedModel ordering of input arrays.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n converter = lite.TocoConverter.from_saved_model(\n saved_model_dir, input_arrays=['inputB', 'inputA'])\n tflite_model = converter.convert()\n 
self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testSubsetInputArrays(self):\n \"\"\"Test a SavedModel with a subset of the input array names of the model.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n # Check case where input shape is given.\n converter = lite.TocoConverter.from_saved_model(\n saved_model_dir,\n input_arrays=['inputA'],\n input_shapes={'inputA': [1, 16, 16, 3]})\n\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check case where input shape is None.\n converter = lite.TocoConverter.from_saved_model(\n saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})\n\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n\nclass FromKerasFile(test_util.TensorFlowTestCase):\n\n def setUp(self):\n keras.backend.clear_session()\n\n def _getSequentialModel(self):\n with session.Session().as_default():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.RepeatVector(3))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))\n model.compile(\n loss=keras.losses.MSE,\n optimizer=keras.optimizers.RMSprop(),\n metrics=[keras.metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n model.predict(x)\n\n try:\n fd, keras_file = tempfile.mkstemp('.h5')\n keras.models.save_model(model, keras_file)\n finally:\n os.close(fd)\n return keras_file\n\n def testSequentialModel(self):\n \"\"\"Test a Sequential tf.keras model with default inputs.\"\"\"\n keras_file = self._getSequentialModel()\n\n converter = lite.TocoConverter.from_keras_model_file(keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])\n self.assertEqual(np.float32, 
output_details[0]['dtype'])\n self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n os.remove(keras_file)\n\n def testSequentialModelInputArray(self):\n \"\"\"Test a Sequential tf.keras model testing input arrays argument.\"\"\"\n keras_file = self._getSequentialModel()\n\n # Invalid input array raises error.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_keras_model_file(\n keras_file, input_arrays=['invalid-input'])\n self.assertEqual(\"Invalid tensors 'invalid-input' were found.\",\n str(error.exception))\n\n # Valid input array.\n converter = lite.TocoConverter.from_keras_model_file(\n keras_file, input_arrays=['dense_input'])\n tflite_model = converter.convert()\n os.remove(keras_file)\n self.assertTrue(tflite_model)\n\n def testSequentialModelInputShape(self):\n \"\"\"Test a Sequential tf.keras model testing input shapes argument.\"\"\"\n keras_file = self._getSequentialModel()\n\n # Passing in shape of invalid input array has no impact as long as all input\n # arrays have a shape.\n converter = lite.TocoConverter.from_keras_model_file(\n keras_file, input_shapes={'invalid-input': [2, 3]})\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Passing in shape of valid input array.\n converter = lite.TocoConverter.from_keras_model_file(\n keras_file, input_shapes={'dense_input': [2, 3]})\n tflite_model = converter.convert()\n os.remove(keras_file)\n self.assertTrue(tflite_model)\n\n # Check input shape from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertTrue(([2, 3] == input_details[0]['shape']).all())\n\n def testSequentialModelOutputArray(self):\n \"\"\"Test a Sequential tf.keras model testing output arrays argument.\"\"\"\n keras_file = self._getSequentialModel()\n\n # Invalid output array raises error.\n with self.assertRaises(ValueError) as error:\n lite.TocoConverter.from_keras_model_file(\n keras_file, output_arrays=['invalid-output'])\n self.assertEqual(\"Invalid tensors 'invalid-output' were found.\",\n str(error.exception))\n\n # Valid output array.\n converter = lite.TocoConverter.from_keras_model_file(\n keras_file, output_arrays=['time_distributed/Reshape_1'])\n tflite_model = converter.convert()\n os.remove(keras_file)\n self.assertTrue(tflite_model)\n\n def testFunctionalModel(self):\n \"\"\"Test a Functional tf.keras model with default inputs.\"\"\"\n with session.Session().as_default():\n inputs = keras.layers.Input(shape=(3,), name='input')\n x = keras.layers.Dense(2)(inputs)\n output = keras.layers.Dense(3)(x)\n\n model = keras.models.Model(inputs, output)\n model.compile(\n loss=keras.losses.MSE,\n optimizer=keras.optimizers.RMSprop(),\n metrics=[keras.metrics.categorical_accuracy])\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n model.predict(x)\n fd, 
keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TocoConverter.from_keras_model_file(keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n os.remove(keras_file)\n\n def testFunctionalModelMultipleInputs(self):\n \"\"\"Test a Functional tf.keras model with multiple inputs and outputs.\"\"\"\n with session.Session().as_default():\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(3,), name='input_b')\n dense = keras.layers.Dense(4, name='dense')\n c = dense(a)\n d = dense(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n\n model = keras.models.Model([a, b], [d, e])\n model.compile(\n loss=keras.losses.MSE,\n optimizer=keras.optimizers.RMSprop(),\n metrics=[keras.metrics.mae],\n loss_weights=[1., 0.5])\n\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n output_d_np = np.random.random((10, 4))\n output_e_np = np.random.random((10, 4))\n model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])\n\n model.predict([input_a_np, input_b_np], batch_size=5)\n fd, keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TocoConverter.from_keras_model_file(keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n os.remove(keras_file)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('input_a', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('input_b', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(2, 
len(output_details))\n self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 4] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n self.assertEqual('dropout/Identity', output_details[1]['name'])\n self.assertEqual(np.float32, output_details[1]['dtype'])\n self.assertTrue(([1, 4] == output_details[1]['shape']).all())\n self.assertEqual((0., 0.), output_details[1]['quantization'])\n\n def testFunctionalSequentialModel(self):\n \"\"\"Test a Functional tf.keras model containing a Sequential model.\"\"\"\n with session.Session().as_default():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.RepeatVector(3))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))\n model = keras.models.Model(model.input, model.output)\n\n model.compile(\n loss=keras.losses.MSE,\n optimizer=keras.optimizers.RMSprop(),\n metrics=[keras.metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n model.predict(x)\n\n model.predict(x)\n fd, keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TocoConverter.from_keras_model_file(keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n os.remove(keras_file)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.split", "tensorflow.contrib.compiler.jit.experimental_jit_scope", "tensorflow.python.framework.op_def_registry.get_registered_ops", "tensorflow.python.ops.rnn_cell_impl.assert_like_rnncell", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.ops.math_ops.tanh", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.gen_array_ops.tile", "tensorflow.python.layers.base.InputSpec", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.clip_ops.clip_by_value", "tensorflow.python.ops.nn_ops.bias_add", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.contrib.layers.python.layers.layers.layer_norm", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.util.nest.is_sequence", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple", "tensorflow.python.ops.nn_impl.l2_normalize", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.ops.nn_ops.dropout", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.nn_ops.xw_plus_b", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.image_ops.resize_image_with_pad", "numpy.sqrt", "tensorflow.python.ops.image_ops.adjust_saturation", "numpy.cumsum", "numpy.all", "tensorflow.python.ops.image_ops.flip_up_down", "tensorflow.python.ops.image_ops.non_max_suppression", "numpy.square", "tensorflow.python.ops.image_ops.extract_jpeg_shape", "tensorflow.python.ops.image_ops.encode_png", "numpy.zeros", "tensorflow.python.ops.image_ops.hsv_to_rgb", "tensorflow.python.ops.image_ops_impl._verify_compatible_image_shapes", "tensorflow.python.platform.test.is_gpu_available", "numpy.full_like", "numpy.log10", "tensorflow.python.ops.image_ops.adjust_hue", "numpy.array", "numpy.sum", "tensorflow.python.ops.image_ops.yuv_to_rgb", "tensorflow.python.ops.gradients.gradients", "tensorflow.python.ops.image_ops.pad_to_bounding_box", "tensorflow.python.ops.image_ops.decode_png", "tensorflow.python.ops.image_ops.non_max_suppression_padded", "tensorflow.python.ops.image_ops.ssim_multiscale", "tensorflow.python.ops.image_ops.image_gradients", "tensorflow.python.ops.image_ops.transpose_image", "tensorflow.python.platform.test.gpu_device_name", "numpy.random.uniform", "tensorflow.core.protobuf.config_pb2.ConfigProto", "numpy.vstack", "tensorflow.python.ops.image_ops.rgb_to_yuv", "numpy.expand_dims", "numpy.asarray", "tensorflow.python.ops.image_ops.psnr", "numpy.ndarray", "numpy.concatenate", "tensorflow.python.ops.image_ops.resize_image_with_crop_or_pad", "tensorflow.python.ops.image_ops.encode_jpeg", 
"numpy.reshape", "tensorflow.python.ops.image_ops.random_flip_left_right", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.image_ops.sample_distorted_bounding_box", "tensorflow.python.ops.image_ops.resize_bilinear", "tensorflow.python.ops.image_ops.decode_bmp", "tensorflow.python.ops.image_ops.per_image_standardization", "numpy.random.rand", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.image_ops.rgb_to_grayscale", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.image_ops.adjust_brightness", "tensorflow.python.ops.image_ops.convert_image_dtype", "numpy.ones", "tensorflow.python.ops.image_ops.decode_image", "tensorflow.python.ops.image_ops.decode_and_crop_jpeg", "numpy.empty", "tensorflow.python.ops.image_ops.random_flip_up_down", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.strided_slice", "tensorflow.python.ops.array_ops.placeholder", "numpy.mean", "numpy.zeros_like", "numpy.histogram", "tensorflow.python.ops.image_ops.yiq_to_rgb", "numpy.fliplr", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.image_ops.decode_jpeg", "numpy.rot90", "numpy.isnan", "tensorflow.python.ops.image_ops.flip_left_right", "tensorflow.python.ops.image_ops.rgb_to_hsv", "tensorflow.python.ops.image_ops.grayscale_to_rgb", "tensorflow.python.ops.image_ops.resize_bicubic", "tensorflow.python.ops.image_ops.adjust_contrast", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.image_ops.resize_area", "tensorflow.python.ops.image_ops.central_crop", "numpy.dstack", "numpy.tile", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.image_ops.rgb_to_yiq", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.image_ops.decode_gif", "numpy.squeeze", "tensorflow.python.ops.image_ops.total_variation", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.image_ops.resize_images", "tensorflow.python.ops.image_ops.rot90", "numpy.arange", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.io_ops.read_file", "tensorflow.python.ops.image_ops.crop_to_bounding_box", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.image_ops.sobel_edges", "tensorflow.python.ops.image_ops.adjust_gamma", "tensorflow.python.ops.image_ops.ssim", "tensorflow.python.client.session.Session", "numpy.abs", "numpy.random.seed", "numpy.triu_indices", "numpy.isfinite", "tensorflow.python.ops.gen_image_ops.adjust_saturation", "numpy.prod", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.python.training.momentum.MomentumOptimizer", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_serialize_ensemble", "tensorflow.python.ops.gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized", "tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op", "tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_deserialize_ensemble", 
"tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_create_ensemble", "tensorflow.python.training.saver.BaseSaverBuilder.SaveSpec", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_get_ensemble_states", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.resources.register_resource" ], [ "numpy.random.random", "numpy.random.seed", "numpy.random.choice", "numpy.sort", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.platform.test.main", "numpy.random.randint", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.contrib.framework.python.ops.sort_ops.sort", "numpy.argsort", "tensorflow.contrib.framework.python.ops.sort_ops.argsort", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.dot", "numpy.expand_dims", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.framework.ops.device", "numpy.eye", "tensorflow.python.platform.test.main", "numpy.zeros", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.client.session.Session", "tensorflow.python.ops.linalg.linalg_impl.matrix_exponential", "numpy.array", "numpy.abs", "numpy.random.seed", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "numpy.tile", "numpy.ones", "tensorflow.python.ops.random_ops.random_normal", "numpy.prod", "numpy.random.uniform", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.empty", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.platform.gfile.ListDirectory", "tensorflow.python.util.compat.as_str_any" ], [ "tensorflow.python.framework.function.Defun", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.googletest.main", "numpy.array", "tensorflow.compiler.tf2xla.python.xla.reduce_window" ], [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.training.monitored_session.MonitoredSession", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.framework.errors_impl.AbortedError", "tensorflow.python.summary.summary.scalar", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.training.monitored_session.MonitoredTrainingSession", "tensorflow.python.training.monitored_session._WrappedSession", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.training.checkpoint_management.latest_checkpoint", "tensorflow.python.framework.errors_impl.UnknownError", "tensorflow.python.training.saver._get_saver_or_default", "tensorflow.python.ops.array_ops.identity", "tensorflow.core.protobuf.debug_pb2.DebugTensorWatch", "tensorflow.python.training.monitored_session._CoordinatedSession", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.contrib.testing.python.framework.util_test.latest_summaries", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.python.training.monitored_session.SingularMonitoredSession", "tensorflow.python.framework.errors_impl.UnavailableError", "tensorflow.python.platform.test.main", "tensorflow.python.training.monitored_session._WrappedSession.run", "tensorflow.python.framework.ops.control_dependencies", 
"tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook", "tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step", "tensorflow.python.training.monitored_session.ChiefSessionCreator", "tensorflow.python.training.coordinator.Coordinator", "tensorflow.python.client.session.Session", "tensorflow.python.training.monitored_session._RecoverableSession", "tensorflow.python.framework.errors_impl.CancelledError", "tensorflow.python.framework.ops.Graph", "tensorflow.core.protobuf.debug_pb2.DebugOptions", "tensorflow.python.training.basic_session_run_hooks.StopAtStepHook", "tensorflow.python.training.monitored_session._WrappedSession.__init__", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.training.session_run_hook.SessionRunArgs", "tensorflow.python.framework.errors_impl.OutOfRangeError", "tensorflow.python.training.saver.Saver", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.core.kernels.boosted_trees.boosted_trees_pb2.TreeEnsemble", "tensorflow.python.estimator.canned.boosted_trees._create_classification_head", "tensorflow.python.training.checkpoint_utils.load_checkpoint", "tensorflow.python.feature_column.feature_column.indicator_column", "tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn", "tensorflow.python.platform.googletest.main", "tensorflow.python.estimator.canned.boosted_trees.BoostedTreesClassifier", "tensorflow.python.estimator.canned.boosted_trees._create_regression_head", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.estimator.canned.boosted_trees._TreeHParams", "tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_serialize_ensemble", "tensorflow.python.feature_column.feature_column.numeric_column", "tensorflow.python.ops.resources.shared_resources", "tensorflow.python.estimator.canned.boosted_trees.BoostedTreesRegressor", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.estimator.canned.boosted_trees._bt_model_fn", "numpy.array", "tensorflow.python.feature_column.feature_column.categorical_column_with_vocabulary_list", "tensorflow.python.estimator.run_config.RunConfig", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl.tensor_pool", "numpy.unique", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.debug.wrappers.hooks.DumpingDebugHook", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.debug.wrappers.dumping_wrapper.DumpingDebugWrapperSession", "tensorflow.python.ops.variables.Variable", "tensorflow.python.client.session.Session", "tensorflow.python.platform.googletest.main", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.rate.rate.Rate", "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", 
"tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.exp2", "numpy.take", "tensorflow.compiler.tests.xla_test.Benchmark", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.platform.test.main", "numpy.random.randint", "numpy.random.randn", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.parsing_ops.FixedLenFeature", "tensorflow.python.ops.parsing_ops.VarLenFeature", "tensorflow.python.data.ops.readers.TFRecordDataset", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.data.ops.iterator_ops.Iterator.from_structure", "tensorflow.core.example.feature_pb2.Int64List", "tensorflow.python.ops.array_ops.placeholder_with_default", "tensorflow.python.lib.io.python_io.TFRecordWriter", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.platform.tf_logging.info", "tensorflow.core.protobuf.config_pb2.GPUOptions", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.util.deprecation.deprecated" ], [ "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.contrib.rpc.python.ops.rpc_op.rpc", "tensorflow.contrib.proto.python.ops.decode_proto_op.decode_proto", "tensorflow.contrib.rpc.python.ops.rpc_op.try_rpc", "tensorflow.contrib.rpc.python.kernel_tests.test_example_pb2.TestCase" ], [ "tensorflow.python.keras.models.save_model", "tensorflow.python.keras.layers.Dense", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.contrib.lite.python.lite.TocoConverter.from_frozen_graph", "tensorflow.contrib.lite.python.lite.TocoConverter", "tensorflow.python.saved_model.saved_model.simple_save", "tensorflow.contrib.lite.python.lite.TocoConverter.from_session", "tensorflow.python.keras.optimizers.RMSprop", "tensorflow.python.ops.array_ops.fake_quant_with_min_max_args", "numpy.testing.assert_almost_equal", "tensorflow.contrib.lite.python.lite.TocoConverter.from_keras_model_file", "tensorflow.python.platform.test.main", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.models.load_model", "tensorflow.python.client.session.Session", "tensorflow.python.keras.layers.Dropout", "tensorflow.contrib.lite.python.interpreter.Interpreter", "numpy.array", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.layers.RepeatVector", "numpy.random.random", "numpy.random.seed", "tensorflow.contrib.lite.python.lite.TocoConverter.from_saved_model", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.training.training_util.write_graph", "numpy.random.uniform", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.backend.clear_session", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.platform.gfile.Open" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.4" ] } ]
aribasadme/CrimeDB
[ "a16b35204921726ec6f3cf9a7ec5d9b51cbe7d49" ]
[ "crime_db.py" ]
[ "import pandas as pd\nfrom police_api import PoliceAPI\n\n\ndef first_job(api, dates, t_current):\n \"\"\"\n Creates the tables and populates them with the historical data\n from ​T​_0\n​ ​to ​T​_current\n \"\"\"\n # subset of dates\n dates_hist = dates[dates <= t_current]\n\n # crime_categories table\n s_crime_cat = set()\n for date in dates_hist:\n s_crime_cat.update(api.get_crime_categories(date))\n\n crime_categories['id'] = [c.url for c in s_crime_cat]\n crime_categories['description'] = [c.name for c in s_crime_cat]\n crime_categories.set_index('id', inplace=True)\n\n # To get the crimes for each force and neighbourhood\n cr = []\n for d in date_hist:\n cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])\n # Flattern the list\n crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]\n # Subset for those containing a valid \"persistent_id\"\n crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id != '']\n # Convert to DataFrame\n df_crimes = pd.DataFrame(crimes_flat)\n df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id', \n 'location', 'context', 'outcome_status']]\n # Get the key values for the objects in each column\n crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)\n crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)\n crimes['street'] = df_crimes['location'].apply(lambda x: x.street)\n\n ### outcome_categories table ###\n # Get outcome_status to populate outcome_categories table\n outcome_status = crimes.pop('outcome_status')\n df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())\n df_outcomes.pop('api')\n outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])\n outcome_categories['name'] = df_outcomes['category'].apply(lambda x: x['name'])\n # Drop duplicates\n outcome_categories = outcome_categories.loc[outcome_categories.name.drop_duplicates().index]\n outcome_categories.set_index('id', inplace=True)\n\n ### streets table ###\n # Get streets to populate streets table\n s_streets = crimes['street']\n streets['id'] = s_streets.apply(lambda x: x['id'])\n streets['name'] = s_streets.apply(lambda x: x['name'])\n # Drop duplicates\n streets = streets.loc[streets.id.drop_duplicates().index]\n streets.set_index('id', inplace=True)\n\n # Clean crimes table\n crimes['street'] = crimes['street'].apply(lambda x: x['id'])\n # rename 'month' to 'date'\n crimes.rename(columns={\"month\": \"date\"}, inplace=True)\n # Ordering columns\n cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']\n crimes = crimes[cols]\n crimes.set_index('persistent_id', inplace=True)\n\n ### outcomes table ###\n crime_idx = crimes.index.to_list()\n l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]\n l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]\n outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]\n outcomes['category'] = [o.category.id for o in l_outcomes_flat]\n outcomes['date'] = [o.date for o in l_outcomes_flat]\n outcomes['person_id'] = [' ' for o in l_outcomes_flat] # person_id is empty given by the api\n outcomes.drop_duplicates(['crime', 'category'], inplace=True)\n outcomes.set_index(['crime', 'category'], inplace=True)\n\n\ndef second_job(api, dates, t_last_update, t_current):\n dates_upd = dates[dates <= t_current and dates >= t_last_update]\n\n s_crime_cat = set()\n for date in dates_upd:\n s_crime_cat.update(api.get_crime_categories(date))\n url = [c.url for c in 
s_crime_cat]\n    name = [c.name for c in s_crime_cat]\n    df_crime_categories = pd.DataFrame.from_dict({'id': url, 'description': name})\n    df_crime_categories.set_index('id')\n\n    crime_categories.append(df_crime_categories, ignore_index=True)\n\n    cr = []\n    for d in dates_upd:\n        cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])\n    # Flatten the list\n    crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]\n    # Subset for those containing a valid \"persistent_id\"\n    crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id!='']\n    # Convert to DataFrame\n    df_crimes = pd.DataFrame(crimes_flat)\n    df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id', 'location', 'context', 'outcome_status']]\n    # Get the key values for the objects in each column\n    df_crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)\n    df_crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)\n    df_crimes['street'] = df_crimes['location'].apply(lambda x: x.street)\n\n    ### outcome_categories table ###\n    # Get outcome_status to populate outcome_categories table\n    outcome_status = df_crimes.pop('outcome_status')\n    df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())\n    df_outcomes.pop('api')\n    df_outcome_categories = pd.DataFrame({'id': [], 'description': []})\n    df_outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])\n    df_outcome_categories['description'] = df_outcomes['category'].apply(lambda x: x['name'])\n    # Drop duplicates\n    df_outcome_categories = df_outcome_categories.loc[df_outcome_categories['description'].drop_duplicates().index]\n    df_outcome_categories.set_index('id', inplace=True)\n\n    outcome_categories.append(df_outcome_categories, ignore_index=True)\n\n\n    ### streets table ###\n    # Get streets to populate streets table\n    s_streets = df_crimes['street']\n    df_streets = pd.DataFrame({'id': [], 'name': []})\n    df_streets['id'] = s_streets.apply(lambda x: x['id'])\n    df_streets['name'] = s_streets.apply(lambda x: x['name'])\n    # Drop duplicates\n    df_streets = df_streets.loc[df_streets.id.drop_duplicates().index]\n    df_streets.set_index('id', inplace=True)\n    streets.append(df_streets, ignore_index=True)\n\n    # Clean crimes table\n    df_crimes['street'] = df_crimes['street'].apply(lambda x: x['id'])\n    # rename 'month' to 'date'\n    df_crimes.rename(columns={\"month\": \"date\"}, inplace=True)\n    # Ordering columns\n    cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']\n    df_crimes = df_crimes[cols]\n    df_crimes.set_index('persistent_id', inplace=True)\n\n    crimes.append(df_crimes, ignore_index=True)\n\n    ### outcomes table ###\n    crime_idx = crimes.index.to_list()\n    l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]\n    l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]\n    df_outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})\n    df_outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]\n    df_outcomes['category'] = [o.category.id for o in l_outcomes_flat]\n    df_outcomes['date'] = [o.date for o in l_outcomes_flat]\n    df_outcomes['person_id'] = [' ' for o in l_outcomes_flat] # person_id is empty given by the api\n    df_outcomes.drop_duplicates(['crime', 'category'], inplace=True)\n    df_outcomes.set_index(['crime', 'category'], inplace=True)\n\n    outcomes.append(df_outcomes, ignore_index=True)\n\ndef last_job(api, dates, t_current):\n    dates_upd = dates[dates == t_current]\n\n    s_crime_cat = set()\n    for 
date in dates_upd:\n s_crime_cat.update(api.get_crime_categories(date))\n url = [c.url for c in s_crime_cat]\n name = [c.name for c in s_crime_cat]\n df_crime_categories = pd.DataFrame.from_dict({'id': url, 'description': name})\n df_crime_categories.set_index('id')\n\n crime_categories.append(df_crime_categories, ignore_index=True)\n\n cr = []\n for d in dates_upd:\n cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])\n # Flattern the list\n crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]\n # Subset for those containing a valid \"persistent_id\"\n crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id!='']\n # Convert to DataFrame\n df_crimes = pd.DataFrame(crimes_flat)\n df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id', 'location', 'context', 'outcome_status']]\n # Get the key values for the objects in each column\n df_crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)\n df_crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)\n df_crimes['street'] = df_crimes['location'].apply(lambda x: x.street)\n\n\n ## outcome_categories table ##\n # Get outcome_status to populate outcome_categories table\n outcome_status = df_crimes.pop('outcome_status')\n df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())\n df_outcomes.pop('api')\n df_outcome_categories = pd.DataFrame({'id': [], 'description': []})\n df_outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])\n df_outcome_categories['description'] = df_outcomes['category'].apply(lambda x: x['name'])\n # Drop duplicates\n df_outcome_categories = df_outcome_categories.loc[df_outcome_categoriesdf_outcome_categories.name.drop_duplicates().index]\n df_outcome_categories.set_index('id', inplace=True)\n\n outcome_categories.append(df_outcome_categories, ignore_index=True)\n\n\n ### streets table ###\n # Get streets to populate streets table\n s_streets = crimes['street']\n df_streets = pd.DataFrame({'id': [], 'name': []})\n df_streets['id'] = s_streets.apply(lambda x: x['id'])\n df_streets['name'] = s_streets.apply(lambda x: x['name'])\n # Drop duplicates\n df_streets = df_streets.loc[df_streets.id.drop_duplicates().index]\n df_streets.set_index('id', inplace=True)\n streets.append(df_streets, ignore_index=True)\n\n # Clean crimes table\n df_crimes['street'] = df_crimes['street'].apply(lambda x: x['id'])\n # rename 'month' to 'date'\n df_crimes.rename(columns={\"month\": \"date\"}, inplace=True)\n # Ordering columns\n cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']\n df_crimes = crimes[cols]\n df_crimes.set_index('persistent_id', inplace=True)\n\n crimes.append(df_crimes, ignore_index=True)\n\n ### outcomes table ###\n crime_idx = crimes.index.to_list()\n l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]\n l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]\n df_outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})\n df_outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]\n df_outcomes['category'] = [o.category.id for o in l_outcomes_flat]\n df_outcomes['date'] = [o.date for o in l_outcomes_flat]\n df_outcomes['person_id'] = [' ' for o in l_outcomes_flat] # person_id is empty given by the api\n df_outcomes.drop_duplicates(['crime', 'category'], inplace=True)\n df_outcomes.set_index(['crime', 'category'], inplace=True)\n\n outcomes.append(df_outcomes, ignore_index=True)\n\n\ndef 
main(t_current):\n # Call the police API\n api = PoliceAPI()\n\n # Define tables\n crime_categories = pd.DataFrame({'id': [], 'description': []})\n outcome_categories = pd.DataFrame({'id': [], 'description': []})\n streets = pd.DataFrame({'id': [], 'name': []})\n crimes = pd.DataFrame({'persistent_id': [], 'category': [], 'street': [], 'city': [], 'latitude': [], 'longitude': [], 'date': [], 'context': []})\n outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})\n\n # Transform dates into pandas Series for better manipulation\n dates = pd.Series(api.get_dates())\n\n # Get Forces\n forces = api.get_forces()\n # Get neighbourhoods\n neighbourhoods = [f.neighbourhoods for f in forces]\n nb_flat = [n for sublist in neighbourhoods for n in sublist]\n s_nb_flat = pd.Series(nb_flat).unique()\n\n first_job(api, dates, t_current)\n\n t_last_update = api.get_latest_date()\n second_job(api, dates, t_last_update, t_current)\n\n last_job(api, t_current)\n\n\nif __name__ == \"__main__\":\n main(t_current)\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
mothguib/maptrainer
[ "335334fed073f8d14a4c5137eaa0424efcbcac63", "335334fed073f8d14a4c5137eaa0424efcbcac63" ]
[ "maptrainer/model/LinRNNModel.py", "maptrainer/model/LinearModel.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\nfrom torch import autograd\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\nfrom maptrainer.model.MAPModel import MAPModel\nfrom ..data import INIT_RANGE_BOUND\n\n\nclass LinRNNModel(MAPModel):\n \"\"\"\n `LinRNNModel`: Linear-output RNN model\n\n Container module standing for an RNN with a linear output layer\n \"\"\"\n\n def __init__(self,\n n_input,\n _n_hid,\n _nlayers,\n variant=\"LSTM\",\n dropout=0.0,\n **kwargs):\n\n super(LinRNNModel, self).__init__()\n self.variant = variant\n self.nhid = _n_hid\n self.nlayers = _nlayers\n self.drop = nn.Dropout(dropout)\n\n # The linear layer as projection that maps hidden state space to\n # vertices' space namely that this linear layer has as many units\n # as there are vertices\n self.linearise = nn.Linear(_n_hid, n_input)\n\n if variant in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, variant)(n_input, _n_hid, _nlayers,\n dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[\n variant]\n except KeyError:\n raise ValueError(\"\"\"An invalid option for `--model` was \n supplied, options are ['LSTM', 'GRU', 'RNN_TANH' or \n RNN_RELU']\"\"\")\n self.rnn = nn.RNN(n_input, _n_hid, _nlayers,\n nonlinearity=nonlinearity, dropout=dropout)\n\n self.init_parameters()\n\n def forward(self, _input, hidden):\n \"\"\"\n\n :param _input:\n Shape: N x T x n_in\n :type _input: FloatTensor or Variable\n :param hidden: (h_t, c_t)\n :type hidden:\n :return:\n :rtype:\n \"\"\"\n _input = _input.permute(1, 0, 2) # The dimension representing the\n # index of elements in a sequence (or the tth element of the\n # sequence) is put into the 1st dim (axis 0) and the one\n # representing indices of sequence (the nth sequence) into the 2nd\n # dim (axis 1). 
Henceforth, `_input` will have a shape of `T x N x\n # n_ins`.\n\n dropped_out_input = self.drop(_input)\n self.rnn.flatten_parameters()\n output, hidden = self.rnn(dropped_out_input, hidden)\n dropped_out_output = self.drop(output)\n linearised = self.linearise(dropped_out_output)\n\n return linearised, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.variant == 'LSTM':\n return (\n Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))\n # returns (h_t, c_t)\n else:\n return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())\n\n def predict(self, _input, bsz):\n if isinstance(_input, np.ndarray):\n _input = autograd.Variable(torch.from_numpy(_input).float())\n\n if isinstance(_input, autograd.Variable):\n if len(_input.size()) == 2:\n _input = _input.view(len(_input), 1, -1)\n\n sizes = _input.size()\n if sizes[1] == 1:\n _input = _input.expand(sizes[0], bsz, sizes[2])\n else:\n raise TypeError(\n \"_input must be a np.ndarray or an autograd.Variable\")\n\n hidden = self.init_hidden(bsz)\n\n outputs, hidden = self(_input, hidden)\n\n return outputs[:, 0, :], hidden[:, 0, :]\n", "# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\n\nfrom maptrainer.model.MAPModel import MAPModel\nfrom ..data import INIT_RANGE_BOUND\n\n\nclass LinearModel(MAPModel):\n\n def __init__(self,\n ninputs: int = -1,\n nlayers: int = -1,\n nhid: int = -1,\n noutputs: int = -1,\n dropout: float = 0.0,\n variant: str = None,\n **kwargs):\n\n super(LinearModel, self).__init__()\n\n self.ninputs = ninputs\n\n self.noutputs = noutputs if noutputs != -1 else self.ninputs\n\n self.drop = nn.Dropout(dropout)\n\n self.variant = variant\n\n bias = False if variant == \"IdentityWeightsNoBias\" else True\n\n # Last layer, which is linear, to convert probabilities into idlenesses\n self.network = nn.Linear(self.ninputs, self.noutputs, bias=bias)\n\n if variant.startswith(\"IdentityWeights\"):\n self.init_parameters()\n\n def init_parameters(self, init_range_bound: int = INIT_RANGE_BOUND):\n \"\"\"\n\n Initialises the weights of the output layer\n :return:\n :rtype:\n \"\"\"\n\n self.network.weight.data = torch.zeros(self.noutputs, self.ninputs)\n\n for i in range(self.noutputs):\n self.network.weight.data[i][i] = 1\n\n if self.variant == \"IdentityWeightsNoBias\":\n for i in range(self.noutputs):\n self.network.bias.data[i] = 0\n\n def forward(self, _input):\n \"\"\"\n\n :param _input:\n Shape: N x T x n_in\n :type _input: FloatTensor or Variable\n :return:\n :rtype:\n \"\"\"\n\n dropped_out_input = self.drop(_input)\n output = self.network(dropped_out_input)\n\n # TDLT\n # print(\"DBG: \", self.network.bias.data)\n\n return output\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.RNN", "torch.from_numpy" ], [ "torch.nn.Linear", "torch.nn.Dropout", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sjdv1982/silk
[ "232e759cabfc7a87550d1e50ed9c4de4e0e57bf4", "232e759cabfc7a87550d1e50ed9c4de4e0e57bf4" ]
[ "tests/silk/test-complex.py", "tests/mixed/tofromstream2.py" ]
[ "import sys\nfrom pprint import pprint\nfrom silk import Silk, ValidationError\n\ndef adder(self, other):\n return other + self.x\n\ns = Silk()\ns.__add__ = adder\ns.bla = adder\ns.x = 80\nprint(s.x.data)\nprint(s.bla(5))\nprint(s+5)\n\ns2 = Silk(schema=s.schema)\ns2.x = 10\nprint(s2+5)\n\ns3 = Silk(schema=s2.schema)\ns3.x = 10\nprint(s3+25)\n\ndef xy(self):\n return self.x + self.y\n\ns.x = 1\ns.y = 2\nprint(s.x + s.y)\ns3.xy = property(xy) # all three Silks use the same schema\nprint(s.xy)\n\ndef xx_get(self):\n return self.x * self.x\ndef xx_set(self, xx):\n import math\n self.x = int(math.sqrt(xx))\n\ns.x = 3\ns.xx = property(xx_get, xx_set)\nprint(s.xx)\ns.xx = 16\nprint(s.xx)\nprint(s.x.data)\n\ns.z = {}\ns.z.q = 12\ns.z.r = 24\nsz = s.z\nprint(sz.q.data, sz.r.data)\ns.z.r = 25\nprint(sz.q.data, sz.r.data)\ns.z.qr = property(lambda self: self.q * self.r)\nprint(s.z.qr)\n\ndef validate_z(self):\n print(\"VALIDATE\", self.q.data, self.r.data)\n assert self.q < self.r\ntry:\n s.z.add_validator(validate_z)\nexcept Exception:\n pprint(s.schema)\n\ns.z.validate()\npprint(s.schema)\n\ns.lis = [1,2,3]\ns.lis.append(10)\ns.validate()\nprint(s.lis.data)\ns.lis += [5]\ns.validate()\nprint(s.lis*2)\nfor a in s.lis[1:3]:\n print(a.data)\nprint(hasattr(s, \"lis\"), \"lis\" in s)\nprint(hasattr(s, \"lis2\"), \"lis2\" in s)\n\nfor v in s:\n print(s[v].data)\nprint(\"\")\nfor v in s.lis:\n print(v.data)\nprint()\n\ns = Silk().set(5)\ninc = lambda self: self + 1\ns.x = inc\nprint(s.x())\ns.y = property(inc)\nprint(s.y)\ndef setter(self,v):\n self.set(v - 1)\ns.z = property(inc, setter)\nprint(s.z)\ns.z = 10\nprint(s.data)\nprint(s.z)\n\nimport numpy as np\narr = np.array([1.0,2.0,3.0])\ns2.arr = arr\n# Need .self.data or .unsilk for Numpy arrays, because Numpy arrays have a .data method\nprint(s2.arr.self.data, arr)\nprint(s2.arr.unsilk, arr)\nprint(type(s2.arr.self.data), type(arr))\nprint(s2.arr[2].self.data, arr[2])\nprint(type(s2.arr[2].self.data), type(arr[2]))\n\n#s2.arr.schema[\"type\"] = \"array\" # inferred\nprint(s2.arr.schema[\"type\"])\nitem = Silk().set(5.0)\n#item.schema[\"type\"] = \"number\" # inferred\ndef func(self):\n assert self > 0\nitem.add_validator(func)\ns2.arr.schema[\"items\"] = item.schema\ns2.x.validate(full=False)\nprint(\"ARR\", s2.arr, type(s2.arr))\nfor nr, ele in enumerate(s2.arr):\n print(\"ELE\", nr, ele, type(ele))\n ele.validate(full=False)\ns2.x.validate(full=False)\ns2.validate()\n\ns2.arr[0] = 5\nprint(s2.arr.unsilk)\n\ns = Silk()\ns.x = 1.0\ns.y = 0.0\ns.z = 0.0\ndef func(self):\n assert abs(self.x**2+self.y**2+self.z**2 - 1) < 0.001\ns.add_validator(func)\ns.validate()\ntry:\n s.y = 1.0 # would fail\n s.validate()\nexcept ValidationError:\n s.y = 0\n\ns.x = 0.0\ns.y = 0.0\ns.z = 1.0\ns.validate()\n\ns.x = 0.0\ns.y = 1.0\ns.z = 0.0\ns.validate()\n\nprint(s.data)\n\nimport numpy as np\na = Silk()\na.coor = [0.0,0.0,1.0]\npprint(a.coor.schema)\nprint(a.coor.data)\nprint(\"START\")\nnp.array(a.coor.data)\nprint(np.array(a.coor.data))\ndef func(self):\n import numpy as np #necessary!\n arr = np.array(self.data)\n assert abs(np.sum(arr**2) - 1) < 0.01\na.coor.add_validator(func)\n\nc = Silk()\nc.set( [0.0, 0.0, 0.0] )\nc.schema.clear()\nc.schema.update(a.coor.schema)\n\ndef set_x(self, value):\n self[0] = value\nc.x = property(lambda self: self[0], set_x)\ndef set_y(self, value):\n self[1] = value\nc.y = property(lambda self: self[1], set_y)\ndef set_z(self, value):\n self[2] = value\nc.z = property(lambda self: self[2], set_z)\n\ndef set_xyz(self, xyz):\n x,y,z 
= xyz\n self.x = x\n self.y = y\n self.z = z\n self.validate()\nc.xyz = property(lambda self: tuple(self.data), set_xyz)\n\nc.x = 0.2\ntry:\n c.validate()\nexcept ValidationError as exc:\n print(exc)\nc.y = -0.3\nc.z = 0.93\nc.validate()\nprint(c.data)\nc.xyz = -1,0,0\nprint(c.data, c.xyz)\npprint(c.schema)\n\nTest = Silk()\ndef __init__(self, a, b):\n self.a = a\n self.b = b\ndef __call__(self, c):\n return self.a + self.b + c\nTest.__init__ = __init__\nTest.__call__ = __call__\ntest = Test(7,8)\ntest.validate()\nprint(test.data)\nprint(test(5))\npprint(test.schema)\n\nprint(\"START\")\ntest.l = []\nl = test.l\nl.append(\"bla\")\ntest.validate()\ntry:\n l.append(10) #Error\n l.validate()\nexcept ValidationError as exc:\n print(exc)\n l.pop(-1)\nprint(test.l.data)", "import sys\nimport numpy as np\nfrom silk.mixed.get_form import get_form\nfrom silk.mixed.io import to_stream, from_stream\n\ndt1 = np.dtype([\n (\"a\", int),\n (\"b\", int),\n (\"c\", (\"float\", 3)),\n], align=True)\n\ndt2 = np.dtype([\n (\"a\", int),\n (\"b\", int),\n (\"c\", object),\n], align=True)\n\nassert dt1.isalignedstruct\nassert dt2.isalignedstruct\nassert dt1[\"c\"].subdtype\n\nd1 = np.zeros(1, dt1)[0]\nd2 = np.zeros(1, dt2)[0]\nd2[\"c\"] = np.zeros(3, float)\n\nd1[\"a\"] = d2[\"a\"] = 10\nd1[\"b\"] = d2[\"b\"] = 20\nd1[\"c\"][:] = d2[\"c\"][:] = range(100,103)\n\nstorage1, form1 = get_form(d1)\nstorage2, form2 = get_form(d2)\n\nprint(storage1, form1)\nprint(storage2, form2)\n\nstream1 = to_stream(d1, storage1, form1)\n#print(stream1)\nstream2 = to_stream(d2, storage2, form2)\n#print(stream2)\n\nnewd1 = from_stream(stream1, storage1, form1)\nprint(newd1.tobytes() == d1.tobytes())\nnewd2 = from_stream(stream2, storage2, form2)\nprint(newd2[\"a\"] == d2[\"a\"])\nprint(newd2[\"b\"] == d2[\"b\"])\nprint(newd2[\"c\"].tobytes() == d2[\"c\"].tobytes())\n" ]
[ [ "numpy.array", "numpy.sum" ], [ "numpy.zeros", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jayzed82/scikit-learn
[ "f52c0d441502117020ac0152cea8e89367f55ed6", "f52c0d441502117020ac0152cea8e89367f55ed6" ]
[ "sklearn/feature_selection/_univariate_selection.py", "sklearn/metrics/tests/test_common.py" ]
[ "\"\"\"Univariate features selection.\"\"\"\n\n# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.\n# L. Buitinck, A. Joly\n# License: BSD 3 clause\n\n\nimport numpy as np\nimport warnings\n\nfrom scipy import special, stats\nfrom scipy.sparse import issparse\n\nfrom ..base import BaseEstimator\nfrom ..preprocessing import LabelBinarizer\nfrom ..utils import (as_float_array, check_array, check_X_y, safe_sqr,\n safe_mask)\nfrom ..utils.extmath import safe_sparse_dot, row_norms\nfrom ..utils.validation import check_is_fitted\nfrom ._base import SelectorMixin\n\n\ndef _clean_nans(scores):\n \"\"\"\n Fixes Issue #1240: NaNs can't be properly compared, so change them to the\n smallest value of scores's dtype. -inf seems to be unreliable.\n \"\"\"\n # XXX where should this function be called? fit? scoring functions\n # themselves?\n scores = as_float_array(scores, copy=True)\n scores[np.isnan(scores)] = np.finfo(scores.dtype).min\n return scores\n\n\n######################################################################\n# Scoring functions\n\n\n# The following function is a rewriting of scipy.stats.f_oneway\n# Contrary to the scipy.stats.f_oneway implementation it does not\n# copy the data while keeping the inputs unchanged.\ndef f_oneway(*args):\n \"\"\"Performs a 1-way ANOVA.\n\n The one-way ANOVA tests the null hypothesis that 2 or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n *args : {array-like, sparse matrix}\n sample1, sample2... The sample measurements should be given as\n arguments.\n\n Returns\n -------\n f_statistic : float\n The computed F-value of the test.\n p_value : float\n The associated p-value from the F-distribution.\n\n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n\n 1. The samples are independent\n 2. Each sample is from a normally distributed population\n 3. The population standard deviations of the groups are all equal. This\n property is known as homoscedasticity.\n\n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although\n with some loss of power.\n\n The algorithm is from Heiman[2], pp.394-7.\n\n See ``scipy.stats.f_oneway`` that should give the same results while\n being less efficient.\n\n References\n ----------\n\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 14.\n http://faculty.vassar.edu/lowry/ch14pt1.html\n\n .. [2] Heiman, G.W. Research Methods in Statistics. 
2002.\n\n \"\"\"\n n_classes = len(args)\n args = [as_float_array(a) for a in args]\n n_samples_per_class = np.array([a.shape[0] for a in args])\n n_samples = np.sum(n_samples_per_class)\n ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)\n sums_args = [np.asarray(a.sum(axis=0)) for a in args]\n square_of_sums_alldata = sum(sums_args) ** 2\n square_of_sums_args = [s ** 2 for s in sums_args]\n sstot = ss_alldata - square_of_sums_alldata / float(n_samples)\n ssbn = 0.\n for k, _ in enumerate(args):\n ssbn += square_of_sums_args[k] / n_samples_per_class[k]\n ssbn -= square_of_sums_alldata / float(n_samples)\n sswn = sstot - ssbn\n dfbn = n_classes - 1\n dfwn = n_samples - n_classes\n msb = ssbn / float(dfbn)\n msw = sswn / float(dfwn)\n constant_features_idx = np.where(msw == 0.)[0]\n if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):\n warnings.warn(\"Features %s are constant.\" % constant_features_idx,\n UserWarning)\n f = msb / msw\n # flatten matrix to vector in sparse case\n f = np.asarray(f).ravel()\n prob = special.fdtrc(dfbn, dfwn, f)\n return f, prob\n\n\ndef f_classif(X, y):\n \"\"\"Compute the ANOVA F-value for the provided sample.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The set of regressors that will be tested sequentially.\n\n y : ndarray of shape (n_samples,)\n The target vector.\n\n Returns\n -------\n f_statistic : ndarray of shape (n_features,)\n F-statistic for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values associated with the F-statistic.\n\n See Also\n --------\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])\n args = [X[safe_mask(X, y == k)] for k in np.unique(y)]\n return f_oneway(*args)\n\n\ndef _chisquare(f_obs, f_exp):\n \"\"\"Fast replacement for scipy.stats.chisquare.\n\n Version from https://github.com/scipy/scipy/pull/2525 with additional\n optimizations.\n \"\"\"\n f_obs = np.asarray(f_obs, dtype=np.float64)\n\n k = len(f_obs)\n # Reuse f_obs for chi-squared statistics\n chisq = f_obs\n chisq -= f_exp\n chisq **= 2\n with np.errstate(invalid=\"ignore\"):\n chisq /= f_exp\n chisq = chisq.sum(axis=0)\n return chisq, special.chdtrc(k - 1, chisq)\n\n\ndef chi2(X, y):\n \"\"\"Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample vectors.\n\n y : array-like of shape (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : ndarray of shape (n_features,)\n Chi2 statistics for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values for each feature.\n\n Notes\n -----\n Complexity of this 
algorithm is O(n_classes * n_features).\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n \"\"\"\n\n # XXX: we might want to do some of the following in logspace instead for\n # numerical stability.\n X = check_array(X, accept_sparse='csr')\n if np.any((X.data if issparse(X) else X) < 0):\n raise ValueError(\"Input X must be non-negative.\")\n\n Y = LabelBinarizer().fit_transform(y)\n if Y.shape[1] == 1:\n Y = np.append(1 - Y, Y, axis=1)\n\n observed = safe_sparse_dot(Y.T, X) # n_classes * n_features\n\n feature_count = X.sum(axis=0).reshape(1, -1)\n class_prob = Y.mean(axis=0).reshape(1, -1)\n expected = np.dot(class_prob.T, feature_count)\n\n return _chisquare(observed, expected)\n\n\ndef r_regression(X, y, *, center=True):\n \"\"\"Compute Pearson's r for each features and the target.\n\n Pearson's r is also known as the Pearson correlation coefficient.\n\n .. versionadded:: 1.0\n\n Linear model for testing the individual effect of each of many regressors.\n This is a scoring function to be used in a feature selection procedure, not\n a free standing feature selection procedure.\n\n The cross correlation between each regressor and the target is computed\n as ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) * std(y)).\n\n For more on usage see the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data matrix.\n\n y : array-like of shape (n_samples,)\n The target vector.\n\n center : bool, default=True\n Whether or not to center the data matrix `X` and the target vector `y`.\n By default, `X` and `y` will be centered.\n\n Returns\n -------\n correlation_coefficient : ndarray of shape (n_features,)\n Pearson's R correlation coefficients of features.\n\n See Also\n --------\n f_regression: Univariate linear regression tests returning f-statistic\n and p-values\n mutual_info_regression: Mutual information for a continuous target.\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n dtype=np.float64)\n n_samples = X.shape[0]\n\n # Compute centered values\n # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we\n # need not center X\n if center:\n y = y - np.mean(y)\n if issparse(X):\n X_means = X.mean(axis=0).getA1()\n else:\n X_means = X.mean(axis=0)\n # Compute the scaled standard deviations via moments\n X_norms = np.sqrt(row_norms(X.T, squared=True) -\n n_samples * X_means ** 2)\n else:\n X_norms = row_norms(X.T)\n\n correlation_coefficient = safe_sparse_dot(y, X)\n correlation_coefficient /= X_norms\n correlation_coefficient /= np.linalg.norm(y)\n return correlation_coefficient\n\n\ndef f_regression(X, y, *, center=True):\n \"\"\"Univariate linear regression tests returning F-statistic and p-values.\n\n Quick linear model for testing the effect of a single regressor,\n sequentially for many regressors.\n\n This is done in 2 steps:\n\n 1. The cross correlation between each regressor and the target is computed,\n that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *\n std(y)) using r_regression function.\n 2. 
It is converted to an F score and then to a p-value.\n\n :func:`f_regression` is derived from :func:`r_regression` and will rank\n features in the same order if all the features are positively correlated\n with the target.\n\n Note however that contrary to :func:`f_regression`, :func:`r_regression`\n values lie in [-1, 1] and can thus be negative. :func:`f_regression` is\n therefore recommended as a feature selection criterion to identify\n potentially predictive feature for a downstream classifier, irrespective of\n the sign of the association with the target variable.\n\n Furthermore :func:`f_regression` returns p-values while\n :func:`r_regression` does not.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data matrix.\n\n y : array-like of shape (n_samples,)\n The target vector.\n\n center : bool, default=True\n Whether or not to center the data matrix `X` and the target vector `y`.\n By default, `X` and `y` will be centered.\n\n Returns\n -------\n f_statistic : ndarray of shape (n_features,)\n F-statistic for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values associated with the F-statistic.\n\n See Also\n --------\n r_regression: Pearson's R between label/feature for regression tasks.\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n SelectPercentile: Select features based on percentile of the highest\n scores.\n \"\"\"\n correlation_coefficient = r_regression(X, y, center=center)\n deg_of_freedom = y.size - (2 if center else 1)\n\n corr_coef_squared = correlation_coefficient ** 2\n f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom\n p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)\n return f_statistic, p_values\n\n\n######################################################################\n# Base classes\n\nclass _BaseFilter(SelectorMixin, BaseEstimator):\n \"\"\"Initialize the univariate feature selection.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array with scores.\n \"\"\"\n\n def __init__(self, score_func):\n self.score_func = score_func\n\n def fit(self, X, y):\n \"\"\"Run score function on (X, y) and get the appropriate features.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values (class labels in classification, real numbers in\n regression).\n\n Returns\n -------\n self : object\n \"\"\"\n X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],\n multi_output=True)\n\n if not callable(self.score_func):\n raise TypeError(\"The score function should be a callable, %s (%s) \"\n \"was passed.\"\n % (self.score_func, type(self.score_func)))\n\n self._check_params(X, y)\n score_func_ret = self.score_func(X, y)\n if isinstance(score_func_ret, (list, tuple)):\n self.scores_, self.pvalues_ = score_func_ret\n self.pvalues_ = np.asarray(self.pvalues_)\n else:\n self.scores_ = score_func_ret\n self.pvalues_ = None\n\n 
self.scores_ = np.asarray(self.scores_)\n\n return self\n\n def _check_params(self, X, y):\n pass\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\n######################################################################\n# Specific filters\n######################################################################\nclass SelectPercentile(_BaseFilter):\n \"\"\"Select features according to a percentile of the highest scores.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array with scores.\n Default is f_classif (see below \"See Also\"). The default function only\n works with classification tasks.\n\n .. versionadded:: 0.18\n\n percentile : int, default=10\n Percent of features to keep.\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores, None if `score_func` returned only scores.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.feature_selection import SelectPercentile, chi2\n >>> X, y = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)\n >>> X_new.shape\n (1797, 7)\n\n Notes\n -----\n Ties between features with equal scores will be broken in an unspecified\n way.\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif : Mutual information for a discrete target.\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n mutual_info_regression : Mutual information for a continuous target.\n SelectKBest : Select features based on the k highest scores.\n SelectFpr : Select features based on a false positive rate test.\n SelectFdr : Select features based on an estimated false discovery rate.\n SelectFwe : Select features based on family-wise error rate.\n GenericUnivariateSelect : Univariate feature selector with configurable\n mode.\n \"\"\"\n def __init__(self, score_func=f_classif, *, percentile=10):\n super().__init__(score_func=score_func)\n self.percentile = percentile\n\n def _check_params(self, X, y):\n if not 0 <= self.percentile <= 100:\n raise ValueError(\"percentile should be >=0, <=100; got %r\"\n % self.percentile)\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n # Cater for NaNs\n if self.percentile == 100:\n return np.ones(len(self.scores_), dtype=bool)\n elif self.percentile == 0:\n return np.zeros(len(self.scores_), dtype=bool)\n\n scores = _clean_nans(self.scores_)\n threshold = np.percentile(scores, 100 - self.percentile)\n mask = scores > threshold\n ties = np.where(scores == threshold)[0]\n if len(ties):\n max_feats = int(len(scores) * self.percentile / 100)\n kept_ties = ties[:max_feats - mask.sum()]\n mask[kept_ties] = True\n return mask\n\n\nclass SelectKBest(_BaseFilter):\n \"\"\"Select features according to the k highest scores.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array 
with scores.\n Default is f_classif (see below \"See Also\"). The default function only\n works with classification tasks.\n\n .. versionadded:: 0.18\n\n k : int or \"all\", default=10\n Number of top features to select.\n The \"all\" option bypasses selection, for use in a parameter search.\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores, None if `score_func` returned only scores.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.feature_selection import SelectKBest, chi2\n >>> X, y = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)\n >>> X_new.shape\n (1797, 20)\n\n Notes\n -----\n Ties between features with equal scores will be broken in an unspecified\n way.\n\n See Also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information for a continuous target.\n SelectPercentile: Select features based on percentile of the highest\n scores.\n SelectFpr : Select features based on a false positive rate test.\n SelectFdr : Select features based on an estimated false discovery rate.\n SelectFwe : Select features based on family-wise error rate.\n GenericUnivariateSelect : Univariate feature selector with configurable\n mode.\n \"\"\"\n def __init__(self, score_func=f_classif, *, k=10):\n super().__init__(score_func=score_func)\n self.k = k\n\n def _check_params(self, X, y):\n if not (self.k == \"all\" or 0 <= self.k <= X.shape[1]):\n raise ValueError(\"k should be >=0, <= n_features = %d; got %r. \"\n \"Use k='all' to return all features.\"\n % (X.shape[1], self.k))\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n if self.k == 'all':\n return np.ones(self.scores_.shape, dtype=bool)\n elif self.k == 0:\n return np.zeros(self.scores_.shape, dtype=bool)\n else:\n scores = _clean_nans(self.scores_)\n mask = np.zeros(scores.shape, dtype=bool)\n\n # Request a stable sort. Mergesort takes more memory (~40MB per\n # megafeature on x86-64).\n mask[np.argsort(scores, kind=\"mergesort\")[-self.k:]] = 1\n return mask\n\n\nclass SelectFpr(_BaseFilter):\n \"\"\"Filter: Select the pvalues below alpha based on a FPR test.\n\n FPR test stands for False Positive Rate test. It controls the total\n amount of false detections.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See Also\"). The default function only\n works with classification tasks.\n\n alpha : float, default=5e-2\n The highest p-value for features to be kept.\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.feature_selection import SelectFpr, chi2\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> X.shape\n (569, 30)\n >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)\n >>> X_new.shape\n (569, 16)\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n f_regression : F-value between label/feature for regression tasks.\n mutual_info_regression : Mutual information for a continuous target.\n SelectPercentile : Select features based on percentile of the highest\n scores.\n SelectKBest : Select features based on the k highest scores.\n SelectFdr : Select features based on an estimated false discovery rate.\n SelectFwe : Select features based on family-wise error rate.\n GenericUnivariateSelect : Univariate feature selector with configurable\n mode.\n \"\"\"\n def __init__(self, score_func=f_classif, *, alpha=5e-2):\n super().__init__(score_func=score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n return self.pvalues_ < self.alpha\n\n\nclass SelectFdr(_BaseFilter):\n \"\"\"Filter: Select the p-values for an estimated false discovery rate\n\n This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound\n on the expected false discovery rate.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See Also\"). The default function only\n works with classification tasks.\n\n alpha : float, default=5e-2\n The highest uncorrected p-value for features to keep.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.feature_selection import SelectFdr, chi2\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> X.shape\n (569, 30)\n >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)\n >>> X_new.shape\n (569, 16)\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n References\n ----------\n https://en.wikipedia.org/wiki/False_discovery_rate\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif : Mutual information for a discrete target.\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n mutual_info_regression : Mutual information for a contnuous target.\n SelectPercentile : Select features based on percentile of the highest\n scores.\n SelectKBest : Select features based on the k highest scores.\n SelectFpr : Select features based on a false positive rate test.\n SelectFwe : Select features based on family-wise error rate.\n GenericUnivariateSelect : Univariate feature selector with configurable\n mode.\n \"\"\"\n def __init__(self, score_func=f_classif, *, alpha=5e-2):\n super().__init__(score_func=score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n n_features = len(self.pvalues_)\n sv = np.sort(self.pvalues_)\n selected = sv[sv <= float(self.alpha) / n_features *\n np.arange(1, n_features + 1)]\n if selected.size == 0:\n return np.zeros_like(self.pvalues_, dtype=bool)\n return self.pvalues_ <= selected.max()\n\n\nclass SelectFwe(_BaseFilter):\n \"\"\"Filter: Select the p-values corresponding to Family-wise error rate\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See Also\"). The default function only\n works with classification tasks.\n\n alpha : float, default=5e-2\n The highest uncorrected p-value for features to keep.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.feature_selection import SelectFwe, chi2\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> X.shape\n (569, 30)\n >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)\n >>> X_new.shape\n (569, 15)\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n SelectPercentile : Select features based on percentile of the highest\n scores.\n SelectKBest : Select features based on the k highest scores.\n SelectFpr : Select features based on a false positive rate test.\n SelectFdr : Select features based on an estimated false discovery rate.\n GenericUnivariateSelect : Univariate feature selector with configurable\n mode.\n \"\"\"\n def __init__(self, score_func=f_classif, *, alpha=5e-2):\n super().__init__(score_func=score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n return (self.pvalues_ < self.alpha / len(self.pvalues_))\n\n\n######################################################################\n# Generic filter\n######################################################################\n\n# TODO this class should fit on either p-values or scores,\n# depending on the mode.\nclass GenericUnivariateSelect(_BaseFilter):\n \"\"\"Univariate feature selector with configurable strategy.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable, default=f_classif\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues). For modes 'percentile' or 'kbest' it can return\n a single array scores.\n\n mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'\n Feature selection mode.\n\n param : float or int depending on the feature selection mode, default=1e-5\n Parameter of the corresponding mode.\n\n Attributes\n ----------\n scores_ : array-like of shape (n_features,)\n Scores of features.\n\n pvalues_ : array-like of shape (n_features,)\n p-values of feature scores, None if `score_func` returned scores only.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> X.shape\n (569, 30)\n >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20)\n >>> X_new = transformer.fit_transform(X, y)\n >>> X_new.shape\n (569, 20)\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif : Mutual information for a discrete target.\n chi2 : Chi-squared stats of non-negative features for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n mutual_info_regression : Mutual information for a continuous target.\n SelectPercentile : Select features based on percentile of the highest\n scores.\n SelectKBest : Select features based on the k highest scores.\n SelectFpr : Select features based on a false positive rate test.\n SelectFdr : Select features based on an estimated false discovery rate.\n SelectFwe : Select features based on family-wise error rate.\n \"\"\"\n\n _selection_modes: dict = {'percentile': SelectPercentile,\n 'k_best': SelectKBest,\n 'fpr': SelectFpr,\n 'fdr': SelectFdr,\n 'fwe': SelectFwe}\n\n def __init__(self, score_func=f_classif, *, mode='percentile', param=1e-5):\n super().__init__(score_func=score_func)\n self.mode = mode\n self.param = param\n\n def _make_selector(self):\n selector = self._selection_modes[self.mode](score_func=self.score_func)\n\n # Now perform some acrobatics to set the right named parameter in\n # the selector\n possible_params = selector._get_param_names()\n possible_params.remove('score_func')\n selector.set_params(**{possible_params[0]: self.param})\n\n return selector\n\n def _check_params(self, X, y):\n if self.mode not in self._selection_modes:\n raise ValueError(\"The mode passed should be one of %s, %r,\"\n \" (type %s) was passed.\"\n % (self._selection_modes.keys(), self.mode,\n type(self.mode)))\n\n self._make_selector()._check_params(X, y)\n\n def _get_support_mask(self):\n check_is_fitted(self)\n\n selector = self._make_selector()\n selector.pvalues_ = self.pvalues_\n selector.scores_ = self.scores_\n return selector._get_support_mask()\n", "\nfrom functools import partial\nfrom inspect import signature\nfrom itertools import product\nfrom itertools import chain\nfrom itertools import permutations\n\nimport numpy as np\nimport scipy.sparse as sp\n\nimport pytest\n\nfrom sklearn.datasets import make_multilabel_classification\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.utils.multiclass import type_of_target\nfrom sklearn.utils.validation import _num_samples\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.utils import shuffle\n\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_less\nfrom sklearn.utils._testing import ignore_warnings\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import brier_score_loss\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import coverage_error\nfrom sklearn.metrics import det_curve\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics 
import f1_score\nfrom sklearn.metrics import fbeta_score\nfrom sklearn.metrics import hamming_loss\nfrom sklearn.metrics import hinge_loss\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.metrics import label_ranking_average_precision_score\nfrom sklearn.metrics import label_ranking_loss\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import max_error\nfrom sklearn.metrics import matthews_corrcoef\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_absolute_percentage_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_tweedie_deviance\nfrom sklearn.metrics import mean_poisson_deviance\nfrom sklearn.metrics import mean_gamma_deviance\nfrom sklearn.metrics import median_absolute_error\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom sklearn.metrics import mean_pinball_loss\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn.metrics import ndcg_score\nfrom sklearn.metrics import dcg_score\nfrom sklearn.metrics import top_k_accuracy_score\n\nfrom sklearn.metrics._base import _average_binary_score\n\n\n# Note toward developers about metric testing\n# -------------------------------------------\n# It is often possible to write one general test for several metrics:\n#\n# - invariance properties, e.g. invariance to sample order\n# - common behavior for an argument, e.g. the \"normalize\" with value True\n# will return the mean of the metrics and with value False will return\n# the sum of the metrics.\n#\n# In order to improve the overall metric testing, it is a good idea to write\n# first a specific test for the given metric and then add a general test for\n# all metrics that have the same behavior.\n#\n# Two types of datastructures are used in order to implement this system:\n# dictionaries of metrics and lists of metrics wit common properties.\n#\n# Dictionaries of metrics\n# ------------------------\n# The goal of having those dictionaries is to have an easy way to call a\n# particular metric and associate a name to each function:\n#\n# - REGRESSION_METRICS: all regression metrics.\n# - CLASSIFICATION_METRICS: all classification metrics\n# which compare a ground truth and the estimated targets as returned by a\n# classifier.\n# - THRESHOLDED_METRICS: all classification metrics which\n# compare a ground truth and a score, e.g. estimated probabilities or\n# decision function (format might vary)\n#\n# Those dictionaries will be used to test systematically some invariance\n# properties, e.g. 
invariance toward several input layout.\n#\n\nREGRESSION_METRICS = {\n \"max_error\": max_error,\n \"mean_absolute_error\": mean_absolute_error,\n \"mean_squared_error\": mean_squared_error,\n \"mean_pinball_loss\": mean_pinball_loss,\n \"median_absolute_error\": median_absolute_error,\n \"mean_absolute_percentage_error\": mean_absolute_percentage_error,\n \"explained_variance_score\": explained_variance_score,\n \"r2_score\": partial(r2_score, multioutput='variance_weighted'),\n \"mean_normal_deviance\": partial(mean_tweedie_deviance, power=0),\n \"mean_poisson_deviance\": mean_poisson_deviance,\n \"mean_gamma_deviance\": mean_gamma_deviance,\n \"mean_compound_poisson_deviance\":\n partial(mean_tweedie_deviance, power=1.4),\n}\n\nCLASSIFICATION_METRICS = {\n \"accuracy_score\": accuracy_score,\n \"balanced_accuracy_score\": balanced_accuracy_score,\n \"adjusted_balanced_accuracy_score\": partial(balanced_accuracy_score,\n adjusted=True),\n \"unnormalized_accuracy_score\": partial(accuracy_score, normalize=False),\n\n # `confusion_matrix` returns absolute values and hence behaves unnormalized\n # . Naming it with an unnormalized_ prefix is necessary for this module to\n # skip sample_weight scaling checks which will fail for unnormalized\n # metrics.\n \"unnormalized_confusion_matrix\": confusion_matrix,\n \"normalized_confusion_matrix\": lambda *args, **kwargs: (\n confusion_matrix(*args, **kwargs).astype('float') / confusion_matrix(\n *args, **kwargs).sum(axis=1)[:, np.newaxis]\n ),\n\n \"unnormalized_multilabel_confusion_matrix\": multilabel_confusion_matrix,\n \"unnormalized_multilabel_confusion_matrix_sample\":\n partial(multilabel_confusion_matrix, samplewise=True),\n \"hamming_loss\": hamming_loss,\n\n \"zero_one_loss\": zero_one_loss,\n \"unnormalized_zero_one_loss\": partial(zero_one_loss, normalize=False),\n\n # These are needed to test averaging\n \"jaccard_score\": jaccard_score,\n \"precision_score\": precision_score,\n \"recall_score\": recall_score,\n \"f1_score\": f1_score,\n \"f2_score\": partial(fbeta_score, beta=2),\n \"f0.5_score\": partial(fbeta_score, beta=0.5),\n \"matthews_corrcoef_score\": matthews_corrcoef,\n\n \"weighted_f0.5_score\": partial(fbeta_score, average=\"weighted\", beta=0.5),\n \"weighted_f1_score\": partial(f1_score, average=\"weighted\"),\n \"weighted_f2_score\": partial(fbeta_score, average=\"weighted\", beta=2),\n \"weighted_precision_score\": partial(precision_score, average=\"weighted\"),\n \"weighted_recall_score\": partial(recall_score, average=\"weighted\"),\n \"weighted_jaccard_score\": partial(jaccard_score, average=\"weighted\"),\n\n \"micro_f0.5_score\": partial(fbeta_score, average=\"micro\", beta=0.5),\n \"micro_f1_score\": partial(f1_score, average=\"micro\"),\n \"micro_f2_score\": partial(fbeta_score, average=\"micro\", beta=2),\n \"micro_precision_score\": partial(precision_score, average=\"micro\"),\n \"micro_recall_score\": partial(recall_score, average=\"micro\"),\n \"micro_jaccard_score\": partial(jaccard_score, average=\"micro\"),\n\n \"macro_f0.5_score\": partial(fbeta_score, average=\"macro\", beta=0.5),\n \"macro_f1_score\": partial(f1_score, average=\"macro\"),\n \"macro_f2_score\": partial(fbeta_score, average=\"macro\", beta=2),\n \"macro_precision_score\": partial(precision_score, average=\"macro\"),\n \"macro_recall_score\": partial(recall_score, average=\"macro\"),\n \"macro_jaccard_score\": partial(jaccard_score, average=\"macro\"),\n\n \"samples_f0.5_score\": partial(fbeta_score, average=\"samples\", beta=0.5),\n 
\"samples_f1_score\": partial(f1_score, average=\"samples\"),\n \"samples_f2_score\": partial(fbeta_score, average=\"samples\", beta=2),\n \"samples_precision_score\": partial(precision_score, average=\"samples\"),\n \"samples_recall_score\": partial(recall_score, average=\"samples\"),\n \"samples_jaccard_score\": partial(jaccard_score, average=\"samples\"),\n\n \"cohen_kappa_score\": cohen_kappa_score,\n}\n\n\ndef precision_recall_curve_padded_thresholds(*args, **kwargs):\n \"\"\"\n The dimensions of precision-recall pairs and the threshold array as\n returned by the precision_recall_curve do not match. See\n func:`sklearn.metrics.precision_recall_curve`\n\n This prevents implicit conversion of return value triple to an higher\n dimensional np.array of dtype('float64') (it will be of dtype('object)\n instead). This again is needed for assert_array_equal to work correctly.\n\n As a workaround we pad the threshold array with NaN values to match\n the dimension of precision and recall arrays respectively.\n \"\"\"\n precision, recall, thresholds = precision_recall_curve(*args, **kwargs)\n\n pad_threshholds = len(precision) - len(thresholds)\n\n return np.array([\n precision,\n recall,\n np.pad(thresholds.astype(np.float64),\n pad_width=(0, pad_threshholds),\n mode='constant',\n constant_values=[np.nan])\n ])\n\n\nCURVE_METRICS = {\n \"roc_curve\": roc_curve,\n \"precision_recall_curve\": precision_recall_curve_padded_thresholds,\n \"det_curve\": det_curve,\n}\n\nTHRESHOLDED_METRICS = {\n \"coverage_error\": coverage_error,\n \"label_ranking_loss\": label_ranking_loss,\n \"log_loss\": log_loss,\n \"unnormalized_log_loss\": partial(log_loss, normalize=False),\n\n \"hinge_loss\": hinge_loss,\n\n \"brier_score_loss\": brier_score_loss,\n\n \"roc_auc_score\": roc_auc_score, # default: average=\"macro\"\n \"weighted_roc_auc\": partial(roc_auc_score, average=\"weighted\"),\n \"samples_roc_auc\": partial(roc_auc_score, average=\"samples\"),\n \"micro_roc_auc\": partial(roc_auc_score, average=\"micro\"),\n \"ovr_roc_auc\": partial(roc_auc_score, average=\"macro\", multi_class='ovr'),\n \"weighted_ovr_roc_auc\": partial(roc_auc_score, average=\"weighted\",\n multi_class='ovr'),\n \"ovo_roc_auc\": partial(roc_auc_score, average=\"macro\", multi_class='ovo'),\n \"weighted_ovo_roc_auc\": partial(roc_auc_score, average=\"weighted\",\n multi_class='ovo'),\n \"partial_roc_auc\": partial(roc_auc_score, max_fpr=0.5),\n\n \"average_precision_score\":\n average_precision_score, # default: average=\"macro\"\n \"weighted_average_precision_score\":\n partial(average_precision_score, average=\"weighted\"),\n \"samples_average_precision_score\":\n partial(average_precision_score, average=\"samples\"),\n \"micro_average_precision_score\":\n partial(average_precision_score, average=\"micro\"),\n \"label_ranking_average_precision_score\":\n label_ranking_average_precision_score,\n \"ndcg_score\": ndcg_score,\n \"dcg_score\": dcg_score,\n\n \"top_k_accuracy_score\": top_k_accuracy_score\n}\n\nALL_METRICS = dict()\nALL_METRICS.update(THRESHOLDED_METRICS)\nALL_METRICS.update(CLASSIFICATION_METRICS)\nALL_METRICS.update(REGRESSION_METRICS)\nALL_METRICS.update(CURVE_METRICS)\n\n# Lists of metrics with common properties\n# ---------------------------------------\n# Lists of metrics with common properties are used to test systematically some\n# functionalities and invariance, e.g. 
SYMMETRIC_METRICS lists all metrics that\n# are symmetric with respect to their input argument y_true and y_pred.\n#\n# When you add a new metric or functionality, check if a general test\n# is already written.\n\n# Those metrics don't support binary inputs\nMETRIC_UNDEFINED_BINARY = {\n \"samples_f0.5_score\",\n \"samples_f1_score\",\n \"samples_f2_score\",\n \"samples_precision_score\",\n \"samples_recall_score\",\n \"samples_jaccard_score\",\n \"coverage_error\",\n \"unnormalized_multilabel_confusion_matrix_sample\",\n \"label_ranking_loss\",\n \"label_ranking_average_precision_score\",\n \"dcg_score\",\n \"ndcg_score\"\n}\n\n# Those metrics don't support multiclass inputs\nMETRIC_UNDEFINED_MULTICLASS = {\n \"brier_score_loss\",\n\n \"micro_roc_auc\",\n \"samples_roc_auc\",\n \"partial_roc_auc\",\n \"roc_auc_score\",\n \"weighted_roc_auc\",\n\n \"average_precision_score\",\n \"weighted_average_precision_score\",\n \"micro_average_precision_score\",\n \"samples_average_precision_score\",\n\n \"jaccard_score\",\n\n # with default average='binary', multiclass is prohibited\n \"precision_score\",\n \"recall_score\",\n \"f1_score\",\n \"f2_score\",\n \"f0.5_score\",\n\n # curves\n \"roc_curve\",\n \"precision_recall_curve\",\n \"det_curve\",\n}\n\n# Metric undefined with \"binary\" or \"multiclass\" input\nMETRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(\n METRIC_UNDEFINED_MULTICLASS)\n\n# Metrics with an \"average\" argument\nMETRICS_WITH_AVERAGING = {\n \"precision_score\", \"recall_score\", \"f1_score\", \"f2_score\", \"f0.5_score\",\n \"jaccard_score\"\n}\n\n# Threshold-based metrics with an \"average\" argument\nTHRESHOLDED_METRICS_WITH_AVERAGING = {\n \"roc_auc_score\", \"average_precision_score\", \"partial_roc_auc\",\n}\n\n# Metrics with a \"pos_label\" argument\nMETRICS_WITH_POS_LABEL = {\n \"roc_curve\",\n \"precision_recall_curve\",\n \"det_curve\",\n\n \"brier_score_loss\",\n\n \"precision_score\", \"recall_score\", \"f1_score\", \"f2_score\", \"f0.5_score\",\n \"jaccard_score\",\n\n \"average_precision_score\",\n \"weighted_average_precision_score\",\n \"micro_average_precision_score\",\n \"samples_average_precision_score\",\n}\n\n# Metrics with a \"labels\" argument\n# TODO: Handle multi_class metrics that has a labels argument as well as a\n# decision function argument. 
e.g hinge_loss\nMETRICS_WITH_LABELS = {\n \"unnormalized_confusion_matrix\",\n \"normalized_confusion_matrix\",\n \"roc_curve\",\n \"precision_recall_curve\",\n \"det_curve\",\n\n \"precision_score\", \"recall_score\", \"f1_score\", \"f2_score\", \"f0.5_score\",\n \"jaccard_score\",\n\n \"weighted_f0.5_score\", \"weighted_f1_score\", \"weighted_f2_score\",\n \"weighted_precision_score\", \"weighted_recall_score\",\n \"weighted_jaccard_score\",\n\n \"micro_f0.5_score\", \"micro_f1_score\", \"micro_f2_score\",\n \"micro_precision_score\", \"micro_recall_score\",\n \"micro_jaccard_score\",\n\n \"macro_f0.5_score\", \"macro_f1_score\", \"macro_f2_score\",\n \"macro_precision_score\", \"macro_recall_score\",\n \"macro_jaccard_score\",\n\n \"unnormalized_multilabel_confusion_matrix\",\n \"unnormalized_multilabel_confusion_matrix_sample\",\n\n \"cohen_kappa_score\",\n}\n\n# Metrics with a \"normalize\" option\nMETRICS_WITH_NORMALIZE_OPTION = {\n \"accuracy_score\",\n \"top_k_accuracy_score\",\n \"zero_one_loss\",\n}\n\n# Threshold-based metrics with \"multilabel-indicator\" format support\nTHRESHOLDED_MULTILABEL_METRICS = {\n \"log_loss\",\n \"unnormalized_log_loss\",\n\n \"roc_auc_score\", \"weighted_roc_auc\", \"samples_roc_auc\",\n \"micro_roc_auc\", \"partial_roc_auc\",\n\n \"average_precision_score\", \"weighted_average_precision_score\",\n \"samples_average_precision_score\", \"micro_average_precision_score\",\n\n \"coverage_error\", \"label_ranking_loss\",\n\n \"ndcg_score\",\n \"dcg_score\",\n\n \"label_ranking_average_precision_score\",\n}\n\n# Classification metrics with \"multilabel-indicator\" format\nMULTILABELS_METRICS = {\n \"accuracy_score\", \"unnormalized_accuracy_score\",\n \"hamming_loss\",\n \"zero_one_loss\", \"unnormalized_zero_one_loss\",\n\n \"weighted_f0.5_score\", \"weighted_f1_score\", \"weighted_f2_score\",\n \"weighted_precision_score\", \"weighted_recall_score\",\n \"weighted_jaccard_score\",\n\n \"macro_f0.5_score\", \"macro_f1_score\", \"macro_f2_score\",\n \"macro_precision_score\", \"macro_recall_score\",\n \"macro_jaccard_score\",\n\n \"micro_f0.5_score\", \"micro_f1_score\", \"micro_f2_score\",\n \"micro_precision_score\", \"micro_recall_score\",\n \"micro_jaccard_score\",\n\n \"unnormalized_multilabel_confusion_matrix\",\n\n \"samples_f0.5_score\", \"samples_f1_score\", \"samples_f2_score\",\n \"samples_precision_score\", \"samples_recall_score\",\n \"samples_jaccard_score\",\n}\n\n# Regression metrics with \"multioutput-continuous\" format support\nMULTIOUTPUT_METRICS = {\n \"mean_absolute_error\", \"median_absolute_error\", \"mean_squared_error\",\n \"r2_score\", \"explained_variance_score\", \"mean_absolute_percentage_error\",\n \"mean_pinball_loss\"\n}\n\n# Symmetric with respect to their input arguments y_true and y_pred\n# metric(y_true, y_pred) == metric(y_pred, y_true).\nSYMMETRIC_METRICS = {\n \"accuracy_score\", \"unnormalized_accuracy_score\",\n \"hamming_loss\",\n \"zero_one_loss\", \"unnormalized_zero_one_loss\",\n\n \"micro_jaccard_score\", \"macro_jaccard_score\",\n \"jaccard_score\",\n \"samples_jaccard_score\",\n\n \"f1_score\", \"micro_f1_score\", \"macro_f1_score\",\n \"weighted_recall_score\",\n # P = R = F = accuracy in multiclass case\n \"micro_f0.5_score\", \"micro_f1_score\", \"micro_f2_score\",\n \"micro_precision_score\", \"micro_recall_score\",\n\n \"matthews_corrcoef_score\", \"mean_absolute_error\", \"mean_squared_error\",\n \"median_absolute_error\", \"max_error\",\n\n # Pinball loss is only symmetric for alpha=0.5 which is the 
default.\n \"mean_pinball_loss\",\n\n \"cohen_kappa_score\", \"mean_normal_deviance\"\n}\n\n# Asymmetric with respect to their input arguments y_true and y_pred\n# metric(y_true, y_pred) != metric(y_pred, y_true).\nNOT_SYMMETRIC_METRICS = {\n \"balanced_accuracy_score\",\n \"adjusted_balanced_accuracy_score\",\n \"explained_variance_score\",\n \"r2_score\",\n \"unnormalized_confusion_matrix\",\n \"normalized_confusion_matrix\",\n \"roc_curve\",\n \"precision_recall_curve\",\n \"det_curve\",\n\n \"precision_score\", \"recall_score\", \"f2_score\", \"f0.5_score\",\n\n \"weighted_f0.5_score\", \"weighted_f1_score\", \"weighted_f2_score\",\n \"weighted_precision_score\", \"weighted_jaccard_score\",\n \"unnormalized_multilabel_confusion_matrix\",\n\n \"macro_f0.5_score\", \"macro_f2_score\", \"macro_precision_score\",\n \"macro_recall_score\", \"log_loss\", \"hinge_loss\",\n \"mean_gamma_deviance\", \"mean_poisson_deviance\",\n \"mean_compound_poisson_deviance\", \"mean_absolute_percentage_error\"\n}\n\n\n# No Sample weight support\nMETRICS_WITHOUT_SAMPLE_WEIGHT = {\n \"median_absolute_error\",\n \"max_error\",\n \"ovo_roc_auc\",\n \"weighted_ovo_roc_auc\"\n}\n\nMETRICS_REQUIRE_POSITIVE_Y = {\n \"mean_poisson_deviance\",\n \"mean_gamma_deviance\",\n \"mean_compound_poisson_deviance\",\n}\n\n\ndef _require_positive_targets(y1, y2):\n \"\"\"Make targets strictly positive\"\"\"\n offset = abs(min(y1.min(), y2.min())) + 1\n y1 += offset\n y2 += offset\n return y1, y2\n\n\ndef test_symmetry_consistency():\n\n # We shouldn't forget any metrics\n assert ((SYMMETRIC_METRICS | NOT_SYMMETRIC_METRICS |\n set(THRESHOLDED_METRICS) | METRIC_UNDEFINED_BINARY_MULTICLASS) ==\n set(ALL_METRICS))\n\n assert (SYMMETRIC_METRICS & NOT_SYMMETRIC_METRICS) == set()\n\n\[email protected](\"name\", sorted(SYMMETRIC_METRICS))\ndef test_symmetric_metric(name):\n # Test the symmetry of score and loss functions\n random_state = check_random_state(0)\n y_true = random_state.randint(0, 2, size=(20, ))\n y_pred = random_state.randint(0, 2, size=(20, ))\n\n if name in METRICS_REQUIRE_POSITIVE_Y:\n y_true, y_pred = _require_positive_targets(y_true, y_pred)\n\n y_true_bin = random_state.randint(0, 2, size=(20, 25))\n y_pred_bin = random_state.randint(0, 2, size=(20, 25))\n\n metric = ALL_METRICS[name]\n if name in METRIC_UNDEFINED_BINARY:\n if name in MULTILABELS_METRICS:\n assert_allclose(metric(y_true_bin, y_pred_bin),\n metric(y_pred_bin, y_true_bin),\n err_msg=\"%s is not symmetric\" % name)\n else:\n assert False, \"This case is currently unhandled\"\n else:\n assert_allclose(metric(y_true, y_pred),\n metric(y_pred, y_true),\n err_msg=\"%s is not symmetric\" % name)\n\n\[email protected](\"name\", sorted(NOT_SYMMETRIC_METRICS))\ndef test_not_symmetric_metric(name):\n # Test the symmetry of score and loss functions\n random_state = check_random_state(0)\n y_true = random_state.randint(0, 2, size=(20, ))\n y_pred = random_state.randint(0, 2, size=(20, ))\n\n if name in METRICS_REQUIRE_POSITIVE_Y:\n y_true, y_pred = _require_positive_targets(y_true, y_pred)\n\n metric = ALL_METRICS[name]\n\n # use context manager to supply custom error message\n with pytest.raises(AssertionError):\n assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))\n raise ValueError(\"%s seems to be symmetric\" % name)\n\n\[email protected](\n 'name',\n sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))\ndef test_sample_order_invariance(name):\n random_state = check_random_state(0)\n y_true = random_state.randint(0, 2, size=(20, 
))\n y_pred = random_state.randint(0, 2, size=(20, ))\n\n if name in METRICS_REQUIRE_POSITIVE_Y:\n y_true, y_pred = _require_positive_targets(y_true, y_pred)\n\n y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)\n\n with ignore_warnings():\n metric = ALL_METRICS[name]\n assert_allclose(metric(y_true, y_pred),\n metric(y_true_shuffle, y_pred_shuffle),\n err_msg=\"%s is not sample order invariant\" % name)\n\n\n@ignore_warnings\ndef test_sample_order_invariance_multilabel_and_multioutput():\n random_state = check_random_state(0)\n\n # Generate some data\n y_true = random_state.randint(0, 2, size=(20, 25))\n y_pred = random_state.randint(0, 2, size=(20, 25))\n y_score = random_state.normal(size=y_true.shape)\n\n y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,\n y_pred,\n y_score,\n random_state=0)\n\n for name in MULTILABELS_METRICS:\n metric = ALL_METRICS[name]\n assert_allclose(metric(y_true, y_pred),\n metric(y_true_shuffle, y_pred_shuffle),\n err_msg=\"%s is not sample order invariant\" % name)\n\n for name in THRESHOLDED_MULTILABEL_METRICS:\n metric = ALL_METRICS[name]\n assert_allclose(metric(y_true, y_score),\n metric(y_true_shuffle, y_score_shuffle),\n err_msg=\"%s is not sample order invariant\" % name)\n\n for name in MULTIOUTPUT_METRICS:\n metric = ALL_METRICS[name]\n assert_allclose(metric(y_true, y_score),\n metric(y_true_shuffle, y_score_shuffle),\n err_msg=\"%s is not sample order invariant\" % name)\n assert_allclose(metric(y_true, y_pred),\n metric(y_true_shuffle, y_pred_shuffle),\n err_msg=\"%s is not sample order invariant\" % name)\n\n\[email protected](\n 'name',\n sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))\ndef test_format_invariance_with_1d_vectors(name):\n random_state = check_random_state(0)\n y1 = random_state.randint(0, 2, size=(20, ))\n y2 = random_state.randint(0, 2, size=(20, ))\n\n if name in METRICS_REQUIRE_POSITIVE_Y:\n y1, y2 = _require_positive_targets(y1, y2)\n\n y1_list = list(y1)\n y2_list = list(y2)\n\n y1_1d, y2_1d = np.array(y1), np.array(y2)\n assert_array_equal(y1_1d.ndim, 1)\n assert_array_equal(y2_1d.ndim, 1)\n y1_column = np.reshape(y1_1d, (-1, 1))\n y2_column = np.reshape(y2_1d, (-1, 1))\n y1_row = np.reshape(y1_1d, (1, -1))\n y2_row = np.reshape(y2_1d, (1, -1))\n\n with ignore_warnings():\n metric = ALL_METRICS[name]\n\n measure = metric(y1, y2)\n\n assert_allclose(metric(y1_list, y2_list), measure,\n err_msg=\"%s is not representation invariant with list\"\n \"\" % name)\n\n assert_allclose(metric(y1_1d, y2_1d), measure,\n err_msg=\"%s is not representation invariant with \"\n \"np-array-1d\" % name)\n\n assert_allclose(metric(y1_column, y2_column), measure,\n err_msg=\"%s is not representation invariant with \"\n \"np-array-column\" % name)\n\n # Mix format support\n assert_allclose(metric(y1_1d, y2_list), measure,\n err_msg=\"%s is not representation invariant with mix \"\n \"np-array-1d and list\" % name)\n\n assert_allclose(metric(y1_list, y2_1d), measure,\n err_msg=\"%s is not representation invariant with mix \"\n \"np-array-1d and list\" % name)\n\n assert_allclose(metric(y1_1d, y2_column), measure,\n err_msg=\"%s is not representation invariant with mix \"\n \"np-array-1d and np-array-column\" % name)\n\n assert_allclose(metric(y1_column, y2_1d), measure,\n err_msg=\"%s is not representation invariant with mix \"\n \"np-array-1d and np-array-column\" % name)\n\n assert_allclose(metric(y1_list, y2_column), measure,\n err_msg=\"%s is not representation invariant with mix \"\n 
\"list and np-array-column\" % name)\n\n assert_allclose(metric(y1_column, y2_list), measure,\n err_msg=\"%s is not representation invariant with mix \"\n \"list and np-array-column\" % name)\n\n # These mix representations aren't allowed\n with pytest.raises(ValueError):\n metric(y1_1d, y2_row)\n with pytest.raises(ValueError):\n metric(y1_row, y2_1d)\n with pytest.raises(ValueError):\n metric(y1_list, y2_row)\n with pytest.raises(ValueError):\n metric(y1_row, y2_list)\n with pytest.raises(ValueError):\n metric(y1_column, y2_row)\n with pytest.raises(ValueError):\n metric(y1_row, y2_column)\n\n # NB: We do not test for y1_row, y2_row as these may be\n # interpreted as multilabel or multioutput data.\n if (name not in (MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS |\n MULTILABELS_METRICS)):\n with pytest.raises(ValueError):\n metric(y1_row, y2_row)\n\n\[email protected](\n 'name',\n sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))\ndef test_classification_invariance_string_vs_numbers_labels(name):\n # Ensure that classification metrics with string labels are invariant\n random_state = check_random_state(0)\n y1 = random_state.randint(0, 2, size=(20, ))\n y2 = random_state.randint(0, 2, size=(20, ))\n\n y1_str = np.array([\"eggs\", \"spam\"])[y1]\n y2_str = np.array([\"eggs\", \"spam\"])[y2]\n\n pos_label_str = \"spam\"\n labels_str = [\"eggs\", \"spam\"]\n\n with ignore_warnings():\n metric = CLASSIFICATION_METRICS[name]\n measure_with_number = metric(y1, y2)\n\n # Ugly, but handle case with a pos_label and label\n metric_str = metric\n if name in METRICS_WITH_POS_LABEL:\n metric_str = partial(metric_str, pos_label=pos_label_str)\n\n measure_with_str = metric_str(y1_str, y2_str)\n\n assert_array_equal(measure_with_number, measure_with_str,\n err_msg=\"{0} failed string vs number invariance \"\n \"test\".format(name))\n\n measure_with_strobj = metric_str(y1_str.astype('O'),\n y2_str.astype('O'))\n assert_array_equal(measure_with_number, measure_with_strobj,\n err_msg=\"{0} failed string object vs number \"\n \"invariance test\".format(name))\n\n if name in METRICS_WITH_LABELS:\n metric_str = partial(metric_str, labels=labels_str)\n measure_with_str = metric_str(y1_str, y2_str)\n assert_array_equal(measure_with_number, measure_with_str,\n err_msg=\"{0} failed string vs number \"\n \"invariance test\".format(name))\n\n measure_with_strobj = metric_str(y1_str.astype('O'),\n y2_str.astype('O'))\n assert_array_equal(measure_with_number, measure_with_strobj,\n err_msg=\"{0} failed string vs number \"\n \"invariance test\".format(name))\n\n\[email protected]('name', THRESHOLDED_METRICS)\ndef test_thresholded_invariance_string_vs_numbers_labels(name):\n # Ensure that thresholded metrics with string labels are invariant\n random_state = check_random_state(0)\n y1 = random_state.randint(0, 2, size=(20, ))\n y2 = random_state.randint(0, 2, size=(20, ))\n\n y1_str = np.array([\"eggs\", \"spam\"])[y1]\n\n pos_label_str = \"spam\"\n\n with ignore_warnings():\n metric = THRESHOLDED_METRICS[name]\n if name not in METRIC_UNDEFINED_BINARY:\n # Ugly, but handle case with a pos_label and label\n metric_str = metric\n if name in METRICS_WITH_POS_LABEL:\n metric_str = partial(metric_str, pos_label=pos_label_str)\n\n measure_with_number = metric(y1, y2)\n measure_with_str = metric_str(y1_str, y2)\n assert_array_equal(measure_with_number, measure_with_str,\n err_msg=\"{0} failed string vs number \"\n \"invariance test\".format(name))\n\n measure_with_strobj = 
metric_str(y1_str.astype('O'), y2)\n assert_array_equal(measure_with_number, measure_with_strobj,\n err_msg=\"{0} failed string object vs number \"\n \"invariance test\".format(name))\n else:\n # TODO those metrics doesn't support string label yet\n with pytest.raises(ValueError):\n metric(y1_str, y2)\n with pytest.raises(ValueError):\n metric(y1_str.astype('O'), y2)\n\n\ninvalids_nan_inf = [\n ([0, 1], [np.inf, np.inf]),\n ([0, 1], [np.nan, np.nan]),\n ([0, 1], [np.nan, np.inf]),\n ([0, 1], [np.inf, 1]),\n ([0, 1], [np.nan, 1]),\n]\n\n\[email protected](\n 'metric',\n chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values())\n)\[email protected](\"y_true, y_score\", invalids_nan_inf)\ndef test_regression_thresholded_inf_nan_input(metric, y_true, y_score):\n with pytest.raises(ValueError, match=\"contains NaN, infinity\"):\n metric(y_true, y_score)\n\n\[email protected]('metric', CLASSIFICATION_METRICS.values())\[email protected](\n \"y_true, y_score\",\n invalids_nan_inf +\n # Add an additional case for classification only\n # non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/6809\n [([np.nan, 1, 2], [1, 2, 3])] # type: ignore\n)\ndef test_classification_inf_nan_input(metric, y_true, y_score):\n \"\"\"check that classification metrics raise a message mentioning the\n occurrence of non-finite values in the target vectors.\"\"\"\n err_msg = \"Input contains NaN, infinity or a value too large\"\n with pytest.raises(ValueError, match=err_msg):\n metric(y_true, y_score)\n\n\[email protected]('metric', CLASSIFICATION_METRICS.values())\ndef test_classification_binary_continuous_input(metric):\n \"\"\"check that classification metrics raise a message of mixed type data\n with continuous/binary target vectors.\"\"\"\n y_true, y_score = ['a', 'b', 'a'], [0.1, 0.2, 0.3]\n err_msg = (\n \"Classification metrics can't handle a mix of binary and continuous \"\n \"targets\"\n )\n with pytest.raises(ValueError, match=err_msg):\n metric(y_true, y_score)\n\n\n@ignore_warnings\ndef check_single_sample(name):\n # Non-regression test: scores should work with a single sample.\n # This is important for leave-one-out cross validation.\n # Score functions tested are those that formerly called np.squeeze,\n # which turns an array of size 1 into a 0-d array (!).\n metric = ALL_METRICS[name]\n\n # assert that no exception is thrown\n if name in METRICS_REQUIRE_POSITIVE_Y:\n values = [1, 2]\n else:\n values = [0, 1]\n for i, j in product(values, repeat=2):\n metric([i], [j])\n\n\n@ignore_warnings\ndef check_single_sample_multioutput(name):\n metric = ALL_METRICS[name]\n for i, j, k, l in product([0, 1], repeat=4):\n metric(np.array([[i, j]]), np.array([[k, l]]))\n\n\[email protected](\n 'name',\n sorted(\n set(ALL_METRICS)\n # Those metrics are not always defined with one sample\n # or in multiclass classification\n - METRIC_UNDEFINED_BINARY_MULTICLASS - set(THRESHOLDED_METRICS)))\ndef test_single_sample(name):\n check_single_sample(name)\n\n\[email protected]('name',\n sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS))\ndef test_single_sample_multioutput(name):\n check_single_sample_multioutput(name)\n\n\[email protected]('name', sorted(MULTIOUTPUT_METRICS))\ndef test_multioutput_number_of_output_differ(name):\n y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])\n y_pred = np.array([[0, 0], [1, 0], [0, 0]])\n\n metric = ALL_METRICS[name]\n with pytest.raises(ValueError):\n metric(y_true, y_pred)\n\n\[email protected]('name', sorted(MULTIOUTPUT_METRICS))\ndef 
test_multioutput_regression_invariance_to_dimension_shuffling(name):\n # test invariance to dimension shuffling\n random_state = check_random_state(0)\n y_true = random_state.uniform(0, 2, size=(20, 5))\n y_pred = random_state.uniform(0, 2, size=(20, 5))\n\n metric = ALL_METRICS[name]\n error = metric(y_true, y_pred)\n\n for _ in range(3):\n perm = random_state.permutation(y_true.shape[1])\n assert_allclose(metric(y_true[:, perm], y_pred[:, perm]),\n error,\n err_msg=\"%s is not dimension shuffling invariant\" % (\n name))\n\n\n@ignore_warnings\ndef test_multilabel_representation_invariance():\n # Generate some data\n n_classes = 4\n n_samples = 50\n\n _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,\n random_state=0, n_samples=n_samples,\n allow_unlabeled=True)\n _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,\n random_state=1, n_samples=n_samples,\n allow_unlabeled=True)\n\n # To make sure at least one empty label is present\n y1 = np.vstack([y1, [[0] * n_classes]])\n y2 = np.vstack([y2, [[0] * n_classes]])\n\n y1_sparse_indicator = sp.coo_matrix(y1)\n y2_sparse_indicator = sp.coo_matrix(y2)\n\n y1_list_array_indicator = list(y1)\n y2_list_array_indicator = list(y2)\n\n y1_list_list_indicator = [list(a) for a in y1_list_array_indicator]\n y2_list_list_indicator = [list(a) for a in y2_list_array_indicator]\n\n for name in MULTILABELS_METRICS:\n metric = ALL_METRICS[name]\n\n # XXX cruel hack to work with partial functions\n if isinstance(metric, partial):\n metric.__module__ = 'tmp'\n metric.__name__ = name\n\n measure = metric(y1, y2)\n\n # Check representation invariance\n assert_allclose(metric(y1_sparse_indicator, y2_sparse_indicator),\n measure,\n err_msg=\"%s failed representation invariance between \"\n \"dense and sparse indicator formats.\" % name)\n assert_almost_equal(metric(y1_list_list_indicator,\n y2_list_list_indicator),\n measure,\n err_msg=\"%s failed representation invariance \"\n \"between dense array and list of list \"\n \"indicator formats.\" % name)\n assert_almost_equal(metric(y1_list_array_indicator,\n y2_list_array_indicator),\n measure,\n err_msg=\"%s failed representation invariance \"\n \"between dense and list of array \"\n \"indicator formats.\" % name)\n\n\[email protected]('name', sorted(MULTILABELS_METRICS))\ndef test_raise_value_error_multilabel_sequences(name):\n # make sure the multilabel-sequence format raises ValueError\n multilabel_sequences = [\n [[1], [2], [0, 1]],\n [(), (2), (0, 1)],\n [[]],\n [()],\n np.array([[], [1, 2]], dtype='object')]\n\n metric = ALL_METRICS[name]\n for seq in multilabel_sequences:\n with pytest.raises(ValueError):\n metric(seq, seq)\n\n\[email protected]('name', sorted(METRICS_WITH_NORMALIZE_OPTION))\ndef test_normalize_option_binary_classification(name):\n # Test in the binary case\n n_classes = 2\n n_samples = 20\n random_state = check_random_state(0)\n\n y_true = random_state.randint(0, n_classes, size=(n_samples, ))\n y_pred = random_state.randint(0, n_classes, size=(n_samples, ))\n y_score = random_state.normal(size=y_true.shape)\n\n metrics = ALL_METRICS[name]\n pred = y_score if name in THRESHOLDED_METRICS else y_pred\n measure_normalized = metrics(y_true, pred, normalize=True)\n measure_not_normalized = metrics(y_true, pred, normalize=False)\n\n assert_array_less(-1.0 * measure_normalized, 0,\n err_msg=\"We failed to test correctly the normalize \"\n \"option\")\n\n assert_allclose(measure_normalized, measure_not_normalized / n_samples,\n err_msg=f\"Failed with 
{name}\")\n\n\[email protected]('name', sorted(METRICS_WITH_NORMALIZE_OPTION))\ndef test_normalize_option_multiclass_classification(name):\n # Test in the multiclass case\n n_classes = 4\n n_samples = 20\n random_state = check_random_state(0)\n\n y_true = random_state.randint(0, n_classes, size=(n_samples, ))\n y_pred = random_state.randint(0, n_classes, size=(n_samples, ))\n y_score = random_state.uniform(size=(n_samples, n_classes))\n\n metrics = ALL_METRICS[name]\n pred = y_score if name in THRESHOLDED_METRICS else y_pred\n measure_normalized = metrics(y_true, pred, normalize=True)\n measure_not_normalized = metrics(y_true, pred, normalize=False)\n\n assert_array_less(-1.0 * measure_normalized, 0,\n err_msg=\"We failed to test correctly the normalize \"\n \"option\")\n\n assert_allclose(measure_normalized, measure_not_normalized / n_samples,\n err_msg=f\"Failed with {name}\")\n\n\[email protected]('name', sorted(\n METRICS_WITH_NORMALIZE_OPTION.intersection(MULTILABELS_METRICS)\n))\ndef test_normalize_option_multilabel_classification(name):\n # Test in the multilabel case\n n_classes = 4\n n_samples = 100\n random_state = check_random_state(0)\n\n # for both random_state 0 and 1, y_true and y_pred has at least one\n # unlabelled entry\n _, y_true = make_multilabel_classification(n_features=1,\n n_classes=n_classes,\n random_state=0,\n allow_unlabeled=True,\n n_samples=n_samples)\n _, y_pred = make_multilabel_classification(n_features=1,\n n_classes=n_classes,\n random_state=1,\n allow_unlabeled=True,\n n_samples=n_samples)\n\n y_score = random_state.uniform(size=y_true.shape)\n\n # To make sure at least one empty label is present\n y_true += [0]*n_classes\n y_pred += [0]*n_classes\n\n metrics = ALL_METRICS[name]\n pred = y_score if name in THRESHOLDED_METRICS else y_pred\n measure_normalized = metrics(y_true, pred, normalize=True)\n measure_not_normalized = metrics(y_true, pred, normalize=False)\n\n assert_array_less(-1.0 * measure_normalized, 0,\n err_msg=\"We failed to test correctly the normalize \"\n \"option\")\n\n assert_allclose(measure_normalized, measure_not_normalized / n_samples,\n err_msg=f\"Failed with {name}\")\n\n\n@ignore_warnings\ndef _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,\n is_multilabel):\n n_samples, n_classes = y_true_binarize.shape\n\n # No averaging\n label_measure = metric(y_true, y_pred, average=None)\n assert_allclose(label_measure,\n [metric(y_true_binarize[:, i], y_pred_binarize[:, i])\n for i in range(n_classes)])\n\n # Micro measure\n micro_measure = metric(y_true, y_pred, average=\"micro\")\n assert_allclose(micro_measure,\n metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))\n\n # Macro measure\n macro_measure = metric(y_true, y_pred, average=\"macro\")\n assert_allclose(macro_measure, np.mean(label_measure))\n\n # Weighted measure\n weights = np.sum(y_true_binarize, axis=0, dtype=int)\n\n if np.sum(weights) != 0:\n weighted_measure = metric(y_true, y_pred, average=\"weighted\")\n assert_allclose(weighted_measure,\n np.average(label_measure, weights=weights))\n else:\n weighted_measure = metric(y_true, y_pred, average=\"weighted\")\n assert_allclose(weighted_measure, 0)\n\n # Sample measure\n if is_multilabel:\n sample_measure = metric(y_true, y_pred, average=\"samples\")\n assert_allclose(sample_measure,\n np.mean([metric(y_true_binarize[i], y_pred_binarize[i])\n for i in range(n_samples)]))\n\n with pytest.raises(ValueError):\n metric(y_true, y_pred, average=\"unknown\")\n with pytest.raises(ValueError):\n 
metric(y_true, y_pred, average=\"garbage\")\n\n\ndef check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,\n y_score):\n is_multilabel = type_of_target(y_true).startswith(\"multilabel\")\n\n metric = ALL_METRICS[name]\n\n if name in METRICS_WITH_AVERAGING:\n _check_averaging(metric, y_true, y_pred, y_true_binarize,\n y_pred_binarize, is_multilabel)\n elif name in THRESHOLDED_METRICS_WITH_AVERAGING:\n _check_averaging(metric, y_true, y_score, y_true_binarize,\n y_score, is_multilabel)\n else:\n raise ValueError(\"Metric is not recorded as having an average option\")\n\n\[email protected]('name', sorted(METRICS_WITH_AVERAGING))\ndef test_averaging_multiclass(name):\n n_samples, n_classes = 50, 3\n random_state = check_random_state(0)\n y_true = random_state.randint(0, n_classes, size=(n_samples, ))\n y_pred = random_state.randint(0, n_classes, size=(n_samples, ))\n y_score = random_state.uniform(size=(n_samples, n_classes))\n\n lb = LabelBinarizer().fit(y_true)\n y_true_binarize = lb.transform(y_true)\n y_pred_binarize = lb.transform(y_pred)\n\n check_averaging(name, y_true, y_true_binarize,\n y_pred, y_pred_binarize, y_score)\n\n\[email protected](\n 'name',\n sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING))\ndef test_averaging_multilabel(name):\n n_samples, n_classes = 40, 5\n _, y = make_multilabel_classification(n_features=1, n_classes=n_classes,\n random_state=5, n_samples=n_samples,\n allow_unlabeled=False)\n y_true = y[:20]\n y_pred = y[20:]\n y_score = check_random_state(0).normal(size=(20, n_classes))\n y_true_binarize = y_true\n y_pred_binarize = y_pred\n\n check_averaging(name, y_true, y_true_binarize,\n y_pred, y_pred_binarize, y_score)\n\n\[email protected]('name', sorted(METRICS_WITH_AVERAGING))\ndef test_averaging_multilabel_all_zeroes(name):\n y_true = np.zeros((20, 3))\n y_pred = np.zeros((20, 3))\n y_score = np.zeros((20, 3))\n y_true_binarize = y_true\n y_pred_binarize = y_pred\n\n check_averaging(name, y_true, y_true_binarize,\n y_pred, y_pred_binarize, y_score)\n\n\ndef test_averaging_binary_multilabel_all_zeroes():\n y_true = np.zeros((20, 3))\n y_pred = np.zeros((20, 3))\n y_true_binarize = y_true\n y_pred_binarize = y_pred\n # Test _average_binary_score for weight.sum() == 0\n binary_metric = (lambda y_true, y_score, average=\"macro\":\n _average_binary_score(\n precision_score, y_true, y_score, average))\n _check_averaging(binary_metric, y_true, y_pred, y_true_binarize,\n y_pred_binarize, is_multilabel=True)\n\n\[email protected]('name', sorted(METRICS_WITH_AVERAGING))\ndef test_averaging_multilabel_all_ones(name):\n y_true = np.ones((20, 3))\n y_pred = np.ones((20, 3))\n y_score = np.ones((20, 3))\n y_true_binarize = y_true\n y_pred_binarize = y_pred\n\n check_averaging(name, y_true, y_true_binarize,\n y_pred, y_pred_binarize, y_score)\n\n\n@ignore_warnings\ndef check_sample_weight_invariance(name, metric, y1, y2):\n rng = np.random.RandomState(0)\n sample_weight = rng.randint(1, 10, size=len(y1))\n\n # top_k_accuracy_score always lead to a perfect score for k > 1 in the\n # binary case\n metric = partial(metric, k=1) if name == \"top_k_accuracy_score\" else metric\n\n # check that unit weights gives the same score as no weight\n unweighted_score = metric(y1, y2, sample_weight=None)\n\n assert_allclose(\n unweighted_score,\n metric(y1, y2, sample_weight=np.ones(shape=len(y1))),\n err_msg=\"For %s sample_weight=None is not equivalent to \"\n \"sample_weight=ones\" % name)\n\n # check that the weighted and unweighted scores are 
unequal\n weighted_score = metric(y1, y2, sample_weight=sample_weight)\n\n # use context manager to supply custom error message\n with pytest.raises(AssertionError):\n assert_allclose(unweighted_score, weighted_score)\n raise ValueError(\"Unweighted and weighted scores are unexpectedly \"\n \"almost equal (%s) and (%s) \"\n \"for %s\" % (unweighted_score, weighted_score, name))\n\n # check that sample_weight can be a list\n weighted_score_list = metric(y1, y2,\n sample_weight=sample_weight.tolist())\n assert_allclose(\n weighted_score, weighted_score_list,\n err_msg=(\"Weighted scores for array and list \"\n \"sample_weight input are not equal (%s != %s) for %s\") % (\n weighted_score, weighted_score_list, name))\n\n # check that integer weights is the same as repeated samples\n repeat_weighted_score = metric(\n np.repeat(y1, sample_weight, axis=0),\n np.repeat(y2, sample_weight, axis=0), sample_weight=None)\n assert_allclose(\n weighted_score, repeat_weighted_score,\n err_msg=\"Weighting %s is not equal to repeating samples\" % name)\n\n # check that ignoring a fraction of the samples is equivalent to setting\n # the corresponding weights to zero\n sample_weight_subset = sample_weight[1::2]\n sample_weight_zeroed = np.copy(sample_weight)\n sample_weight_zeroed[::2] = 0\n y1_subset = y1[1::2]\n y2_subset = y2[1::2]\n weighted_score_subset = metric(y1_subset, y2_subset,\n sample_weight=sample_weight_subset)\n weighted_score_zeroed = metric(y1, y2,\n sample_weight=sample_weight_zeroed)\n assert_allclose(\n weighted_score_subset, weighted_score_zeroed,\n err_msg=(\"Zeroing weights does not give the same result as \"\n \"removing the corresponding samples (%s != %s) for %s\" %\n (weighted_score_zeroed, weighted_score_subset, name)))\n\n if not name.startswith('unnormalized'):\n # check that the score is invariant under scaling of the weights by a\n # common factor\n for scaling in [2, 0.3]:\n assert_allclose(\n weighted_score,\n metric(y1, y2, sample_weight=sample_weight * scaling),\n err_msg=\"%s sample_weight is not invariant \"\n \"under scaling\" % name)\n\n # Check that if number of samples in y_true and sample_weight are not\n # equal, meaningful error is raised.\n error_message = (r\"Found input variables with inconsistent numbers of \"\n r\"samples: \\[{}, {}, {}\\]\".format(\n _num_samples(y1), _num_samples(y2),\n _num_samples(sample_weight) * 2))\n with pytest.raises(ValueError, match=error_message):\n metric(y1, y2, sample_weight=np.hstack([sample_weight,\n sample_weight]))\n\n\[email protected](\n 'name',\n sorted(\n set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) -\n METRICS_WITHOUT_SAMPLE_WEIGHT))\ndef test_regression_sample_weight_invariance(name):\n n_samples = 50\n random_state = check_random_state(0)\n # regression\n y_true = random_state.random_sample(size=(n_samples,))\n y_pred = random_state.random_sample(size=(n_samples,))\n metric = ALL_METRICS[name]\n check_sample_weight_invariance(name, metric, y_true, y_pred)\n\n\[email protected](\n 'name',\n sorted(\n set(ALL_METRICS) - set(REGRESSION_METRICS) -\n METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))\ndef test_binary_sample_weight_invariance(name):\n # binary\n n_samples = 50\n random_state = check_random_state(0)\n y_true = random_state.randint(0, 2, size=(n_samples, ))\n y_pred = random_state.randint(0, 2, size=(n_samples, ))\n y_score = random_state.random_sample(size=(n_samples,))\n metric = ALL_METRICS[name]\n if name in THRESHOLDED_METRICS:\n check_sample_weight_invariance(name, metric, y_true, 
y_score)\n else:\n check_sample_weight_invariance(name, metric, y_true, y_pred)\n\n\[email protected](\n 'name',\n sorted(\n set(ALL_METRICS) - set(REGRESSION_METRICS) -\n METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY_MULTICLASS))\ndef test_multiclass_sample_weight_invariance(name):\n # multiclass\n n_samples = 50\n random_state = check_random_state(0)\n y_true = random_state.randint(0, 5, size=(n_samples, ))\n y_pred = random_state.randint(0, 5, size=(n_samples, ))\n y_score = random_state.random_sample(size=(n_samples, 5))\n metric = ALL_METRICS[name]\n if name in THRESHOLDED_METRICS:\n # softmax\n temp = np.exp(-y_score)\n y_score_norm = temp / temp.sum(axis=-1).reshape(-1, 1)\n check_sample_weight_invariance(name, metric, y_true, y_score_norm)\n else:\n check_sample_weight_invariance(name, metric, y_true, y_pred)\n\n\[email protected](\n 'name',\n sorted((MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS\n | MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT))\ndef test_multilabel_sample_weight_invariance(name):\n # multilabel indicator\n random_state = check_random_state(0)\n _, ya = make_multilabel_classification(n_features=1, n_classes=10,\n random_state=0, n_samples=50,\n allow_unlabeled=False)\n _, yb = make_multilabel_classification(n_features=1, n_classes=10,\n random_state=1, n_samples=50,\n allow_unlabeled=False)\n y_true = np.vstack([ya, yb])\n y_pred = np.vstack([ya, ya])\n y_score = random_state.randint(1, 4, size=y_true.shape)\n\n metric = ALL_METRICS[name]\n if name in THRESHOLDED_METRICS:\n check_sample_weight_invariance(name, metric, y_true, y_score)\n else:\n check_sample_weight_invariance(name, metric, y_true, y_pred)\n\n\n@ignore_warnings\ndef test_no_averaging_labels():\n # test labels argument when not using averaging\n # in multi-class and multi-label cases\n y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])\n y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])\n y_true_multiclass = np.array([0, 1, 2])\n y_pred_multiclass = np.array([0, 2, 3])\n labels = np.array([3, 0, 1, 2])\n _, inverse_labels = np.unique(labels, return_inverse=True)\n\n for name in METRICS_WITH_AVERAGING:\n for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],\n [y_true_multilabel, y_pred_multilabel]]:\n if name not in MULTILABELS_METRICS and y_pred.ndim > 1:\n continue\n\n metric = ALL_METRICS[name]\n\n score_labels = metric(y_true, y_pred, labels=labels, average=None)\n score = metric(y_true, y_pred, average=None)\n assert_array_equal(score_labels, score[inverse_labels])\n\n\[email protected](\n 'name',\n sorted(MULTILABELS_METRICS - {\"unnormalized_multilabel_confusion_matrix\"}))\ndef test_multilabel_label_permutations_invariance(name):\n random_state = check_random_state(0)\n n_samples, n_classes = 20, 4\n\n y_true = random_state.randint(0, 2, size=(n_samples, n_classes))\n y_score = random_state.randint(0, 2, size=(n_samples, n_classes))\n\n metric = ALL_METRICS[name]\n score = metric(y_true, y_score)\n\n for perm in permutations(range(n_classes), n_classes):\n y_score_perm = y_score[:, perm]\n y_true_perm = y_true[:, perm]\n\n current_score = metric(y_true_perm, y_score_perm)\n assert_almost_equal(score, current_score)\n\n\[email protected](\n 'name', sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS))\ndef test_thresholded_multilabel_multioutput_permutations_invariance(name):\n random_state = check_random_state(0)\n n_samples, n_classes = 20, 4\n y_true = random_state.randint(0, 2, size=(n_samples, n_classes))\n y_score = 
random_state.normal(size=y_true.shape)\n\n # Makes sure all samples have at least one label. This works around errors\n # when running metrics where average=\"sample\"\n y_true[y_true.sum(1) == 4, 0] = 0\n y_true[y_true.sum(1) == 0, 0] = 1\n\n metric = ALL_METRICS[name]\n score = metric(y_true, y_score)\n\n for perm in permutations(range(n_classes), n_classes):\n y_score_perm = y_score[:, perm]\n y_true_perm = y_true[:, perm]\n\n current_score = metric(y_true_perm, y_score_perm)\n if metric == mean_absolute_percentage_error:\n assert np.isfinite(current_score)\n assert current_score > 1e6\n # Here we are not comparing the values in case of MAPE because\n # whenever y_true value is exactly zero, the MAPE value doesn't\n # signify anything. Thus, in this case we are just expecting\n # very large finite value.\n else:\n assert_almost_equal(score, current_score)\n\n\[email protected](\n 'name',\n sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))\ndef test_thresholded_metric_permutation_invariance(name):\n n_samples, n_classes = 100, 3\n random_state = check_random_state(0)\n\n y_score = random_state.rand(n_samples, n_classes)\n temp = np.exp(-y_score)\n y_score = temp / temp.sum(axis=-1).reshape(-1, 1)\n y_true = random_state.randint(0, n_classes, size=n_samples)\n\n metric = ALL_METRICS[name]\n score = metric(y_true, y_score)\n for perm in permutations(range(n_classes), n_classes):\n inverse_perm = np.zeros(n_classes, dtype=int)\n inverse_perm[list(perm)] = np.arange(n_classes)\n y_score_perm = y_score[:, inverse_perm]\n y_true_perm = np.take(perm, y_true)\n\n current_score = metric(y_true_perm, y_score_perm)\n assert_almost_equal(score, current_score)\n\n\[email protected](\"metric_name\", CLASSIFICATION_METRICS)\ndef test_metrics_consistent_type_error(metric_name):\n # check that an understable message is raised when the type between y_true\n # and y_pred mismatch\n rng = np.random.RandomState(42)\n y1 = np.array([\"spam\"] * 3 + [\"eggs\"] * 2, dtype=object)\n y2 = rng.randint(0, 2, size=y1.size)\n\n err_msg = \"Labels in y_true and y_pred should be of the same type.\"\n with pytest.raises(TypeError, match=err_msg):\n CLASSIFICATION_METRICS[metric_name](y1, y2)\n\n\[email protected](\n \"metric, y_pred_threshold\",\n [\n (average_precision_score, True),\n (brier_score_loss, True),\n (f1_score, False),\n (partial(fbeta_score, beta=1), False),\n (jaccard_score, False),\n (precision_recall_curve, True),\n (precision_score, False),\n (recall_score, False),\n (roc_curve, True),\n ],\n)\[email protected](\"dtype_y_str\", [str, object])\ndef test_metrics_pos_label_error_str(metric, y_pred_threshold, dtype_y_str):\n # check that the error message if `pos_label` is not specified and the\n # targets is made of strings.\n rng = np.random.RandomState(42)\n y1 = np.array([\"spam\"] * 3 + [\"eggs\"] * 2, dtype=dtype_y_str)\n y2 = rng.randint(0, 2, size=y1.size)\n\n if not y_pred_threshold:\n y2 = np.array([\"spam\", \"eggs\"], dtype=dtype_y_str)[y2]\n\n err_msg_pos_label_None = (\n \"y_true takes value in {'eggs', 'spam'} and pos_label is not \"\n \"specified: either make y_true take value in {0, 1} or {-1, 1} or \"\n \"pass pos_label explicit\"\n )\n err_msg_pos_label_1 = (\n r\"pos_label=1 is not a valid label. 
It should be one of \"\n r\"\\['eggs', 'spam'\\]\"\n )\n\n pos_label_default = signature(metric).parameters[\"pos_label\"].default\n\n err_msg = (\n err_msg_pos_label_1\n if pos_label_default == 1\n else err_msg_pos_label_None\n )\n with pytest.raises(ValueError, match=err_msg):\n metric(y1, y2)\n" ]
[ [ "numpy.dot", "numpy.asarray", "scipy.special.chdtrc", "numpy.mean", "numpy.zeros_like", "numpy.where", "scipy.sparse.issparse", "numpy.unique", "numpy.arange", "numpy.finfo", "scipy.special.fdtrc", "numpy.zeros", "numpy.nonzero", "numpy.isnan", "numpy.append", "numpy.errstate", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.linalg.norm", "numpy.percentile", "numpy.sort", "numpy.ones", "scipy.stats.f.sf" ], [ "numpy.take", "sklearn.utils._testing.ignore_warnings", "sklearn.metrics.confusion_matrix", "sklearn.utils._testing.assert_array_less", "numpy.mean", "sklearn.utils._testing.assert_almost_equal", "numpy.exp", "numpy.hstack", "scipy.sparse.coo_matrix", "sklearn.utils._testing.assert_allclose", "numpy.unique", "numpy.reshape", "numpy.arange", "sklearn.metrics.precision_recall_curve", "numpy.copy", "sklearn.preprocessing.LabelBinarizer", "numpy.repeat", "numpy.zeros", "sklearn.utils._testing.assert_array_equal", "sklearn.datasets.make_multilabel_classification", "sklearn.utils.validation.check_random_state", "numpy.random.RandomState", "numpy.array", "numpy.sum", "sklearn.utils.validation._num_samples", "numpy.isfinite", "sklearn.utils.shuffle", "numpy.ones", "sklearn.utils.multiclass.type_of_target", "sklearn.metrics._base._average_binary_score", "numpy.average", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
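As a quick illustration of the NaN-padding workaround that precision_recall_curve_padded_thresholds applies in the test module above, here is a minimal sketch; it assumes scikit-learn and NumPy are installed, and the toy labels and scores are invented purely for illustration.

# A toy example; the labels and scores below are made up for illustration.
import numpy as np
from sklearn.metrics import precision_recall_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

precision, recall, thresholds = precision_recall_curve(y_true, y_score)

# precision and recall each have one more entry than thresholds, so pad the
# thresholds with NaN before stacking everything into a single float array.
pad = len(precision) - len(thresholds)
stacked = np.array([
    precision,
    recall,
    np.pad(thresholds.astype(np.float64), (0, pad), constant_values=np.nan),
])
print(stacked.dtype)  # float64 rather than object

Padding keeps the three rows the same length, so the stacked result stays a plain float array and element-wise comparisons such as assert_array_equal behave as expected.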
AeRabelais/bdt_pipeline
[ "35d0cc3f7ada35e082c384d0755916605daa5feb" ]
[ "scripts/dataToParquet.py" ]
[ "\"\"\"\r\n@Title: dataToParquet.py\r\n@author: Ashia Lewis\r\n\r\nGOAL: Create and update the parquet files for the air and soil data, separately.\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport pyarrow as pa\r\nimport pyarrow.parquet as pq\r\n\r\n#CODE TO BE USED FOR THE BATCH DATA\r\n\"\"\"\r\n#file directories for the air and soil files\r\nair_dir = r\"D:\\sample_biodiversitree\\data\\export_data\\air_data\"\r\nsoil_dir = r\"D:\\sample_biodiversitree\\scripts\\data\\export_data\\soil_data\"\r\n\r\n\r\n\r\n#all_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)\r\n\r\nall_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)\r\n\r\n\r\n#air_data = pd.concat((pd.read_csv(f) for f in all_air_files ))\r\n#air_data.to_parquet('air_data.parquet')\r\n\r\n#need to look at soil's clean up job\r\n\r\nsoil_data = pd.concat((pd.read_csv(f) for f in all_soil_files ))\r\nsoil_data.to_parquet('soil_data.parquet')\r\n\r\n\"\"\"\r\n\r\n#CODE TO BE USED IN THE ACTUAL PIPELINE\r\n\r\n# file directories for the air and soil files \r\nair_dir = r\"D:\\sample_biodiversitree\\data\\export_data\\air_data\"\r\nsoil_dir = r\"D:\\sample_biodiversitree\\data\\export_data\\soil_data\"\r\n\r\n#concatenate all of the files' data\r\nall_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)\r\nall_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)\r\n\r\n#put the data in dataframes\r\nair_data = pd.concat((pd.read_csv(f) for f in all_air_files))\r\nsoil_data = pd.concat((pd.read_csv(f) for f in all_soil_files))\r\n\r\n#convert the dataframes to arrow tables and write them to parquet files\r\nair_table = pa.Table.from_pandas(air_data)\r\nsoil_table = pa.Table.from_pandas(soil_data)\r\n\r\n\r\nair_writer = pq.ParquetWriter('air_data.parquet', air_table.schema)\r\nair_writer.write_table(table = air_table)\r\n\r\nif air_writer:\r\n    air_writer.close()\r\n\r\nsoil_writer = pq.ParquetWriter('soil_data.parquet', soil_table.schema)\r\nsoil_writer.write_table(table = soil_table)\r\n\r\nif soil_writer:\r\n    soil_writer.close()\r\n\r\n\r\n\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
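The dataToParquet.py script above states the goal of updating the air and soil parquet files, while pq.ParquetWriter as used there rewrites each output file from scratch. A hedged sketch of batch-by-batch writing with a single writer follows; the directory layout and file names are placeholders, and all CSV batches are assumed to share one schema.

import glob
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

writer = None
# Hypothetical relative path; the real pipeline uses absolute D:\ paths.
for path in glob.glob("export_data/air_data/**/*.csv", recursive=True):
    table = pa.Table.from_pandas(pd.read_csv(path))
    if writer is None:
        # Open the output once, using the first batch's schema.
        writer = pq.ParquetWriter("air_data.parquet", table.schema)
    writer.write_table(table)  # each call appends another row group
if writer is not None:
    writer.close()

Each write_table call adds another row group to the same open file, which is the usual way to accumulate batches without rewriting earlier data.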
arushi-08/pandas
[ "014ea2e5a8a647cfa5e3050a5c1299eb39b293d3", "014ea2e5a8a647cfa5e3050a5c1299eb39b293d3" ]
[ "pandas/tests/plotting/test_datetimelike.py", "pandas/core/dtypes/cast.py" ]
[ "\"\"\" Test cases for time series specific (freq conversion, etc) \"\"\"\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nimport pickle\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n BaseOffset,\n to_offset,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n Index,\n NaT,\n Series,\n isna,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimes import (\n DatetimeIndex,\n bdate_range,\n date_range,\n)\nfrom pandas.core.indexes.period import (\n Period,\n PeriodIndex,\n period_range,\n)\nfrom pandas.core.indexes.timedeltas import timedelta_range\nfrom pandas.tests.plotting.common import TestPlotBase\n\nfrom pandas.tseries.offsets import WeekOfMonth\n\npytestmark = pytest.mark.slow\n\n\[email protected]_if_no_mpl\nclass TestTSPlot(TestPlotBase):\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n\n self.freq = [\"S\", \"T\", \"H\", \"D\", \"W\", \"M\", \"Q\", \"A\"]\n idx = [period_range(\"12/31/1999\", freq=x, periods=100) for x in self.freq]\n self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]\n self.period_df = [\n DataFrame(np.random.randn(len(x), 3), index=x, columns=[\"A\", \"B\", \"C\"])\n for x in idx\n ]\n\n freq = [\"S\", \"T\", \"H\", \"D\", \"W\", \"M\", \"Q-DEC\", \"A\", \"1B30Min\"]\n idx = [date_range(\"12/31/1999\", freq=x, periods=100) for x in freq]\n self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]\n self.datetime_df = [\n DataFrame(np.random.randn(len(x), 3), index=x, columns=[\"A\", \"B\", \"C\"])\n for x in idx\n ]\n\n def teardown_method(self, method):\n tm.close()\n\n def test_ts_plot_with_tz(self, tz_aware_fixture):\n # GH2877, GH17173, GH31205, GH31580\n tz = tz_aware_fixture\n index = date_range(\"1/1/2011\", periods=2, freq=\"H\", tz=tz)\n ts = Series([188.5, 328.25], index=index)\n with tm.assert_produces_warning(None):\n _check_plot_works(ts.plot)\n ax = ts.plot()\n xdata = list(ax.get_lines())[0].get_xdata()\n # Check first and last points' labels are correct\n assert (xdata[0].hour, xdata[0].minute) == (0, 0)\n assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)\n\n def test_fontsize_set_correctly(self):\n # For issue #8765\n df = DataFrame(np.random.randn(10, 9), index=range(10))\n fig, ax = self.plt.subplots()\n df.plot(fontsize=2, ax=ax)\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n assert label.get_fontsize() == 2\n\n def test_frame_inferred(self):\n # inferred freq\n idx = date_range(\"1/1/1987\", freq=\"MS\", periods=100)\n idx = DatetimeIndex(idx.values, freq=None)\n\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df.plot)\n\n # axes freq\n idx = idx[0:40].union(idx[45:99])\n df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df2.plot)\n\n # N > 1\n idx = date_range(\"2008-1-1 00:15:00\", freq=\"15T\", periods=10)\n idx = DatetimeIndex(idx.values, freq=None)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df.plot)\n\n def test_is_error_nozeroindex(self):\n # GH11858\n i = np.array([1, 2, 3])\n a = DataFrame(i, index=i)\n _check_plot_works(a.plot, xerr=a)\n _check_plot_works(a.plot, yerr=a)\n\n def test_nonnumeric_exclude(self):\n idx = date_range(\"1/1/1987\", freq=\"A\", periods=3)\n df = DataFrame({\"A\": [\"x\", \"y\", \"z\"], \"B\": [1, 2, 3]}, idx)\n\n fig, ax = self.plt.subplots()\n df.plot(ax=ax) # it works\n assert len(ax.get_lines()) == 1 # B was plotted\n 
self.plt.close(fig)\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n df[\"A\"].plot()\n\n def test_tsplot(self):\n\n _, ax = self.plt.subplots()\n ts = tm.makeTimeSeries()\n\n for s in self.period_ser:\n _check_plot_works(s.plot, ax=ax)\n\n for s in self.datetime_ser:\n _check_plot_works(s.plot, ax=ax)\n\n _, ax = self.plt.subplots()\n ts.plot(style=\"k\", ax=ax)\n color = (0.0, 0.0, 0.0, 1)\n assert color == ax.get_lines()[0].get_color()\n\n def test_both_style_and_color(self):\n\n ts = tm.makeTimeSeries()\n msg = (\n \"Cannot pass 'style' string with a color symbol and 'color' \"\n \"keyword argument. Please use one or the other or pass 'style' \"\n \"without a color symbol\"\n )\n with pytest.raises(ValueError, match=msg):\n ts.plot(style=\"b-\", color=\"#000099\")\n\n s = ts.reset_index(drop=True)\n with pytest.raises(ValueError, match=msg):\n s.plot(style=\"b-\", color=\"#000099\")\n\n def test_high_freq(self):\n freaks = [\"ms\", \"us\"]\n for freq in freaks:\n _, ax = self.plt.subplots()\n rng = date_range(\"1/1/2012\", periods=100, freq=freq)\n ser = Series(np.random.randn(len(rng)), rng)\n _check_plot_works(ser.plot, ax=ax)\n\n def test_get_datevalue(self):\n from pandas.plotting._matplotlib.converter import get_datevalue\n\n assert get_datevalue(None, \"D\") is None\n assert get_datevalue(1987, \"A\") == 1987\n assert get_datevalue(Period(1987, \"A\"), \"M\") == Period(\"1987-12\", \"M\").ordinal\n assert get_datevalue(\"1/1/1987\", \"D\") == Period(\"1987-1-1\", \"D\").ordinal\n\n def test_ts_plot_format_coord(self):\n def check_format_of_first_point(ax, expected_string):\n first_line = ax.get_lines()[0]\n first_x = first_line.get_xdata()[0].ordinal\n first_y = first_line.get_ydata()[0]\n try:\n assert expected_string == ax.format_coord(first_x, first_y)\n except (ValueError):\n pytest.skip(\n \"skipping test because issue forming test comparison GH7664\"\n )\n\n annual = Series(1, index=date_range(\"2014-01-01\", periods=3, freq=\"A-DEC\"))\n _, ax = self.plt.subplots()\n annual.plot(ax=ax)\n check_format_of_first_point(ax, \"t = 2014 y = 1.000000\")\n\n # note this is added to the annual plot already in existence, and\n # changes its freq field\n daily = Series(1, index=date_range(\"2014-01-01\", periods=3, freq=\"D\"))\n daily.plot(ax=ax)\n check_format_of_first_point(ax, \"t = 2014-01-01 y = 1.000000\")\n tm.close()\n\n def test_line_plot_period_series(self):\n for s in self.period_ser:\n _check_plot_works(s.plot, s.index.freq)\n\n @pytest.mark.parametrize(\n \"frqncy\", [\"1S\", \"3S\", \"5T\", \"7H\", \"4D\", \"8W\", \"11M\", \"3A\"]\n )\n def test_line_plot_period_mlt_series(self, frqncy):\n # test period index line plot for series with multiples (`mlt`) of the\n # frequency (`frqncy`) rule code. tests resolution of issue #14763\n idx = period_range(\"12/31/1999\", freq=frqncy, periods=100)\n s = Series(np.random.randn(len(idx)), idx)\n _check_plot_works(s.plot, s.index.freq.rule_code)\n\n def test_line_plot_datetime_series(self):\n for s in self.datetime_ser:\n _check_plot_works(s.plot, s.index.freq.rule_code)\n\n def test_line_plot_period_frame(self):\n for df in self.period_df:\n _check_plot_works(df.plot, df.index.freq)\n\n @pytest.mark.parametrize(\n \"frqncy\", [\"1S\", \"3S\", \"5T\", \"7H\", \"4D\", \"8W\", \"11M\", \"3A\"]\n )\n def test_line_plot_period_mlt_frame(self, frqncy):\n # test period index line plot for DataFrames with multiples (`mlt`)\n # of the frequency (`frqncy`) rule code. 
tests resolution of issue\n # #14763\n idx = period_range(\"12/31/1999\", freq=frqncy, periods=100)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx, columns=[\"A\", \"B\", \"C\"])\n freq = df.index.asfreq(df.index.freq.rule_code).freq\n _check_plot_works(df.plot, freq)\n\n def test_line_plot_datetime_frame(self):\n for df in self.datetime_df:\n freq = df.index.to_period(df.index.freq.rule_code).freq\n _check_plot_works(df.plot, freq)\n\n def test_line_plot_inferred_freq(self):\n for ser in self.datetime_ser:\n ser = Series(ser.values, Index(np.asarray(ser.index)))\n _check_plot_works(ser.plot, ser.index.inferred_freq)\n\n ser = ser[[0, 3, 5, 6]]\n _check_plot_works(ser.plot)\n\n def test_fake_inferred_business(self):\n _, ax = self.plt.subplots()\n rng = date_range(\"2001-1-1\", \"2001-1-10\")\n ts = Series(range(len(rng)), index=rng)\n ts = ts[:3].append(ts[5:])\n ts.plot(ax=ax)\n assert not hasattr(ax, \"freq\")\n\n def test_plot_offset_freq(self):\n ser = tm.makeTimeSeries()\n _check_plot_works(ser.plot)\n\n dr = date_range(ser.index[0], freq=\"BQS\", periods=10)\n ser = Series(np.random.randn(len(dr)), index=dr)\n _check_plot_works(ser.plot)\n\n def test_plot_multiple_inferred_freq(self):\n dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)])\n ser = Series(np.random.randn(len(dr)), index=dr)\n _check_plot_works(ser.plot)\n\n def test_uhf(self):\n import pandas.plotting._matplotlib.converter as conv\n\n idx = date_range(\"2012-6-22 21:59:51.960928\", freq=\"L\", periods=500)\n df = DataFrame(np.random.randn(len(idx), 2), index=idx)\n\n _, ax = self.plt.subplots()\n df.plot(ax=ax)\n axis = ax.get_xaxis()\n\n tlocs = axis.get_ticklocs()\n tlabels = axis.get_ticklabels()\n for loc, label in zip(tlocs, tlabels):\n xp = conv._from_ordinal(loc).strftime(\"%H:%M:%S.%f\")\n rs = str(label.get_text())\n if len(rs):\n assert xp == rs\n\n def test_irreg_hf(self):\n idx = date_range(\"2012-6-22 21:59:51\", freq=\"S\", periods=100)\n df = DataFrame(np.random.randn(len(idx), 2), index=idx)\n\n irreg = df.iloc[[0, 1, 3, 4]]\n _, ax = self.plt.subplots()\n irreg.plot(ax=ax)\n diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()\n\n sec = 1.0 / 24 / 60 / 60\n assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()\n\n _, ax = self.plt.subplots()\n df2 = df.copy()\n df2.index = df.index.astype(object)\n df2.plot(ax=ax)\n diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()\n assert (np.fabs(diffs[1:] - sec) < 1e-8).all()\n\n def test_irregular_datetime64_repr_bug(self):\n ser = tm.makeTimeSeries()\n ser = ser[[0, 1, 2, 7]]\n\n _, ax = self.plt.subplots()\n\n ret = ser.plot(ax=ax)\n assert ret is not None\n\n for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):\n assert rs == xp\n\n def test_business_freq(self):\n bts = tm.makePeriodSeries()\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal\n idx = ax.get_lines()[0].get_xdata()\n assert PeriodIndex(data=idx).freqstr == \"B\"\n\n def test_business_freq_convert(self):\n bts = tm.makeTimeSeries(300).asfreq(\"BM\")\n ts = bts.to_period(\"M\")\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal\n idx = ax.get_lines()[0].get_xdata()\n assert PeriodIndex(data=idx).freqstr == \"M\"\n\n def test_freq_with_no_period_alias(self):\n # GH34487\n freq = WeekOfMonth()\n bts = tm.makeTimeSeries(5).asfreq(freq)\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n\n idx = 
ax.get_lines()[0].get_xdata()\n msg = \"freq not specified and cannot be inferred\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(data=idx)\n\n def test_nonzero_base(self):\n # GH2571\n idx = date_range(\"2012-12-20\", periods=24, freq=\"H\") + timedelta(minutes=30)\n df = DataFrame(np.arange(24), index=idx)\n _, ax = self.plt.subplots()\n df.plot(ax=ax)\n rs = ax.get_lines()[0].get_xdata()\n assert not Index(rs).is_normalized\n\n def test_dataframe(self):\n bts = DataFrame({\"a\": tm.makeTimeSeries()})\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n idx = ax.get_lines()[0].get_xdata()\n tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))\n\n def test_axis_limits(self):\n def _test(ax):\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[0] - 5, xlim[1] + 10)\n result = ax.get_xlim()\n assert result[0] == xlim[0] - 5\n assert result[1] == xlim[1] + 10\n\n # string\n expected = (Period(\"1/1/2000\", ax.freq), Period(\"4/1/2000\", ax.freq))\n ax.set_xlim(\"1/1/2000\", \"4/1/2000\")\n result = ax.get_xlim()\n assert int(result[0]) == expected[0].ordinal\n assert int(result[1]) == expected[1].ordinal\n\n # datetime\n expected = (Period(\"1/1/2000\", ax.freq), Period(\"4/1/2000\", ax.freq))\n ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))\n result = ax.get_xlim()\n assert int(result[0]) == expected[0].ordinal\n assert int(result[1]) == expected[1].ordinal\n fig = ax.get_figure()\n self.plt.close(fig)\n\n ser = tm.makeTimeSeries()\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n _test(ax)\n\n _, ax = self.plt.subplots()\n df = DataFrame({\"a\": ser, \"b\": ser + 1})\n df.plot(ax=ax)\n _test(ax)\n\n df = DataFrame({\"a\": ser, \"b\": ser + 1})\n axes = df.plot(subplots=True)\n\n for ax in axes:\n _test(ax)\n\n def test_get_finder(self):\n import pandas.plotting._matplotlib.converter as conv\n\n assert conv.get_finder(to_offset(\"B\")) == conv._daily_finder\n assert conv.get_finder(to_offset(\"D\")) == conv._daily_finder\n assert conv.get_finder(to_offset(\"M\")) == conv._monthly_finder\n assert conv.get_finder(to_offset(\"Q\")) == conv._quarterly_finder\n assert conv.get_finder(to_offset(\"A\")) == conv._annual_finder\n assert conv.get_finder(to_offset(\"W\")) == conv._daily_finder\n\n def test_finder_daily(self):\n day_lst = [10, 40, 252, 400, 950, 2750, 10000]\n\n xpl1 = xpl2 = [Period(\"1999-1-1\", freq=\"B\").ordinal] * len(day_lst)\n rs1 = []\n rs2 = []\n for n in day_lst:\n rng = bdate_range(\"1999-1-1\", periods=n)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs1.append(xaxis.get_majorticklocs()[0])\n\n vmin, vmax = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs2.append(xaxis.get_majorticklocs()[0])\n self.plt.close(ax.get_figure())\n\n assert rs1 == xpl1\n assert rs2 == xpl2\n\n def test_finder_quarterly(self):\n yrs = [3.5, 11]\n\n xpl1 = xpl2 = [Period(\"1988Q1\").ordinal] * len(yrs)\n rs1 = []\n rs2 = []\n for n in yrs:\n rng = period_range(\"1987Q2\", periods=int(n * 4), freq=\"Q\")\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs1.append(xaxis.get_majorticklocs()[0])\n\n (vmin, vmax) = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs2.append(xaxis.get_majorticklocs()[0])\n self.plt.close(ax.get_figure())\n\n assert rs1 == xpl1\n assert rs2 == xpl2\n\n def test_finder_monthly(self):\n yrs = [1.15, 2.5, 4, 11]\n\n xpl1 = xpl2 = [Period(\"Jan 1988\").ordinal] * len(yrs)\n rs1 = []\n rs2 = []\n for n in 
yrs:\n rng = period_range(\"1987Q2\", periods=int(n * 12), freq=\"M\")\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs1.append(xaxis.get_majorticklocs()[0])\n\n vmin, vmax = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs2.append(xaxis.get_majorticklocs()[0])\n self.plt.close(ax.get_figure())\n\n assert rs1 == xpl1\n assert rs2 == xpl2\n\n def test_finder_monthly_long(self):\n rng = period_range(\"1988Q1\", periods=24 * 12, freq=\"M\")\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n xp = Period(\"1989Q1\", \"M\").ordinal\n assert rs == xp\n\n def test_finder_annual(self):\n xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]\n xp = [Period(x, freq=\"A\").ordinal for x in xp]\n rs = []\n for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]:\n rng = period_range(\"1987\", periods=nyears, freq=\"A\")\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs.append(xaxis.get_majorticklocs()[0])\n self.plt.close(ax.get_figure())\n\n assert rs == xp\n\n def test_finder_minutely(self):\n nminutes = 50 * 24 * 60\n rng = date_range(\"1/1/1999\", freq=\"Min\", periods=nminutes)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n xp = Period(\"1/1/1999\", freq=\"Min\").ordinal\n\n assert rs == xp\n\n def test_finder_hourly(self):\n nhours = 23\n rng = date_range(\"1/1/1999\", freq=\"H\", periods=nhours)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n xp = Period(\"1/1/1999\", freq=\"H\").ordinal\n\n assert rs == xp\n\n def test_gaps(self):\n ts = tm.makeTimeSeries()\n ts[5:25] = np.nan\n _, ax = self.plt.subplots()\n ts.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n line = lines[0]\n data = line.get_xydata()\n\n if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:\n data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)\n\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[5:25, 1].all()\n self.plt.close(ax.get_figure())\n\n # irregular\n ts = tm.makeTimeSeries()\n ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]\n ts[2:5] = np.nan\n _, ax = self.plt.subplots()\n ax = ts.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n line = lines[0]\n data = line.get_xydata()\n\n if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:\n data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)\n\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[2:5, 1].all()\n self.plt.close(ax.get_figure())\n\n # non-ts\n idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]\n ser = Series(np.random.randn(len(idx)), idx)\n ser[2:5] = np.nan\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n line = lines[0]\n data = line.get_xydata()\n if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:\n data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)\n\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[2:5, 1].all()\n\n def test_gap_upsample(self):\n low = tm.makeTimeSeries()\n low[5:25] = np.nan\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n\n idxh = date_range(low.index[0], low.index[-1], freq=\"12h\")\n s = 
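# Illustrative sketch (hypothetical aside; assumes pandas + matplotlib): a run of NaN values
# leaves a gap in the plotted line, and the drawn line's y-data carries NaN over that span --
# the masked-array assertions in test_gaps above check exactly this.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

ts = pd.Series(np.random.randn(30), index=pd.date_range("2000-01-01", periods=30))
ts.iloc[5:10] = np.nan
_, ax = plt.subplots()
ts.plot(ax=ax)
ys = np.asarray(ax.get_lines()[0].get_ydata(), dtype=float)
print(np.isnan(ys[5:10]).all())  # True: the gap survives into the drawn line
plt.close("all")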
Series(np.random.randn(len(idxh)), idxh)\n s.plot(secondary_y=True)\n lines = ax.get_lines()\n assert len(lines) == 1\n assert len(ax.right_ax.get_lines()) == 1\n\n line = lines[0]\n data = line.get_xydata()\n if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:\n data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)\n\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[5:25, 1].all()\n\n def test_secondary_y(self):\n ser = Series(np.random.randn(10))\n ser2 = Series(np.random.randn(10))\n fig, _ = self.plt.subplots()\n ax = ser.plot(secondary_y=True)\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n axes = fig.get_axes()\n line = ax.get_lines()[0]\n xp = Series(line.get_ydata(), line.get_xdata())\n tm.assert_series_equal(ser, xp)\n assert ax.get_yaxis().get_ticks_position() == \"right\"\n assert not axes[0].get_yaxis().get_visible()\n self.plt.close(fig)\n\n _, ax2 = self.plt.subplots()\n ser2.plot(ax=ax2)\n assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position\n self.plt.close(ax2.get_figure())\n\n ax = ser2.plot()\n ax2 = ser.plot(secondary_y=True)\n assert ax.get_yaxis().get_visible()\n assert not hasattr(ax, \"left_ax\")\n assert hasattr(ax, \"right_ax\")\n assert hasattr(ax2, \"left_ax\")\n assert not hasattr(ax2, \"right_ax\")\n\n def test_secondary_y_ts(self):\n idx = date_range(\"1/1/2000\", periods=10)\n ser = Series(np.random.randn(10), idx)\n ser2 = Series(np.random.randn(10), idx)\n fig, _ = self.plt.subplots()\n ax = ser.plot(secondary_y=True)\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n axes = fig.get_axes()\n line = ax.get_lines()[0]\n xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp()\n tm.assert_series_equal(ser, xp)\n assert ax.get_yaxis().get_ticks_position() == \"right\"\n assert not axes[0].get_yaxis().get_visible()\n self.plt.close(fig)\n\n _, ax2 = self.plt.subplots()\n ser2.plot(ax=ax2)\n assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position\n self.plt.close(ax2.get_figure())\n\n ax = ser2.plot()\n ax2 = ser.plot(secondary_y=True)\n assert ax.get_yaxis().get_visible()\n\n @td.skip_if_no_scipy\n def test_secondary_kde(self):\n\n ser = Series(np.random.randn(10))\n fig, ax = self.plt.subplots()\n ax = ser.plot(secondary_y=True, kind=\"density\", ax=ax)\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n axes = fig.get_axes()\n assert axes[1].get_yaxis().get_ticks_position() == \"right\"\n\n def test_secondary_bar(self):\n ser = Series(np.random.randn(10))\n fig, ax = self.plt.subplots()\n ser.plot(secondary_y=True, kind=\"bar\", ax=ax)\n axes = fig.get_axes()\n assert axes[1].get_yaxis().get_ticks_position() == \"right\"\n\n def test_secondary_frame(self):\n df = DataFrame(np.random.randn(5, 3), columns=[\"a\", \"b\", \"c\"])\n axes = df.plot(secondary_y=[\"a\", \"c\"], subplots=True)\n assert axes[0].get_yaxis().get_ticks_position() == \"right\"\n assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position\n assert axes[2].get_yaxis().get_ticks_position() == \"right\"\n\n def test_secondary_bar_frame(self):\n df = DataFrame(np.random.randn(5, 3), columns=[\"a\", \"b\", \"c\"])\n axes = df.plot(kind=\"bar\", secondary_y=[\"a\", \"c\"], subplots=True)\n assert axes[0].get_yaxis().get_ticks_position() == \"right\"\n assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position\n assert axes[2].get_yaxis().get_ticks_position() == \"right\"\n\n def 
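# Illustrative sketch (hypothetical aside; assumes pandas + matplotlib): secondary_y=True
# plots against a twinned right-hand y-axis; pandas records the pairing on the internal
# left_ax/right_ax attributes that the assertions above rely on.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

ax = pd.Series(np.random.randn(10)).plot(secondary_y=True)
print(hasattr(ax, "left_ax"), ax.get_yaxis().get_ticks_position())  # True right
plt.close("all")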
test_mixed_freq_regular_first(self):\n # TODO\n s1 = tm.makeTimeSeries()\n s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]\n\n # it works!\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n\n ax2 = s2.plot(style=\"g\", ax=ax)\n lines = ax2.get_lines()\n idx1 = PeriodIndex(lines[0].get_xdata())\n idx2 = PeriodIndex(lines[1].get_xdata())\n\n tm.assert_index_equal(idx1, s1.index.to_period(\"B\"))\n tm.assert_index_equal(idx2, s2.index.to_period(\"B\"))\n\n left, right = ax2.get_xlim()\n pidx = s1.index.to_period()\n assert left <= pidx[0].ordinal\n assert right >= pidx[-1].ordinal\n\n def test_mixed_freq_irregular_first(self):\n s1 = tm.makeTimeSeries()\n s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]\n _, ax = self.plt.subplots()\n s2.plot(style=\"g\", ax=ax)\n s1.plot(ax=ax)\n assert not hasattr(ax, \"freq\")\n lines = ax.get_lines()\n x1 = lines[0].get_xdata()\n tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)\n x2 = lines[1].get_xdata()\n tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)\n\n def test_mixed_freq_regular_first_df(self):\n # GH 9852\n s1 = tm.makeTimeSeries().to_frame()\n s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n ax2 = s2.plot(style=\"g\", ax=ax)\n lines = ax2.get_lines()\n idx1 = PeriodIndex(lines[0].get_xdata())\n idx2 = PeriodIndex(lines[1].get_xdata())\n assert idx1.equals(s1.index.to_period(\"B\"))\n assert idx2.equals(s2.index.to_period(\"B\"))\n left, right = ax2.get_xlim()\n pidx = s1.index.to_period()\n assert left <= pidx[0].ordinal\n assert right >= pidx[-1].ordinal\n\n def test_mixed_freq_irregular_first_df(self):\n # GH 9852\n s1 = tm.makeTimeSeries().to_frame()\n s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]\n _, ax = self.plt.subplots()\n s2.plot(style=\"g\", ax=ax)\n s1.plot(ax=ax)\n assert not hasattr(ax, \"freq\")\n lines = ax.get_lines()\n x1 = lines[0].get_xdata()\n tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)\n x2 = lines[1].get_xdata()\n tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)\n\n def test_mixed_freq_hf_first(self):\n idxh = date_range(\"1/1/1999\", periods=365, freq=\"D\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == \"D\"\n\n def test_mixed_freq_alignment(self):\n ts_ind = date_range(\"2012-01-01 13:00\", \"2012-01-02\", freq=\"H\")\n ts_data = np.random.randn(12)\n\n ts = Series(ts_data, index=ts_ind)\n ts2 = ts.asfreq(\"T\").interpolate()\n\n _, ax = self.plt.subplots()\n ax = ts.plot(ax=ax)\n ts2.plot(style=\"r\", ax=ax)\n\n assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]\n\n def test_mixed_freq_lf_first(self):\n\n idxh = date_range(\"1/1/1999\", periods=365, freq=\"D\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(legend=True, ax=ax)\n high.plot(legend=True, ax=ax)\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == \"D\"\n leg = ax.get_legend()\n assert len(leg.texts) == 2\n self.plt.close(ax.get_figure())\n\n idxh = date_range(\"1/1/1999\", periods=240, freq=\"T\")\n idxl = date_range(\"1/1/1999\", periods=4, freq=\"H\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = 
Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == \"T\"\n\n def test_mixed_freq_irreg_period(self):\n ts = tm.makeTimeSeries()\n irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]\n rng = period_range(\"1/3/2000\", periods=30, freq=\"B\")\n ps = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n irreg.plot(ax=ax)\n ps.plot(ax=ax)\n\n def test_mixed_freq_shared_ax(self):\n\n # GH13341, using sharex=True\n idx1 = date_range(\"2015-01-01\", periods=3, freq=\"M\")\n idx2 = idx1[:1].union(idx1[2:])\n s1 = Series(range(len(idx1)), idx1)\n s2 = Series(range(len(idx2)), idx2)\n\n fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)\n s1.plot(ax=ax1)\n s2.plot(ax=ax2)\n\n assert ax1.freq == \"M\"\n assert ax2.freq == \"M\"\n assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0]\n\n # using twinx\n fig, ax1 = self.plt.subplots()\n ax2 = ax1.twinx()\n s1.plot(ax=ax1)\n s2.plot(ax=ax2)\n\n assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0]\n\n # TODO (GH14330, GH14322)\n # plotting the irregular first does not yet work\n # fig, ax1 = plt.subplots()\n # ax2 = ax1.twinx()\n # s2.plot(ax=ax1)\n # s1.plot(ax=ax2)\n # assert (ax1.lines[0].get_xydata()[0, 0] ==\n # ax2.lines[0].get_xydata()[0, 0])\n\n def test_nat_handling(self):\n\n _, ax = self.plt.subplots()\n\n dti = DatetimeIndex([\"2015-01-01\", NaT, \"2015-01-03\"])\n s = Series(range(len(dti)), dti)\n s.plot(ax=ax)\n xdata = ax.get_lines()[0].get_xdata()\n # plot x data is bounded by index values\n assert s.index.min() <= Series(xdata).min()\n assert Series(xdata).max() <= s.index.max()\n\n def test_to_weekly_resampling(self):\n idxh = date_range(\"1/1/1999\", periods=52, freq=\"W\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq\n\n def test_from_weekly_resampling(self):\n idxh = date_range(\"1/1/1999\", periods=52, freq=\"W\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n\n expected_h = idxh.to_period().asi8.astype(np.float64)\n expected_l = np.array(\n [1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562],\n dtype=np.float64,\n )\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq\n xdata = line.get_xdata(orig=False)\n if len(xdata) == 12: # idxl lines\n tm.assert_numpy_array_equal(xdata, expected_l)\n else:\n tm.assert_numpy_array_equal(xdata, expected_h)\n tm.close()\n\n def test_from_resampling_area_line_mixed(self):\n idxh = date_range(\"1/1/1999\", periods=52, freq=\"W\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = DataFrame(np.random.rand(len(idxh), 3), index=idxh, columns=[0, 1, 2])\n low = DataFrame(np.random.rand(len(idxl), 3), index=idxl, columns=[0, 1, 2])\n\n # low to high\n for kind1, kind2 in [(\"line\", \"area\"), (\"area\", \"line\")]:\n _, ax = self.plt.subplots()\n low.plot(kind=kind1, stacked=True, ax=ax)\n high.plot(kind=kind2, stacked=True, ax=ax)\n\n # check low dataframe result\n 
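# Illustrative sketch (hypothetical aside; assumes pandas + matplotlib): when series of
# different frequencies share one axes, pandas converts every line to the higher frequency,
# which is what the PeriodIndex(line.get_xdata()).freq assertions around here check.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

idxh = pd.date_range("1/1/1999", periods=52, freq="W")
idxl = pd.date_range("1/1/1999", periods=12, freq="M")
_, ax = plt.subplots()
pd.Series(np.random.randn(len(idxh)), idxh).plot(ax=ax)
pd.Series(np.random.randn(len(idxl)), idxl).plot(ax=ax)
# both lines now report the weekly frequency of the higher-frequency series
print({pd.PeriodIndex(data=line.get_xdata()).freqstr for line in ax.get_lines()})
plt.close("all")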
expected_x = np.array(\n [\n 1514,\n 1519,\n 1523,\n 1527,\n 1531,\n 1536,\n 1540,\n 1544,\n 1549,\n 1553,\n 1558,\n 1562,\n ],\n dtype=np.float64,\n )\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n line = ax.lines[i]\n assert PeriodIndex(line.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)\n # check stacked values are correct\n expected_y += low[i].values\n tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)\n\n # check high dataframe result\n expected_x = idxh.to_period().asi8.astype(np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n line = ax.lines[3 + i]\n assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)\n expected_y += high[i].values\n tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)\n\n # high to low\n for kind1, kind2 in [(\"line\", \"area\"), (\"area\", \"line\")]:\n _, ax = self.plt.subplots()\n high.plot(kind=kind1, stacked=True, ax=ax)\n low.plot(kind=kind2, stacked=True, ax=ax)\n\n # check high dataframe result\n expected_x = idxh.to_period().asi8.astype(np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n line = ax.lines[i]\n assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)\n expected_y += high[i].values\n tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)\n\n # check low dataframe result\n expected_x = np.array(\n [\n 1514,\n 1519,\n 1523,\n 1527,\n 1531,\n 1536,\n 1540,\n 1544,\n 1549,\n 1553,\n 1558,\n 1562,\n ],\n dtype=np.float64,\n )\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n lines = ax.lines[3 + i]\n assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x)\n expected_y += low[i].values\n tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y)\n\n def test_mixed_freq_second_millisecond(self):\n # GH 7772, GH 7760\n idxh = date_range(\"2014-07-01 09:00\", freq=\"S\", periods=50)\n idxl = date_range(\"2014-07-01 09:00\", freq=\"100L\", periods=500)\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n # high to low\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n assert len(ax.get_lines()) == 2\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == \"L\"\n tm.close()\n\n # low to high\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n assert len(ax.get_lines()) == 2\n for line in ax.get_lines():\n assert PeriodIndex(data=line.get_xdata()).freq == \"L\"\n\n def test_irreg_dtypes(self):\n # date\n idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]\n df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))\n _check_plot_works(df.plot)\n\n # np.datetime64\n idx = date_range(\"1/1/2000\", periods=10)\n idx = idx[[0, 2, 5, 9]].astype(object)\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n _, ax = self.plt.subplots()\n _check_plot_works(df.plot, ax=ax)\n\n def test_time(self):\n t = datetime(1, 1, 1, 3, 30, 0)\n deltas = np.random.randint(1, 20, 3).cumsum()\n ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])\n df = DataFrame(\n {\"a\": np.random.randn(len(ts)), \"b\": np.random.randn(len(ts))}, index=ts\n )\n fig, 
ax = self.plt.subplots()\n df.plot(ax=ax)\n\n # verify tick labels\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if s != 0:\n xp = time(h, m, s).strftime(\"%H:%M:%S\")\n else:\n xp = time(h, m, s).strftime(\"%H:%M\")\n assert xp == rs\n\n def test_time_change_xlim(self):\n t = datetime(1, 1, 1, 3, 30, 0)\n deltas = np.random.randint(1, 20, 3).cumsum()\n ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])\n df = DataFrame(\n {\"a\": np.random.randn(len(ts)), \"b\": np.random.randn(len(ts))}, index=ts\n )\n fig, ax = self.plt.subplots()\n df.plot(ax=ax)\n\n # verify tick labels\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if s != 0:\n xp = time(h, m, s).strftime(\"%H:%M:%S\")\n else:\n xp = time(h, m, s).strftime(\"%H:%M\")\n assert xp == rs\n\n # change xlim\n ax.set_xlim(\"1:30\", \"5:00\")\n\n # check tick labels again\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if s != 0:\n xp = time(h, m, s).strftime(\"%H:%M:%S\")\n else:\n xp = time(h, m, s).strftime(\"%H:%M\")\n assert xp == rs\n\n def test_time_musec(self):\n t = datetime(1, 1, 1, 3, 30, 0)\n deltas = np.random.randint(1, 20, 3).cumsum()\n ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas])\n df = DataFrame(\n {\"a\": np.random.randn(len(ts)), \"b\": np.random.randn(len(ts))}, index=ts\n )\n fig, ax = self.plt.subplots()\n ax = df.plot(ax=ax)\n\n # verify tick labels\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n\n us = round((t - int(t)) * 1e6)\n\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if (us % 1000) != 0:\n xp = time(h, m, s, us).strftime(\"%H:%M:%S.%f\")\n elif (us // 1000) != 0:\n xp = time(h, m, s, us).strftime(\"%H:%M:%S.%f\")[:-3]\n elif s != 0:\n xp = time(h, m, s, us).strftime(\"%H:%M:%S\")\n else:\n xp = time(h, m, s, us).strftime(\"%H:%M\")\n assert xp == rs\n\n def test_secondary_upsample(self):\n idxh = date_range(\"1/1/1999\", periods=365, freq=\"D\")\n idxl = date_range(\"1/1/1999\", periods=12, freq=\"M\")\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n ax = high.plot(secondary_y=True, ax=ax)\n for line in ax.get_lines():\n assert PeriodIndex(line.get_xdata()).freq == \"D\"\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n for line in ax.left_ax.get_lines():\n assert PeriodIndex(line.get_xdata()).freq == \"D\"\n\n def test_secondary_legend(self):\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n\n # ts\n df = tm.makeTimeDataFrame()\n df.plot(secondary_y=[\"A\", \"B\"], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert leg.get_texts()[0].get_text() == \"A (right)\"\n assert leg.get_texts()[1].get_text() == \"B (right)\"\n assert leg.get_texts()[2].get_text() == \"C\"\n assert leg.get_texts()[3].get_text() == \"D\"\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close(fig)\n\n 
fig = self.plt.figure()\n ax = fig.add_subplot(211)\n df.plot(secondary_y=[\"A\", \"C\"], mark_right=False, ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert leg.get_texts()[0].get_text() == \"A\"\n assert leg.get_texts()[1].get_text() == \"B\"\n assert leg.get_texts()[2].get_text() == \"C\"\n assert leg.get_texts()[3].get_text() == \"D\"\n self.plt.close(fig)\n\n fig, ax = self.plt.subplots()\n df.plot(kind=\"bar\", secondary_y=[\"A\"], ax=ax)\n leg = ax.get_legend()\n assert leg.get_texts()[0].get_text() == \"A (right)\"\n assert leg.get_texts()[1].get_text() == \"B\"\n self.plt.close(fig)\n\n fig, ax = self.plt.subplots()\n df.plot(kind=\"bar\", secondary_y=[\"A\"], mark_right=False, ax=ax)\n leg = ax.get_legend()\n assert leg.get_texts()[0].get_text() == \"A\"\n assert leg.get_texts()[1].get_text() == \"B\"\n self.plt.close(fig)\n\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n df = tm.makeTimeDataFrame()\n ax = df.plot(secondary_y=[\"C\", \"D\"], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close(fig)\n\n # non-ts\n df = tm.makeDataFrame()\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n ax = df.plot(secondary_y=[\"A\", \"B\"], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close()\n\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n ax = df.plot(secondary_y=[\"C\", \"D\"], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n\n def test_format_date_axis(self):\n rng = date_range(\"1/1/2012\", periods=12, freq=\"M\")\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n _, ax = self.plt.subplots()\n ax = df.plot(ax=ax)\n xaxis = ax.get_xaxis()\n for line in xaxis.get_ticklabels():\n if len(line.get_text()) > 0:\n assert line.get_rotation() == 30\n\n def test_ax_plot(self):\n x = date_range(start=\"2012-01-02\", periods=10, freq=\"D\")\n y = list(range(len(x)))\n _, ax = self.plt.subplots()\n lines = ax.plot(x, y, label=\"Y\")\n tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)\n\n def test_mpl_nopandas(self):\n dates = [date(2008, 12, 31), date(2009, 1, 31)]\n values1 = np.arange(10.0, 11.0, 0.5)\n values2 = np.arange(11.0, 12.0, 0.5)\n\n kw = {\"fmt\": \"-\", \"lw\": 4}\n\n _, ax = self.plt.subplots()\n ax.plot_date([x.toordinal() for x in dates], values1, **kw)\n ax.plot_date([x.toordinal() for x in dates], values2, **kw)\n\n line1, line2 = ax.get_lines()\n\n exp = np.array([x.toordinal() for x in dates], dtype=np.float64)\n tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)\n exp = np.array([x.toordinal() for x in dates], dtype=np.float64)\n tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)\n\n def test_irregular_ts_shared_ax_xlim(self):\n # GH 2960\n from pandas.plotting._matplotlib.converter import DatetimeConverter\n\n ts = tm.makeTimeSeries()[:20]\n ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]\n\n # plot the left section of the irregular series, then the right section\n _, ax = 
self.plt.subplots()\n ts_irregular[:5].plot(ax=ax)\n ts_irregular[5:].plot(ax=ax)\n\n # check that axis limits are correct\n left, right = ax.get_xlim()\n assert left <= DatetimeConverter.convert(ts_irregular.index.min(), \"\", ax)\n assert right >= DatetimeConverter.convert(ts_irregular.index.max(), \"\", ax)\n\n def test_secondary_y_non_ts_xlim(self):\n # GH 3490 - non-timeseries with secondary y\n index_1 = [1, 2, 3, 4]\n index_2 = [5, 6, 7, 8]\n s1 = Series(1, index=index_1)\n s2 = Series(2, index=index_2)\n\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n s2.plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n assert left_before >= left_after\n assert right_before < right_after\n\n def test_secondary_y_regular_ts_xlim(self):\n # GH 3490 - regular-timeseries with secondary y\n index_1 = date_range(start=\"2000-01-01\", periods=4, freq=\"D\")\n index_2 = date_range(start=\"2000-01-05\", periods=4, freq=\"D\")\n s1 = Series(1, index=index_1)\n s2 = Series(2, index=index_2)\n\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n s2.plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n assert left_before >= left_after\n assert right_before < right_after\n\n def test_secondary_y_mixed_freq_ts_xlim(self):\n # GH 3490 - mixed frequency timeseries with secondary y\n rng = date_range(\"2000-01-01\", periods=10000, freq=\"min\")\n ts = Series(1, index=rng)\n\n _, ax = self.plt.subplots()\n ts.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n ts.resample(\"D\").mean().plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n # a downsample should not have changed either limit\n assert left_before == left_after\n assert right_before == right_after\n\n def test_secondary_y_irregular_ts_xlim(self):\n # GH 3490 - irregular-timeseries with secondary y\n from pandas.plotting._matplotlib.converter import DatetimeConverter\n\n ts = tm.makeTimeSeries()[:20]\n ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]\n\n _, ax = self.plt.subplots()\n ts_irregular[:5].plot(ax=ax)\n # plot higher-x values on secondary axis\n ts_irregular[5:].plot(secondary_y=True, ax=ax)\n # ensure secondary limits aren't overwritten by plot on primary\n ts_irregular[:5].plot(ax=ax)\n\n left, right = ax.get_xlim()\n assert left <= DatetimeConverter.convert(ts_irregular.index.min(), \"\", ax)\n assert right >= DatetimeConverter.convert(ts_irregular.index.max(), \"\", ax)\n\n def test_plot_outofbounds_datetime(self):\n # 2579 - checking this does not raise\n values = [date(1677, 1, 1), date(1677, 1, 2)]\n _, ax = self.plt.subplots()\n ax.plot(values)\n\n values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]\n ax.plot(values)\n\n def test_format_timedelta_ticks_narrow(self):\n\n expected_labels = [f\"00:00:00.0000000{i:0>2d}\" for i in np.arange(10)]\n\n rng = timedelta_range(\"0\", periods=10, freq=\"ns\")\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n fig, ax = self.plt.subplots()\n df.plot(fontsize=2, ax=ax)\n self.plt.draw()\n labels = ax.get_xticklabels()\n\n result_labels = [x.get_text() for x in labels]\n assert len(result_labels) == len(expected_labels)\n assert result_labels == expected_labels\n\n def test_format_timedelta_ticks_wide(self):\n expected_labels = [\n \"00:00:00\",\n \"1 days 03:46:40\",\n \"2 days 07:33:20\",\n \"3 days 11:20:00\",\n \"4 days 15:06:40\",\n \"5 days 18:53:20\",\n \"6 days 22:40:00\",\n \"8 days 02:26:40\",\n \"9 
days 06:13:20\",\n ]\n\n rng = timedelta_range(\"0\", periods=10, freq=\"1 d\")\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n fig, ax = self.plt.subplots()\n ax = df.plot(fontsize=2, ax=ax)\n self.plt.draw()\n labels = ax.get_xticklabels()\n\n result_labels = [x.get_text() for x in labels]\n assert len(result_labels) == len(expected_labels)\n assert result_labels == expected_labels\n\n def test_timedelta_plot(self):\n # test issue #8711\n s = Series(range(5), timedelta_range(\"1day\", periods=5))\n _, ax = self.plt.subplots()\n _check_plot_works(s.plot, ax=ax)\n\n # test long period\n index = timedelta_range(\"1 day 2 hr 30 min 10 s\", periods=10, freq=\"1 d\")\n s = Series(np.random.randn(len(index)), index)\n _, ax = self.plt.subplots()\n _check_plot_works(s.plot, ax=ax)\n\n # test short period\n index = timedelta_range(\"1 day 2 hr 30 min 10 s\", periods=10, freq=\"1 ns\")\n s = Series(np.random.randn(len(index)), index)\n _, ax = self.plt.subplots()\n _check_plot_works(s.plot, ax=ax)\n\n def test_hist(self):\n # https://github.com/matplotlib/matplotlib/issues/8459\n rng = date_range(\"1/1/2011\", periods=10, freq=\"H\")\n x = rng\n w1 = np.arange(0, 1, 0.1)\n w2 = np.arange(0, 1, 0.1)[::-1]\n _, ax = self.plt.subplots()\n ax.hist([x, x], weights=[w1, w2])\n\n def test_overlapping_datetime(self):\n # GB 6608\n s1 = Series(\n [1, 2, 3],\n index=[\n datetime(1995, 12, 31),\n datetime(2000, 12, 31),\n datetime(2005, 12, 31),\n ],\n )\n s2 = Series(\n [1, 2, 3],\n index=[\n datetime(1997, 12, 31),\n datetime(2003, 12, 31),\n datetime(2008, 12, 31),\n ],\n )\n\n # plot first series, then add the second series to those axes,\n # then try adding the first series again\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n s2.plot(ax=ax)\n s1.plot(ax=ax)\n\n @pytest.mark.xfail(reason=\"GH9053 matplotlib does not use ax.xaxis.converter\")\n def test_add_matplotlib_datetime64(self):\n # GH9053 - ensure that a plot with PeriodConverter still understands\n # datetime64 data. 
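# Illustrative sketch (hypothetical aside; assumes pandas + matplotlib): data indexed by a
# TimedeltaIndex is plotted with tick labels such as "1 days 03:46:40"; the expected_labels
# lists above pin that formatting down exactly.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

rng = pd.timedelta_range("0", periods=10, freq="1 d")
s = pd.Series(np.random.randn(len(rng)), index=rng)
_, ax = plt.subplots()
s.plot(ax=ax)
plt.draw()
print([t.get_text() for t in ax.get_xticklabels()][:3])
plt.close("all")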
This still fails because matplotlib overrides the\n # ax.xaxis.converter with a DatetimeConverter\n s = Series(np.random.randn(10), index=date_range(\"1970-01-02\", periods=10))\n ax = s.plot()\n with tm.assert_produces_warning(DeprecationWarning):\n # multi-dimensional indexing\n ax.plot(s.index, s.values, color=\"g\")\n l1, l2 = ax.lines\n tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())\n\n def test_matplotlib_scatter_datetime64(self):\n # https://github.com/matplotlib/matplotlib/issues/11391\n df = DataFrame(np.random.RandomState(0).rand(10, 2), columns=[\"x\", \"y\"])\n df[\"time\"] = date_range(\"2018-01-01\", periods=10, freq=\"D\")\n fig, ax = self.plt.subplots()\n ax.scatter(x=\"time\", y=\"y\", data=df)\n self.plt.draw()\n label = ax.get_xticklabels()[0]\n if self.mpl_ge_3_2_0:\n expected = \"2018-01-01\"\n elif self.mpl_ge_3_0_0:\n expected = \"2017-12-08\"\n else:\n expected = \"2017-12-12\"\n assert label.get_text() == expected\n\n def test_check_xticks_rot(self):\n # https://github.com/pandas-dev/pandas/issues/29460\n # regular time series\n x = to_datetime([\"2020-05-01\", \"2020-05-02\", \"2020-05-03\"])\n df = DataFrame({\"x\": x, \"y\": [1, 2, 3]})\n axes = df.plot(x=\"x\", y=\"y\")\n self._check_ticks_props(axes, xrot=0)\n\n # irregular time series\n x = to_datetime([\"2020-05-01\", \"2020-05-02\", \"2020-05-04\"])\n df = DataFrame({\"x\": x, \"y\": [1, 2, 3]})\n axes = df.plot(x=\"x\", y=\"y\")\n self._check_ticks_props(axes, xrot=30)\n\n # use timeseries index or not\n axes = df.set_index(\"x\").plot(y=\"y\", use_index=True)\n self._check_ticks_props(axes, xrot=30)\n axes = df.set_index(\"x\").plot(y=\"y\", use_index=False)\n self._check_ticks_props(axes, xrot=0)\n\n # separate subplots\n axes = df.plot(x=\"x\", y=\"y\", subplots=True, sharex=True)\n self._check_ticks_props(axes, xrot=30)\n axes = df.plot(x=\"x\", y=\"y\", subplots=True, sharex=False)\n self._check_ticks_props(axes, xrot=0)\n\n\ndef _check_plot_works(f, freq=None, series=None, *args, **kwargs):\n import matplotlib.pyplot as plt\n\n fig = plt.gcf()\n\n try:\n plt.clf()\n ax = fig.add_subplot(211)\n orig_ax = kwargs.pop(\"ax\", plt.gca())\n orig_axfreq = getattr(orig_ax, \"freq\", None)\n\n ret = f(*args, **kwargs)\n assert ret is not None # do something more intelligent\n\n ax = kwargs.pop(\"ax\", plt.gca())\n if series is not None:\n dfreq = series.index.freq\n if isinstance(dfreq, BaseOffset):\n dfreq = dfreq.rule_code\n if orig_axfreq is None:\n assert ax.freq == dfreq\n\n if freq is not None and orig_axfreq is None:\n assert ax.freq == freq\n\n ax = fig.add_subplot(212)\n kwargs[\"ax\"] = ax\n ret = f(*args, **kwargs)\n assert ret is not None # TODO: do something more intelligent\n\n with tm.ensure_clean(return_filelike=True) as path:\n plt.savefig(path)\n\n # GH18439, GH#24088, statsmodels#4772\n with tm.ensure_clean(return_filelike=True) as path:\n pickle.dump(fig, path)\n finally:\n plt.close(fig)\n", "\"\"\"\nRoutines for casting.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n timedelta,\n)\nimport functools\nimport inspect\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Sized,\n TypeVar,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import (\n NaT,\n OutOfBoundsDatetime,\n OutOfBoundsTimedelta,\n Timedelta,\n Timestamp,\n conversion,\n)\nfrom pandas._libs.tslibs.timedeltas import array_to_timedelta64\nfrom pandas._typing import (\n ArrayLike,\n Dtype,\n 
DtypeObj,\n Scalar,\n)\nfrom pandas.errors import IntCastingNaNError\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n ensure_int8,\n ensure_int16,\n ensure_int32,\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_complex,\n is_complex_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_numeric_dtype,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n is_timedelta64_dtype,\n is_unsigned_integer_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PandasDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCExtensionArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import is_list_like\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n na_value_for_dtype,\n notna,\n)\n\nif TYPE_CHECKING:\n\n from pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n IntervalArray,\n PeriodArray,\n TimedeltaArray,\n )\n\n_int8_max = np.iinfo(np.int8).max\n_int16_max = np.iinfo(np.int16).max\n_int32_max = np.iinfo(np.int32).max\n_int64_max = np.iinfo(np.int64).max\n\nNumpyArrayT = TypeVar(\"NumpyArrayT\", bound=np.ndarray)\n\n\ndef maybe_convert_platform(\n values: list | tuple | range | np.ndarray | ExtensionArray,\n) -> ArrayLike:\n \"\"\"try to do platform conversion, allow ndarray or list here\"\"\"\n arr: ArrayLike\n\n if isinstance(values, (list, tuple, range)):\n arr = construct_1d_object_array_from_listlike(values)\n else:\n # The caller is responsible for ensuring that we have np.ndarray\n # or ExtensionArray here.\n arr = values\n\n if arr.dtype == object:\n arr = cast(np.ndarray, arr)\n arr = lib.maybe_convert_objects(arr)\n\n return arr\n\n\ndef is_nested_object(obj) -> bool:\n \"\"\"\n return a boolean if we have a nested object, e.g. 
a Series with 1 or\n more Series elements\n\n This may not be necessarily be performant.\n\n \"\"\"\n return bool(\n isinstance(obj, ABCSeries)\n and is_object_dtype(obj.dtype)\n and any(isinstance(v, ABCSeries) for v in obj._values)\n )\n\n\ndef maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:\n \"\"\"\n Cast scalar to Timestamp or Timedelta if scalar is datetime-like\n and dtype is not object.\n\n Parameters\n ----------\n value : scalar\n dtype : Dtype, optional\n\n Returns\n -------\n scalar\n \"\"\"\n if dtype == object:\n pass\n elif isinstance(value, (np.datetime64, datetime)):\n value = Timestamp(value)\n elif isinstance(value, (np.timedelta64, timedelta)):\n value = Timedelta(value)\n\n return value\n\n\ndef maybe_box_native(value: Scalar) -> Scalar:\n \"\"\"\n If passed a scalar cast the scalar to a python native type.\n\n Parameters\n ----------\n value : scalar or Series\n\n Returns\n -------\n scalar or Series\n \"\"\"\n if is_float(value):\n # error: Argument 1 to \"float\" has incompatible type\n # \"Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]\";\n # expected \"Union[SupportsFloat, _SupportsIndex, str]\"\n value = float(value) # type: ignore[arg-type]\n elif is_integer(value):\n # error: Argument 1 to \"int\" has incompatible type\n # \"Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]\";\n # expected \"Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]\"\n value = int(value) # type: ignore[arg-type]\n elif is_bool(value):\n value = bool(value)\n elif isinstance(value, (np.datetime64, np.timedelta64)):\n value = maybe_box_datetimelike(value)\n return value\n\n\ndef maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:\n \"\"\"\n Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting\n into a numpy array. Failing to unbox would risk dropping nanoseconds.\n\n Notes\n -----\n Caller is responsible for checking dtype.kind in [\"m\", \"M\"]\n \"\"\"\n if is_valid_na_for_dtype(value, dtype):\n # GH#36541: can't fill array directly with pd.NaT\n # > np.empty(10, dtype=\"datetime64[64]\").fill(pd.NaT)\n # ValueError: cannot convert float NaN to integer\n value = dtype.type(\"NaT\", \"ns\")\n elif isinstance(value, Timestamp):\n if value.tz is None:\n value = value.to_datetime64()\n elif not isinstance(dtype, DatetimeTZDtype):\n raise TypeError(\"Cannot unbox tzaware Timestamp to tznaive dtype\")\n elif isinstance(value, Timedelta):\n value = value.to_timedelta64()\n\n _disallow_mismatched_datetimelike(value, dtype)\n return value\n\n\ndef _disallow_mismatched_datetimelike(value, dtype: DtypeObj):\n \"\"\"\n numpy allows np.array(dt64values, dtype=\"timedelta64[ns]\") and\n vice-versa, but we do not want to allow this, so we need to\n check explicitly\n \"\"\"\n vdtype = getattr(value, \"dtype\", None)\n if vdtype is None:\n return\n elif (vdtype.kind == \"m\" and dtype.kind == \"M\") or (\n vdtype.kind == \"M\" and dtype.kind == \"m\"\n ):\n raise TypeError(f\"Cannot cast {repr(value)} to {dtype}\")\n\n\ndef maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:\n \"\"\"\n try to cast to the specified dtype (e.g. 
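# Illustrative sketch (hypothetical aside, not part of this module): the boxing helpers
# above mirror what the public constructors do -- np.datetime64 becomes Timestamp,
# np.timedelta64 becomes Timedelta, and numpy numeric scalars map to Python natives.
import numpy as np
import pandas as pd

print(type(pd.Timestamp(np.datetime64("2021-01-01"))).__name__)  # Timestamp
print(type(pd.Timedelta(np.timedelta64(1, "D"))).__name__)       # Timedelta
print(type(int(np.int64(3))).__name__, type(float(np.float64(3.0))).__name__)  # int float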
convert back to bool/int\n or could be an astype of float64->float32\n \"\"\"\n do_round = False\n\n if isinstance(dtype, str):\n if dtype == \"infer\":\n inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)\n if inferred_type == \"boolean\":\n dtype = \"bool\"\n elif inferred_type == \"integer\":\n dtype = \"int64\"\n elif inferred_type == \"datetime64\":\n dtype = \"datetime64[ns]\"\n elif inferred_type == \"timedelta64\":\n dtype = \"timedelta64[ns]\"\n\n # try to upcast here\n elif inferred_type == \"floating\":\n dtype = \"int64\"\n if issubclass(result.dtype.type, np.number):\n do_round = True\n\n else:\n # TODO: complex? what if result is already non-object?\n dtype = \"object\"\n\n dtype = np.dtype(dtype)\n\n if not isinstance(dtype, np.dtype):\n # enforce our signature annotation\n raise TypeError(dtype) # pragma: no cover\n\n converted = maybe_downcast_numeric(result, dtype, do_round)\n if converted is not result:\n return converted\n\n # a datetimelike\n # GH12821, iNaT is cast to float\n if dtype.kind in [\"M\", \"m\"] and result.dtype.kind in [\"i\", \"f\"]:\n result = result.astype(dtype)\n\n return result\n\n\ndef maybe_downcast_numeric(\n result: ArrayLike, dtype: DtypeObj, do_round: bool = False\n) -> ArrayLike:\n \"\"\"\n Subset of maybe_downcast_to_dtype restricted to numeric dtypes.\n\n Parameters\n ----------\n result : ndarray or ExtensionArray\n dtype : np.dtype or ExtensionDtype\n do_round : bool\n\n Returns\n -------\n ndarray or ExtensionArray\n \"\"\"\n if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):\n # e.g. SparseDtype has no itemsize attr\n return result\n\n def trans(x):\n if do_round:\n return x.round()\n return x\n\n if dtype.kind == result.dtype.kind:\n # don't allow upcasts here (except if empty)\n if result.dtype.itemsize <= dtype.itemsize and result.size:\n return result\n\n if is_bool_dtype(dtype) or is_integer_dtype(dtype):\n\n if not result.size:\n # if we don't have any elements, just astype it\n return trans(result).astype(dtype)\n\n # do a test on the first element, if it fails then we are done\n r = result.ravel()\n arr = np.array([r[0]])\n\n if isna(arr).any():\n # if we have any nulls, then we are done\n return result\n\n elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):\n # a comparable, e.g. 
a Decimal may slip in here\n return result\n\n if (\n issubclass(result.dtype.type, (np.object_, np.number))\n and notna(result).all()\n ):\n new_result = trans(result).astype(dtype)\n if new_result.dtype.kind == \"O\" or result.dtype.kind == \"O\":\n # np.allclose may raise TypeError on object-dtype\n if (new_result == result).all():\n return new_result\n else:\n if np.allclose(new_result, result, rtol=0):\n return new_result\n\n elif (\n issubclass(dtype.type, np.floating)\n and not is_bool_dtype(result.dtype)\n and not is_string_dtype(result.dtype)\n ):\n return result.astype(dtype)\n\n return result\n\n\ndef maybe_cast_pointwise_result(\n result: ArrayLike,\n dtype: DtypeObj,\n numeric_only: bool = False,\n same_dtype: bool = True,\n) -> ArrayLike:\n \"\"\"\n Try casting result of a pointwise operation back to the original dtype if\n appropriate.\n\n Parameters\n ----------\n result : array-like\n Result to cast.\n dtype : np.dtype or ExtensionDtype\n Input Series from which result was calculated.\n numeric_only : bool, default False\n Whether to cast only numerics or datetimes as well.\n same_dtype : bool, default True\n Specify dtype when calling _from_sequence\n\n Returns\n -------\n result : array-like\n result maybe casted to the dtype.\n \"\"\"\n\n assert not is_scalar(result)\n\n if isinstance(dtype, ExtensionDtype):\n if not isinstance(dtype, (CategoricalDtype, DatetimeTZDtype)):\n # TODO: avoid this special-casing\n # We have to special case categorical so as not to upcast\n # things like counts back to categorical\n\n cls = dtype.construct_array_type()\n if same_dtype:\n result = maybe_cast_to_extension_array(cls, result, dtype=dtype)\n else:\n result = maybe_cast_to_extension_array(cls, result)\n\n elif (numeric_only and is_numeric_dtype(dtype)) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n\ndef maybe_cast_to_extension_array(\n cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None\n) -> ArrayLike:\n \"\"\"\n Call to `_from_sequence` that returns the object unchanged on Exception.\n\n Parameters\n ----------\n cls : class, subclass of ExtensionArray\n obj : arraylike\n Values to pass to cls._from_sequence\n dtype : ExtensionDtype, optional\n\n Returns\n -------\n ExtensionArray or obj\n \"\"\"\n from pandas.core.arrays.string_ import BaseStringArray\n\n assert isinstance(cls, type), f\"must pass a type: {cls}\"\n assertion_msg = f\"must pass a subclass of ExtensionArray: {cls}\"\n assert issubclass(cls, ABCExtensionArray), assertion_msg\n\n # Everything can be converted to StringArrays, but we may not want to convert\n if issubclass(cls, BaseStringArray) and lib.infer_dtype(obj) != \"string\":\n return obj\n\n try:\n result = cls._from_sequence(obj, dtype=dtype)\n except Exception:\n # We can't predict what downstream EA constructors may raise\n result = obj\n return result\n\n\n@overload\ndef ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:\n ...\n\n\n@overload\ndef ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:\n ...\n\n\ndef ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:\n \"\"\"\n If we have a dtype that cannot hold NA values, find the best match that can.\n \"\"\"\n if isinstance(dtype, ExtensionDtype):\n # TODO: ExtensionDtype.can_hold_na?\n return dtype\n elif dtype.kind == \"b\":\n return np.dtype(object)\n elif dtype.kind in [\"i\", \"u\"]:\n return np.dtype(np.float64)\n return dtype\n\n\ndef maybe_promote(dtype: np.dtype, fill_value=np.nan):\n \"\"\"\n Find the 
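# Illustrative sketch (hypothetical aside): the numeric downcast above only fires when the
# round-trip is lossless; pd.to_numeric(..., downcast=...) exposes the same idea publicly.
import pandas as pd

print(pd.to_numeric(pd.Series([1.0, 2.0, 3.0]), downcast="integer").dtype)  # int8 (lossless)
print(pd.to_numeric(pd.Series([1.5, 2.0]), downcast="integer").dtype)       # float64 (kept)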
minimal dtype that can hold both the given dtype and fill_value.\n\n Parameters\n ----------\n dtype : np.dtype\n fill_value : scalar, default np.nan\n\n Returns\n -------\n dtype\n Upcasted from dtype argument if necessary.\n fill_value\n Upcasted from fill_value argument if necessary.\n\n Raises\n ------\n ValueError\n If fill_value is a non-scalar and dtype is not object.\n \"\"\"\n # TODO(2.0): need to directly use the non-cached version as long as we\n # possibly raise a deprecation warning for datetime dtype\n if dtype.kind == \"M\":\n return _maybe_promote(dtype, fill_value)\n # for performance, we are using a cached version of the actual implementation\n # of the function in _maybe_promote. However, this doesn't always work (in case\n # of non-hashable arguments), so we fallback to the actual implementation if needed\n try:\n # error: Argument 3 to \"__call__\" of \"_lru_cache_wrapper\" has incompatible type\n # \"Type[Any]\"; expected \"Hashable\" [arg-type]\n return _maybe_promote_cached(\n dtype, fill_value, type(fill_value) # type: ignore[arg-type]\n )\n except TypeError:\n # if fill_value is not hashable (required for caching)\n return _maybe_promote(dtype, fill_value)\n\n\[email protected]_cache(maxsize=128)\ndef _maybe_promote_cached(dtype, fill_value, fill_value_type):\n # The cached version of _maybe_promote below\n # This also use fill_value_type as (unused) argument to use this in the\n # cache lookup -> to differentiate 1 and True\n return _maybe_promote(dtype, fill_value)\n\n\ndef _maybe_promote(dtype: np.dtype, fill_value=np.nan):\n # The actual implementation of the function, use `maybe_promote` above for\n # a cached version.\n if not is_scalar(fill_value):\n # with object dtype there is nothing to promote, and the user can\n # pass pretty much any weird fill_value they like\n if not is_object_dtype(dtype):\n # with object dtype there is nothing to promote, and the user can\n # pass pretty much any weird fill_value they like\n raise ValueError(\"fill_value must be a scalar\")\n dtype = np.dtype(object)\n return dtype, fill_value\n\n kinds = [\"i\", \"u\", \"f\", \"c\", \"m\", \"M\"]\n if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in kinds:\n dtype = ensure_dtype_can_hold_na(dtype)\n fv = na_value_for_dtype(dtype)\n return dtype, fv\n\n elif isna(fill_value):\n dtype = np.dtype(object)\n if fill_value is None:\n # but we retain e.g. pd.NA\n fill_value = np.nan\n return dtype, fill_value\n\n # returns tuple of (dtype, fill_value)\n if issubclass(dtype.type, np.datetime64):\n inferred, fv = infer_dtype_from_scalar(fill_value, pandas_dtype=True)\n if inferred == dtype:\n return dtype, fv\n\n # TODO(2.0): once this deprecation is enforced, this whole case\n # becomes equivalent to:\n # dta = DatetimeArray._from_sequence([], dtype=\"M8[ns]\")\n # try:\n # fv = dta._validate_setitem_value(fill_value)\n # return dta.dtype, fv\n # except (ValueError, TypeError):\n # return np.dtype(object), fill_value\n if isinstance(fill_value, date) and not isinstance(fill_value, datetime):\n # deprecate casting of date object to match infer_dtype_from_scalar\n # and DatetimeArray._validate_setitem_value\n try:\n fv = Timestamp(fill_value).to_datetime64()\n except OutOfBoundsDatetime:\n pass\n else:\n warnings.warn(\n \"Using a `date` object for fill_value with `datetime64[ns]` \"\n \"dtype is deprecated. In a future version, this will be cast \"\n \"to object dtype. 
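# Illustrative sketch (hypothetical aside): the promotion branches above lean on numpy's
# min_scalar_type / promote_types to widen a dtype until it can hold the fill value.
import numpy as np

print(np.min_scalar_type(3.5), np.min_scalar_type(1e300))                # float16 float64
print(np.promote_types(np.dtype("int64"), np.min_scalar_type(3.5)))      # float64
print(np.promote_types(np.dtype("float32"), np.min_scalar_type(1e300)))  # float64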
Pass `fill_value=Timestamp(date_obj)` instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return dtype, fv\n elif isinstance(fill_value, str):\n try:\n # explicitly wrap in str to convert np.str_\n fv = Timestamp(str(fill_value))\n except (ValueError, TypeError):\n pass\n else:\n if fv.tz is None:\n return dtype, fv.asm8\n\n return np.dtype(\"object\"), fill_value\n\n elif issubclass(dtype.type, np.timedelta64):\n inferred, fv = infer_dtype_from_scalar(fill_value, pandas_dtype=True)\n if inferred == dtype:\n return dtype, fv\n\n return np.dtype(\"object\"), fill_value\n\n elif is_float(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, np.integer):\n dtype = np.dtype(np.float64)\n\n elif dtype.kind == \"f\":\n mst = np.min_scalar_type(fill_value)\n if mst > dtype:\n # e.g. mst is np.float64 and dtype is np.float32\n dtype = mst\n\n elif dtype.kind == \"c\":\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n\n elif is_bool(fill_value):\n if not issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif is_integer(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, np.integer):\n if not np.can_cast(fill_value, dtype):\n # upcast to prevent overflow\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n if dtype.kind == \"f\":\n # Case where we disagree with numpy\n dtype = np.dtype(np.object_)\n\n elif is_complex(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, (np.integer, np.floating)):\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n\n elif dtype.kind == \"c\":\n mst = np.min_scalar_type(fill_value)\n if mst > dtype:\n # e.g. mst is np.complex128 and dtype is np.complex64\n dtype = mst\n\n else:\n dtype = np.dtype(np.object_)\n\n # in case we have a string that looked like a number\n if issubclass(dtype.type, (bytes, str)):\n dtype = np.dtype(np.object_)\n\n fill_value = _ensure_dtype_type(fill_value, dtype)\n return dtype, fill_value\n\n\ndef _ensure_dtype_type(value, dtype: np.dtype):\n \"\"\"\n Ensure that the given value is an instance of the given dtype.\n\n e.g. 
if out dtype is np.complex64_, we should have an instance of that\n as opposed to a python complex object.\n\n Parameters\n ----------\n value : object\n dtype : np.dtype\n\n Returns\n -------\n object\n \"\"\"\n # Start with exceptions in which we do _not_ cast to numpy types\n\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object_]\")\n if dtype == np.object_: # type: ignore[comparison-overlap]\n return value\n\n # Note: before we get here we have already excluded isna(value)\n return dtype.type(value)\n\n\ndef infer_dtype_from(val, pandas_dtype: bool = False) -> tuple[DtypeObj, Any]:\n \"\"\"\n Interpret the dtype from a scalar or array.\n\n Parameters\n ----------\n val : object\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, scalar/array belongs to pandas extension types is inferred as\n object\n \"\"\"\n if not is_list_like(val):\n return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)\n return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)\n\n\ndef infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, Any]:\n \"\"\"\n Interpret the dtype from a scalar.\n\n Parameters\n ----------\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, scalar belongs to pandas extension types is inferred as\n object\n \"\"\"\n dtype: DtypeObj = np.dtype(object)\n\n # a 1-element ndarray\n if isinstance(val, np.ndarray):\n if val.ndim != 0:\n msg = \"invalid ndarray passed to infer_dtype_from_scalar\"\n raise ValueError(msg)\n\n dtype = val.dtype\n val = lib.item_from_zerodim(val)\n\n elif isinstance(val, str):\n\n # If we create an empty array using a string to infer\n # the dtype, NumPy will only allocate one character per entry\n # so this is kind of bad. 
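# Illustrative sketch (hypothetical aside): for non-object dtypes the helper above reduces
# to dtype.type(value), turning a Python scalar into the matching numpy scalar type.
import numpy as np

print(type(np.dtype("float32").type(3.0)).__name__)       # float32
print(type(np.dtype("complex64").type(1 + 2j)).__name__)  # complex64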
Alternately we could use np.repeat\n # instead of np.empty (but then you still don't want things\n # coming out as np.str_!\n\n dtype = np.dtype(object)\n\n elif isinstance(val, (np.datetime64, datetime)):\n try:\n val = Timestamp(val)\n except OutOfBoundsDatetime:\n return np.dtype(object), val\n\n # error: Non-overlapping identity check (left operand type: \"Timestamp\",\n # right operand type: \"NaTType\")\n if val is NaT or val.tz is None: # type: ignore[comparison-overlap]\n dtype = np.dtype(\"M8[ns]\")\n val = val.to_datetime64()\n else:\n if pandas_dtype:\n dtype = DatetimeTZDtype(unit=\"ns\", tz=val.tz)\n else:\n # return datetimetz as object\n return np.dtype(object), val\n\n elif isinstance(val, (np.timedelta64, timedelta)):\n try:\n val = Timedelta(val)\n except (OutOfBoundsTimedelta, OverflowError):\n dtype = np.dtype(object)\n else:\n dtype = np.dtype(\"m8[ns]\")\n val = np.timedelta64(val.value, \"ns\")\n\n elif is_bool(val):\n dtype = np.dtype(np.bool_)\n\n elif is_integer(val):\n if isinstance(val, np.integer):\n dtype = np.dtype(type(val))\n else:\n dtype = np.dtype(np.int64)\n\n try:\n np.array(val, dtype=dtype)\n except OverflowError:\n dtype = np.array(val).dtype\n\n elif is_float(val):\n if isinstance(val, np.floating):\n dtype = np.dtype(type(val))\n else:\n dtype = np.dtype(np.float64)\n\n elif is_complex(val):\n dtype = np.dtype(np.complex_)\n\n elif pandas_dtype:\n if lib.is_period(val):\n dtype = PeriodDtype(freq=val.freq)\n elif lib.is_interval(val):\n subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]\n dtype = IntervalDtype(subtype=subtype, closed=val.closed)\n\n return dtype, val\n\n\ndef dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:\n \"\"\"\n Convert datetimelike-keyed dicts to a Timestamp-keyed dict.\n\n Parameters\n ----------\n d: dict-like object\n\n Returns\n -------\n dict\n \"\"\"\n return {maybe_box_datetimelike(key): value for key, value in d.items()}\n\n\ndef infer_dtype_from_array(\n arr, pandas_dtype: bool = False\n) -> tuple[DtypeObj, ArrayLike]:\n \"\"\"\n Infer the dtype from an array.\n\n Parameters\n ----------\n arr : array\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, array belongs to pandas extension types\n is inferred as object\n\n Returns\n -------\n tuple (numpy-compat/pandas-compat dtype, array)\n\n Notes\n -----\n if pandas_dtype=False. these infer to numpy dtypes\n exactly with the exception that mixed / object dtypes\n are not coerced by stringifying or conversion\n\n if pandas_dtype=True. 
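# Illustrative sketch (hypothetical aside, shown via public constructors rather than the
# private helper above): scalar values determine the dtype pandas infers for them.
import pandas as pd

print(pd.Series([pd.Timestamp("2021-01-01")]).dtype)            # datetime64[ns]
print(pd.Series([pd.Timestamp("2021-01-01", tz="UTC")]).dtype)  # datetime64[ns, UTC]
print(pd.Series([pd.Timedelta("1D")]).dtype)                    # timedelta64[ns]
print(pd.Series([True]).dtype, pd.Series([3]).dtype, pd.Series([3.5]).dtype)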
datetime64tz-aware/categorical\n types will retain there character.\n\n Examples\n --------\n >>> np.asarray([1, '1'])\n array(['1', '1'], dtype='<U21')\n\n >>> infer_dtype_from_array([1, '1'])\n (dtype('O'), [1, '1'])\n \"\"\"\n if isinstance(arr, np.ndarray):\n return arr.dtype, arr\n\n if not is_list_like(arr):\n raise TypeError(\"'arr' must be list-like\")\n\n if pandas_dtype and is_extension_array_dtype(arr):\n return arr.dtype, arr\n\n elif isinstance(arr, ABCSeries):\n return arr.dtype, np.asarray(arr)\n\n # don't force numpy coerce with nan's\n inferred = lib.infer_dtype(arr, skipna=False)\n if inferred in [\"string\", \"bytes\", \"mixed\", \"mixed-integer\"]:\n return (np.dtype(np.object_), arr)\n\n arr = np.asarray(arr)\n return arr.dtype, arr\n\n\ndef maybe_infer_dtype_type(element):\n \"\"\"\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a `.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple(\"Foo\", \"dtype\")\n >>> maybe_infer_dtype_type(Foo(np.dtype(\"i8\")))\n dtype('int64')\n \"\"\"\n tipo = None\n if hasattr(element, \"dtype\"):\n tipo = element.dtype\n elif is_list_like(element):\n element = np.asarray(element)\n tipo = element.dtype\n return tipo\n\n\ndef maybe_upcast(\n values: NumpyArrayT,\n fill_value: Scalar = np.nan,\n copy: bool = False,\n) -> tuple[NumpyArrayT, Scalar]:\n \"\"\"\n Provide explicit type promotion and coercion.\n\n Parameters\n ----------\n values : np.ndarray\n The array that we may want to upcast.\n fill_value : what we want to fill with\n copy : bool, default True\n If True always make a copy even if no upcast is required.\n\n Returns\n -------\n values: np.ndarray\n the original array, possibly upcast\n fill_value:\n the fill value, possibly upcast\n \"\"\"\n new_dtype, fill_value = maybe_promote(values.dtype, fill_value)\n # We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)\n upcast_values = values.astype(new_dtype, copy=copy)\n\n return upcast_values, fill_value # type: ignore[return-value]\n\n\ndef invalidate_string_dtypes(dtype_set: set[DtypeObj]):\n \"\"\"\n Change string like dtypes to object for\n ``DataFrame.select_dtypes()``.\n \"\"\"\n # error: Argument 1 to <set> has incompatible type \"Type[generic]\"; expected\n # \"Union[dtype[Any], ExtensionDtype, None]\"\n # error: Argument 2 to <set> has incompatible type \"Type[generic]\"; expected\n # \"Union[dtype[Any], ExtensionDtype, None]\"\n non_string_dtypes = dtype_set - {\n np.dtype(\"S\").type, # type: ignore[arg-type]\n np.dtype(\"<U\").type, # type: ignore[arg-type]\n }\n if non_string_dtypes != dtype_set:\n raise TypeError(\"string dtypes are not allowed, use 'object' instead\")\n\n\ndef coerce_indexer_dtype(indexer, categories):\n \"\"\"coerce the indexer input array to the smallest dtype possible\"\"\"\n length = len(categories)\n if length < _int8_max:\n return ensure_int8(indexer)\n elif length < _int16_max:\n return ensure_int16(indexer)\n elif length < _int32_max:\n return ensure_int32(indexer)\n return ensure_int64(indexer)\n\n\ndef astype_dt64_to_dt64tz(\n values: ArrayLike, dtype: DtypeObj, copy: bool, via_utc: bool = False\n) -> DatetimeArray:\n # GH#33401 we have inconsistent 
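# Illustrative sketch (hypothetical helper, not the module's own): coerce_indexer_dtype
# picks the narrowest signed integer type whose max exceeds the number of categories,
# using the same iinfo bounds computed above.
import numpy as np

def smallest_indexer_dtype(n_categories: int) -> np.dtype:
    for dt in (np.int8, np.int16, np.int32, np.int64):
        if n_categories < np.iinfo(dt).max:
            return np.dtype(dt)
    return np.dtype(np.int64)

print(smallest_indexer_dtype(100), smallest_indexer_dtype(40_000), smallest_indexer_dtype(3_000_000_000))
# int8 int32 int64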
behaviors between\n # Datetimeindex[naive].astype(tzaware)\n # Series[dt64].astype(tzaware)\n # This collects them in one place to prevent further fragmentation.\n\n from pandas.core.construction import ensure_wrapped_if_datetimelike\n\n values = ensure_wrapped_if_datetimelike(values)\n values = cast(\"DatetimeArray\", values)\n aware = isinstance(dtype, DatetimeTZDtype)\n\n if via_utc:\n # Series.astype behavior\n\n # caller is responsible for checking this\n assert values.tz is None and aware\n dtype = cast(DatetimeTZDtype, dtype)\n\n if copy:\n # this should be the only copy\n values = values.copy()\n\n warnings.warn(\n \"Using .astype to convert from timezone-naive dtype to \"\n \"timezone-aware dtype is deprecated and will raise in a \"\n \"future version. Use ser.dt.tz_localize instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n # GH#33401 this doesn't match DatetimeArray.astype, which\n # goes through the `not via_utc` path\n return values.tz_localize(\"UTC\").tz_convert(dtype.tz)\n\n else:\n # DatetimeArray/DatetimeIndex.astype behavior\n if values.tz is None and aware:\n dtype = cast(DatetimeTZDtype, dtype)\n warnings.warn(\n \"Using .astype to convert from timezone-naive dtype to \"\n \"timezone-aware dtype is deprecated and will raise in a \"\n \"future version. Use obj.tz_localize instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n return values.tz_localize(dtype.tz)\n\n elif aware:\n # GH#18951: datetime64_tz dtype but not equal means different tz\n dtype = cast(DatetimeTZDtype, dtype)\n result = values.tz_convert(dtype.tz)\n if copy:\n result = result.copy()\n return result\n\n elif values.tz is not None:\n warnings.warn(\n \"Using .astype to convert from timezone-aware dtype to \"\n \"timezone-naive dtype is deprecated and will raise in a \"\n \"future version. Use obj.tz_localize(None) or \"\n \"obj.tz_convert('UTC').tz_localize(None) instead\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n result = values.tz_convert(\"UTC\").tz_localize(None)\n if copy:\n result = result.copy()\n return result\n\n raise NotImplementedError(\"dtype_equal case should be handled elsewhere\")\n\n\ndef astype_td64_unit_conversion(\n values: np.ndarray, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n \"\"\"\n By pandas convention, converting to non-nano timedelta64\n returns an int64-dtyped array with ints representing multiples\n of the desired timedelta unit. 
This is essentially division.\n\n Parameters\n ----------\n values : np.ndarray[timedelta64[ns]]\n dtype : np.dtype\n timedelta64 with unit not-necessarily nano\n copy : bool\n\n Returns\n -------\n np.ndarray\n \"\"\"\n if is_dtype_equal(values.dtype, dtype):\n if copy:\n return values.copy()\n return values\n\n # otherwise we are converting to non-nano\n result = values.astype(dtype, copy=False) # avoid double-copying\n result = result.astype(np.float64)\n\n mask = isna(values)\n np.putmask(result, mask, np.nan)\n return result\n\n\n@overload\ndef astype_nansafe(\n arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...\n) -> np.ndarray:\n ...\n\n\n@overload\ndef astype_nansafe(\n arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...\n) -> ExtensionArray:\n ...\n\n\ndef astype_nansafe(\n arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False\n) -> ArrayLike:\n \"\"\"\n Cast the elements of an array to a given dtype a nan-safe manner.\n\n Parameters\n ----------\n arr : ndarray\n dtype : np.dtype or ExtensionDtype\n copy : bool, default True\n If False, a view will be attempted but may fail, if\n e.g. the item sizes don't align.\n skipna: bool, default False\n Whether or not we should skip NaN when casting as a string-type.\n\n Raises\n ------\n ValueError\n The dtype was a datetime64/timedelta64 dtype, but it had no unit.\n \"\"\"\n if arr.ndim > 1:\n flat = arr.ravel()\n result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)\n # error: Item \"ExtensionArray\" of \"Union[ExtensionArray, ndarray]\" has no\n # attribute \"reshape\"\n return result.reshape(arr.shape) # type: ignore[union-attr]\n\n # We get here with 0-dim from sparse\n arr = np.atleast_1d(arr)\n\n # dispatch on extension dtype if needed\n if isinstance(dtype, ExtensionDtype):\n return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)\n\n elif not isinstance(dtype, np.dtype): # pragma: no cover\n raise ValueError(\"dtype must be np.dtype or ExtensionDtype\")\n\n if arr.dtype.kind in [\"m\", \"M\"] and (\n issubclass(dtype.type, str)\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object]\")\n or dtype == object # type: ignore[comparison-overlap]\n ):\n from pandas.core.construction import ensure_wrapped_if_datetimelike\n\n arr = ensure_wrapped_if_datetimelike(arr)\n return arr.astype(dtype, copy=copy)\n\n if issubclass(dtype.type, str):\n return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)\n\n elif is_datetime64_dtype(arr):\n # Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[signedinteger[Any]]\")\n if dtype == np.int64: # type: ignore[comparison-overlap]\n warnings.warn(\n f\"casting {arr.dtype} values to int64 with .astype(...) \"\n \"is deprecated and will raise in a future version. \"\n \"Use .view(...) 
instead.\",\n FutureWarning,\n # stacklevel chosen to be correct when reached via Series.astype\n stacklevel=find_stack_level(),\n )\n if isna(arr).any():\n raise ValueError(\"Cannot convert NaT values to integer\")\n return arr.view(dtype)\n\n # allow frequency conversions\n if dtype.kind == \"M\":\n return arr.astype(dtype)\n\n raise TypeError(f\"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]\")\n\n elif is_timedelta64_dtype(arr):\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[signedinteger[Any]]\")\n if dtype == np.int64: # type: ignore[comparison-overlap]\n warnings.warn(\n f\"casting {arr.dtype} values to int64 with .astype(...) \"\n \"is deprecated and will raise in a future version. \"\n \"Use .view(...) instead.\",\n FutureWarning,\n # stacklevel chosen to be correct when reached via Series.astype\n stacklevel=find_stack_level(),\n )\n if isna(arr).any():\n raise ValueError(\"Cannot convert NaT values to integer\")\n return arr.view(dtype)\n\n elif dtype.kind == \"m\":\n return astype_td64_unit_conversion(arr, dtype, copy=copy)\n\n raise TypeError(f\"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]\")\n\n elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):\n return astype_float_to_int_nansafe(arr, dtype, copy)\n\n elif is_object_dtype(arr):\n\n # work around NumPy brokenness, #1987\n if np.issubdtype(dtype.type, np.integer):\n return lib.astype_intsafe(arr, dtype)\n\n # if we have a datetime/timedelta array of objects\n # then coerce to a proper dtype and recall astype_nansafe\n\n elif is_datetime64_dtype(dtype):\n from pandas import to_datetime\n\n return astype_nansafe(\n to_datetime(arr).values,\n dtype,\n copy=copy,\n )\n elif is_timedelta64_dtype(dtype):\n from pandas import to_timedelta\n\n return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)\n\n if dtype.name in (\"datetime64\", \"timedelta64\"):\n msg = (\n f\"The '{dtype.name}' dtype has no unit. 
Please pass in \"\n f\"'{dtype.name}[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if copy or is_object_dtype(arr.dtype) or is_object_dtype(dtype):\n # Explicit copy, or required since NumPy can't view from / to object.\n return arr.astype(dtype, copy=True)\n\n return arr.astype(dtype, copy=copy)\n\n\ndef astype_float_to_int_nansafe(\n values: np.ndarray, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n \"\"\"\n astype with a check preventing converting NaN to an meaningless integer value.\n \"\"\"\n if not np.isfinite(values).all():\n raise IntCastingNaNError(\n \"Cannot convert non-finite values (NA or inf) to integer\"\n )\n return values.astype(dtype, copy=copy)\n\n\ndef astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:\n \"\"\"\n Cast array (ndarray or ExtensionArray) to the new dtype.\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n dtype : dtype object\n copy : bool, default False\n copy if indicated\n\n Returns\n -------\n ndarray or ExtensionArray\n \"\"\"\n if (\n values.dtype.kind in [\"m\", \"M\"]\n and dtype.kind in [\"i\", \"u\"]\n and isinstance(dtype, np.dtype)\n and dtype.itemsize != 8\n ):\n # TODO(2.0) remove special case once deprecation on DTA/TDA is enforced\n msg = rf\"cannot astype a datetimelike from [{values.dtype}] to [{dtype}]\"\n raise TypeError(msg)\n\n if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype):\n return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True)\n\n if is_dtype_equal(values.dtype, dtype):\n if copy:\n return values.copy()\n return values\n\n if not isinstance(values, np.ndarray):\n # i.e. ExtensionArray\n values = values.astype(dtype, copy=copy)\n\n else:\n values = astype_nansafe(values, dtype, copy=copy)\n\n # in pandas we don't store numpy str dtypes, so convert to object\n if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n\n return values\n\n\ndef astype_array_safe(\n values: ArrayLike, dtype, copy: bool = False, errors: str = \"raise\"\n) -> ArrayLike:\n \"\"\"\n Cast array (ndarray or ExtensionArray) to the new dtype.\n\n This basically is the implementation for DataFrame/Series.astype and\n includes all custom logic for pandas (NaN-safety, converting str to object,\n not allowing )\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n ndarray or ExtensionArray\n \"\"\"\n errors_legal_values = (\"raise\", \"ignore\")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n \"Expected value of kwarg 'errors' to be one of \"\n f\"{list(errors_legal_values)}. Supplied value is '{errors}'\"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f\"Expected an instance of {dtype.__name__}, \"\n \"but got the class instead. Try instantiating 'dtype'.\"\n )\n raise TypeError(msg)\n\n dtype = pandas_dtype(dtype)\n if isinstance(dtype, PandasDtype):\n # Ensure we don't end up with a PandasArray\n dtype = dtype.numpy_dtype\n\n try:\n new_values = astype_array(values, dtype, copy=copy)\n except (ValueError, TypeError):\n # e.g. 
astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == \"ignore\":\n new_values = values\n else:\n raise\n\n return new_values\n\n\ndef soft_convert_objects(\n values: np.ndarray,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n period: bool = True,\n copy: bool = True,\n) -> ArrayLike:\n \"\"\"\n Try to coerce datetime, timedelta, and numeric object-dtype columns\n to inferred dtype.\n\n Parameters\n ----------\n values : np.ndarray[object]\n datetime : bool, default True\n numeric: bool, default True\n timedelta : bool, default True\n period : bool, default True\n copy : bool, default True\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(copy, \"copy\")\n\n conversion_count = sum((datetime, numeric, timedelta))\n if conversion_count == 0:\n raise ValueError(\"At least one of datetime, numeric or timedelta must be True.\")\n\n # Soft conversions\n if datetime or timedelta:\n # GH 20380, when datetime is beyond year 2262, hence outside\n # bound of nanosecond-resolution 64-bit integers.\n try:\n converted = lib.maybe_convert_objects(\n values,\n convert_datetime=datetime,\n convert_timedelta=timedelta,\n convert_period=period,\n )\n except (OutOfBoundsDatetime, ValueError):\n return values\n if converted is not values:\n return converted\n\n if numeric and is_object_dtype(values.dtype):\n converted, _ = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)\n\n # If all NaNs, then do not-alter\n values = converted if not isna(converted).all() else values\n values = values.copy() if copy else values\n\n return values\n\n\ndef convert_dtypes(\n input_array: ArrayLike,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n) -> DtypeObj:\n \"\"\"\n Convert objects to best possible type, and optionally,\n to types supporting ``pd.NA``.\n\n Parameters\n ----------\n input_array : ExtensionArray or np.ndarray\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n convert_floating : bool, defaults True\n Whether, if possible, conversion can be done to floating extension types.\n If `convert_integer` is also True, preference will be give to integer\n dtypes if the floats can be faithfully casted to integers.\n\n Returns\n -------\n np.dtype, or ExtensionDtype\n \"\"\"\n inferred_dtype: str | DtypeObj\n\n if (\n convert_string or convert_integer or convert_boolean or convert_floating\n ) and isinstance(input_array, np.ndarray):\n\n if is_object_dtype(input_array.dtype):\n inferred_dtype = lib.infer_dtype(input_array)\n else:\n inferred_dtype = input_array.dtype\n\n if is_string_dtype(inferred_dtype):\n if not convert_string or inferred_dtype == \"bytes\":\n return input_array.dtype\n else:\n return pandas_dtype(\"string\")\n\n if convert_integer:\n target_int_dtype = pandas_dtype(\"Int64\")\n\n if is_integer_dtype(input_array.dtype):\n from pandas.core.arrays.integer import INT_STR_TO_DTYPE\n\n inferred_dtype = INT_STR_TO_DTYPE.get(\n input_array.dtype.name, target_int_dtype\n )\n elif 
is_numeric_dtype(input_array.dtype):\n # TODO: de-dup with maybe_cast_to_integer_array?\n arr = input_array[notna(input_array)]\n if (arr.astype(int) == arr).all():\n inferred_dtype = target_int_dtype\n else:\n inferred_dtype = input_array.dtype\n\n if convert_floating:\n if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(\n input_array.dtype\n ):\n from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE\n\n inferred_float_dtype: DtypeObj = FLOAT_STR_TO_DTYPE.get(\n input_array.dtype.name, pandas_dtype(\"Float64\")\n )\n # if we could also convert to integer, check if all floats\n # are actually integers\n if convert_integer:\n # TODO: de-dup with maybe_cast_to_integer_array?\n arr = input_array[notna(input_array)]\n if (arr.astype(int) == arr).all():\n inferred_dtype = pandas_dtype(\"Int64\")\n else:\n inferred_dtype = inferred_float_dtype\n else:\n inferred_dtype = inferred_float_dtype\n\n if convert_boolean:\n if is_bool_dtype(input_array.dtype):\n inferred_dtype = pandas_dtype(\"boolean\")\n elif isinstance(inferred_dtype, str) and inferred_dtype == \"boolean\":\n inferred_dtype = pandas_dtype(\"boolean\")\n\n if isinstance(inferred_dtype, str):\n # If we couldn't do anything else, then we retain the dtype\n inferred_dtype = input_array.dtype\n\n else:\n return input_array.dtype\n\n # error: Incompatible return value type (got \"Union[str, Union[dtype[Any],\n # ExtensionDtype]]\", expected \"Union[dtype[Any], ExtensionDtype]\")\n return inferred_dtype # type: ignore[return-value]\n\n\ndef maybe_infer_to_datetimelike(\n value: np.ndarray,\n) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:\n \"\"\"\n we might have a array (or single object) that is datetime like,\n and no dtype is passed don't change the value unless we find a\n datetime/timedelta set\n\n this is pretty strict in that a datetime/timedelta is REQUIRED\n in addition to possible nulls/string likes\n\n Parameters\n ----------\n value : np.ndarray[object]\n\n Returns\n -------\n np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray\n\n \"\"\"\n if not isinstance(value, np.ndarray) or value.dtype != object:\n # Caller is responsible for passing only ndarray[object]\n raise TypeError(type(value)) # pragma: no cover\n\n v = np.array(value, copy=False)\n\n shape = v.shape\n if v.ndim != 1:\n v = v.ravel()\n\n if not len(v):\n return value\n\n def try_datetime(v: np.ndarray) -> ArrayLike:\n # Coerce to datetime64, datetime64tz, or in corner cases\n # object[datetimes]\n from pandas.core.arrays.datetimes import sequence_to_datetimes\n\n try:\n # GH#19671 we pass require_iso8601 to be relatively strict\n # when parsing strings.\n dta = sequence_to_datetimes(v, require_iso8601=True)\n except (ValueError, TypeError):\n # e.g. <class 'numpy.timedelta64'> is not convertible to datetime\n return v.reshape(shape)\n else:\n # GH#19761 we may have mixed timezones, in which cast 'dta' is\n # an ndarray[object]. 
Only 1 test\n # relies on this behavior, see GH#40111\n return dta.reshape(shape)\n\n def try_timedelta(v: np.ndarray) -> np.ndarray:\n # safe coerce to timedelta64\n\n # will try first with a string & object conversion\n try:\n # bc we know v.dtype == object, this is equivalent to\n # `np.asarray(to_timedelta(v))`, but using a lower-level API that\n # does not require a circular import.\n td_values = array_to_timedelta64(v).view(\"m8[ns]\")\n except (ValueError, OverflowError):\n return v.reshape(shape)\n else:\n return td_values.reshape(shape)\n\n inferred_type, seen_str = lib.infer_datetimelike_array(ensure_object(v))\n if inferred_type in [\"period\", \"interval\"]:\n # Incompatible return value type (got \"Union[ExtensionArray, ndarray]\",\n # expected \"Union[ndarray, DatetimeArray, TimedeltaArray, PeriodArray,\n # IntervalArray]\")\n return lib.maybe_convert_objects( # type: ignore[return-value]\n v, convert_period=True, convert_interval=True\n )\n\n if inferred_type == \"datetime\":\n # error: Incompatible types in assignment (expression has type \"ExtensionArray\",\n # variable has type \"Union[ndarray, List[Any]]\")\n value = try_datetime(v) # type: ignore[assignment]\n elif inferred_type == \"timedelta\":\n value = try_timedelta(v)\n elif inferred_type == \"nat\":\n\n # if all NaT, return as datetime\n if isna(v).all():\n # error: Incompatible types in assignment (expression has type\n # \"ExtensionArray\", variable has type \"Union[ndarray, List[Any]]\")\n value = try_datetime(v) # type: ignore[assignment]\n else:\n\n # We have at least a NaT and a string\n # try timedelta first to avoid spurious datetime conversions\n # e.g. '00:00:01' is a timedelta but technically is also a datetime\n value = try_timedelta(v)\n if lib.infer_dtype(value, skipna=False) in [\"mixed\"]:\n # cannot skip missing values, as NaT implies that the string\n # is actually a datetime\n\n # error: Incompatible types in assignment (expression has type\n # \"ExtensionArray\", variable has type \"Union[ndarray, List[Any]]\")\n value = try_datetime(v) # type: ignore[assignment]\n\n if value.dtype.kind in [\"m\", \"M\"] and seen_str:\n # TODO(2.0): enforcing this deprecation should close GH#40111\n warnings.warn(\n f\"Inferring {value.dtype} from data containing strings is deprecated \"\n \"and will be removed in a future version. 
To retain the old behavior \"\n f\"explicitly pass Series(data, dtype={value.dtype})\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return value\n\n\ndef maybe_cast_to_datetime(\n value: ExtensionArray | np.ndarray | list, dtype: DtypeObj | None\n) -> ExtensionArray | np.ndarray:\n \"\"\"\n try to cast the array/value to a datetimelike dtype, converting float\n nan to iNaT\n\n We allow a list *only* when dtype is not None.\n \"\"\"\n from pandas.core.arrays.datetimes import sequence_to_datetimes\n from pandas.core.arrays.timedeltas import TimedeltaArray\n\n if not is_list_like(value):\n raise TypeError(\"value must be listlike\")\n\n if is_timedelta64_dtype(dtype):\n # TODO: _from_sequence would raise ValueError in cases where\n # ensure_nanosecond_dtype raises TypeError\n dtype = cast(np.dtype, dtype)\n dtype = ensure_nanosecond_dtype(dtype)\n res = TimedeltaArray._from_sequence(value, dtype=dtype)\n return res\n\n if dtype is not None:\n is_datetime64 = is_datetime64_dtype(dtype)\n is_datetime64tz = is_datetime64tz_dtype(dtype)\n\n vdtype = getattr(value, \"dtype\", None)\n\n if is_datetime64 or is_datetime64tz:\n dtype = ensure_nanosecond_dtype(dtype)\n\n value = np.array(value, copy=False)\n\n # we have an array of datetime or timedeltas & nulls\n if value.size or not is_dtype_equal(value.dtype, dtype):\n _disallow_mismatched_datetimelike(value, dtype)\n\n try:\n if is_datetime64:\n dta = sequence_to_datetimes(value)\n # GH 25843: Remove tz information since the dtype\n # didn't specify one\n\n if dta.tz is not None:\n warnings.warn(\n \"Data is timezone-aware. Converting \"\n \"timezone-aware data to timezone-naive by \"\n \"passing dtype='datetime64[ns]' to \"\n \"DataFrame or Series is deprecated and will \"\n \"raise in a future version. Use \"\n \"`pd.Series(values).dt.tz_localize(None)` \"\n \"instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n # equiv: dta.view(dtype)\n # Note: NOT equivalent to dta.astype(dtype)\n dta = dta.tz_localize(None)\n\n value = dta\n elif is_datetime64tz:\n dtype = cast(DatetimeTZDtype, dtype)\n # The string check can be removed once issue #13712\n # is solved. String data that is passed with a\n # datetime64tz is assumed to be naive which should\n # be localized to the timezone.\n is_dt_string = is_string_dtype(value.dtype)\n dta = sequence_to_datetimes(value)\n if dta.tz is not None:\n value = dta.astype(dtype, copy=False)\n elif is_dt_string:\n # Strings here are naive, so directly localize\n # equiv: dta.astype(dtype) # though deprecated\n\n value = dta.tz_localize(dtype.tz)\n else:\n # Numeric values are UTC at this point,\n # so localize and convert\n # equiv: Series(dta).astype(dtype) # though deprecated\n if getattr(vdtype, \"kind\", None) == \"M\":\n # GH#24559, GH#33401 deprecate behavior inconsistent\n # with DatetimeArray/DatetimeIndex\n warnings.warn(\n \"In a future version, constructing a Series \"\n \"from datetime64[ns] data and a \"\n \"DatetimeTZDtype will interpret the data \"\n \"as wall-times instead of \"\n \"UTC times, matching the behavior of \"\n \"DatetimeIndex. 
To treat the data as UTC \"\n \"times, use pd.Series(data).dt\"\n \".tz_localize('UTC').tz_convert(dtype.tz) \"\n \"or pd.Series(data.view('int64'), dtype=dtype)\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n value = dta.tz_localize(\"UTC\").tz_convert(dtype.tz)\n except OutOfBoundsDatetime:\n raise\n except ValueError:\n # TODO(GH#40048): only catch dateutil's ParserError\n # once we can reliably import it in all supported versions\n pass\n\n elif getattr(vdtype, \"kind\", None) in [\"m\", \"M\"]:\n # we are already datetimelike and want to coerce to non-datetimelike;\n # astype_nansafe will raise for anything other than object, then upcast.\n # see test_datetimelike_values_with_object_dtype\n # error: Argument 2 to \"astype_nansafe\" has incompatible type\n # \"Union[dtype[Any], ExtensionDtype]\"; expected \"dtype[Any]\"\n return astype_nansafe(value, dtype) # type: ignore[arg-type]\n\n elif isinstance(value, np.ndarray):\n if value.dtype.kind in [\"M\", \"m\"]:\n # catch a datetime/timedelta that is not of ns variety\n # and no coercion specified\n value = sanitize_to_nanoseconds(value)\n\n elif value.dtype == object:\n value = maybe_infer_to_datetimelike(value)\n\n elif isinstance(value, list):\n # we only get here with dtype=None, which we do not allow\n raise ValueError(\n \"maybe_cast_to_datetime allows a list *only* if dtype is not None\"\n )\n\n # at this point we have converted or raised in all cases where we had a list\n return cast(ArrayLike, value)\n\n\ndef sanitize_to_nanoseconds(values: np.ndarray, copy: bool = False) -> np.ndarray:\n \"\"\"\n Safely convert non-nanosecond datetime64 or timedelta64 values to nanosecond.\n \"\"\"\n dtype = values.dtype\n if dtype.kind == \"M\" and dtype != DT64NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n elif dtype.kind == \"m\" and dtype != TD64NS_DTYPE:\n values = conversion.ensure_timedelta64ns(values)\n\n elif copy:\n values = values.copy()\n\n return values\n\n\ndef ensure_nanosecond_dtype(dtype: DtypeObj) -> DtypeObj:\n \"\"\"\n Convert dtypes with granularity less than nanosecond to nanosecond\n\n >>> ensure_nanosecond_dtype(np.dtype(\"M8[s]\"))\n dtype('<M8[ns]')\n\n >>> ensure_nanosecond_dtype(np.dtype(\"m8[ps]\"))\n Traceback (most recent call last):\n ...\n TypeError: cannot convert timedeltalike to dtype [timedelta64[ps]]\n \"\"\"\n msg = (\n f\"The '{dtype.name}' dtype has no unit. \"\n f\"Please pass in '{dtype.name}[ns]' instead.\"\n )\n\n # unpack e.g. SparseDtype\n dtype = getattr(dtype, \"subtype\", dtype)\n\n if not isinstance(dtype, np.dtype):\n # i.e. 
datetime64tz\n pass\n\n elif dtype.kind == \"M\" and dtype != DT64NS_DTYPE:\n # pandas supports dtype whose granularity is less than [ns]\n # e.g., [ps], [fs], [as]\n if dtype <= np.dtype(\"M8[ns]\"):\n if dtype.name == \"datetime64\":\n raise ValueError(msg)\n dtype = DT64NS_DTYPE\n else:\n raise TypeError(f\"cannot convert datetimelike to dtype [{dtype}]\")\n\n elif dtype.kind == \"m\" and dtype != TD64NS_DTYPE:\n # pandas supports dtype whose granularity is less than [ns]\n # e.g., [ps], [fs], [as]\n if dtype <= np.dtype(\"m8[ns]\"):\n if dtype.name == \"timedelta64\":\n raise ValueError(msg)\n dtype = TD64NS_DTYPE\n else:\n raise TypeError(f\"cannot convert timedeltalike to dtype [{dtype}]\")\n return dtype\n\n\n# TODO: overload to clarify that if all types are np.dtype then result is np.dtype\ndef find_common_type(types: list[DtypeObj]) -> DtypeObj:\n \"\"\"\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n \"\"\"\n if not types:\n raise ValueError(\"no types given\")\n\n first = types[0]\n\n # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)\n # => object\n if lib.dtypes_all_equal(list(types)):\n return first\n\n # get unique types (dict.fromkeys is used as order-preserving set())\n types = list(dict.fromkeys(types).keys())\n\n if any(isinstance(t, ExtensionDtype) for t in types):\n for t in types:\n if isinstance(t, ExtensionDtype):\n res = t._get_common_dtype(types)\n if res is not None:\n return res\n return np.dtype(\"object\")\n\n # take lowest unit\n if all(is_datetime64_dtype(t) for t in types):\n return np.dtype(\"datetime64[ns]\")\n if all(is_timedelta64_dtype(t) for t in types):\n return np.dtype(\"timedelta64[ns]\")\n\n # don't mix bool / int or float or complex\n # this is different from numpy, which casts bool with float/int as int\n has_bools = any(is_bool_dtype(t) for t in types)\n if has_bools:\n for t in types:\n if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):\n return np.dtype(\"object\")\n\n # error: Argument 1 to \"find_common_type\" has incompatible type\n # \"List[Union[dtype, ExtensionDtype]]\"; expected \"Sequence[Union[dtype,\n # None, type, _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int,\n # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]]\"\n return np.find_common_type(types, []) # type: ignore[arg-type]\n\n\ndef construct_2d_arraylike_from_scalar(\n value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n\n shape = (length, width)\n\n if dtype.kind in [\"m\", \"M\"]:\n value = maybe_unbox_datetimelike_tz_deprecation(value, dtype)\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object]\")\n elif dtype == object: # type: ignore[comparison-overlap]\n if isinstance(value, (np.timedelta64, np.datetime64)):\n # calling np.array below would cast to pytimedelta/pydatetime\n out = np.empty(shape, dtype=object)\n out.fill(value)\n return out\n\n # Attempt to coerce to a numpy array\n try:\n arr = np.array(value, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as err:\n raise TypeError(\n f\"DataFrame constructor called with incompatible data and dtype: {err}\"\n ) from err\n\n if arr.ndim != 0:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n return np.full(shape, arr)\n\n\ndef construct_1d_arraylike_from_scalar(\n value: Scalar, 
length: int, dtype: DtypeObj | None\n) -> ArrayLike:\n \"\"\"\n create a np.ndarray / pandas type of specified shape and dtype\n filled with values\n\n Parameters\n ----------\n value : scalar value\n length : int\n dtype : pandas_dtype or np.dtype\n\n Returns\n -------\n np.ndarray / pandas type of length, filled with value\n\n \"\"\"\n\n if dtype is None:\n try:\n dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)\n except OutOfBoundsDatetime:\n dtype = np.dtype(object)\n\n if isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n subarr = cls._from_sequence([value] * length, dtype=dtype)\n\n else:\n\n if length and is_integer_dtype(dtype) and isna(value):\n # coerce if we have nan for an integer dtype\n dtype = np.dtype(\"float64\")\n elif isinstance(dtype, np.dtype) and dtype.kind in (\"U\", \"S\"):\n # we need to coerce to object dtype to avoid\n # to allow numpy to take our string as a scalar value\n dtype = np.dtype(\"object\")\n if not isna(value):\n value = ensure_str(value)\n elif dtype.kind in [\"M\", \"m\"]:\n value = maybe_unbox_datetimelike_tz_deprecation(value, dtype)\n\n subarr = np.empty(length, dtype=dtype)\n subarr.fill(value)\n\n return subarr\n\n\ndef maybe_unbox_datetimelike_tz_deprecation(value: Scalar, dtype: DtypeObj):\n \"\"\"\n Wrap maybe_unbox_datetimelike with a check for a timezone-aware Timestamp\n along with a timezone-naive datetime64 dtype, which is deprecated.\n \"\"\"\n # Caller is responsible for checking dtype.kind in [\"m\", \"M\"]\n\n if isinstance(value, datetime):\n # we dont want to box dt64, in particular datetime64(\"NaT\")\n value = maybe_box_datetimelike(value, dtype)\n\n try:\n value = maybe_unbox_datetimelike(value, dtype)\n except TypeError:\n if (\n isinstance(value, Timestamp)\n and value.tzinfo is not None\n and isinstance(dtype, np.dtype)\n and dtype.kind == \"M\"\n ):\n warnings.warn(\n \"Data is timezone-aware. Converting \"\n \"timezone-aware data to timezone-naive by \"\n \"passing dtype='datetime64[ns]' to \"\n \"DataFrame or Series is deprecated and will \"\n \"raise in a future version. 
Use \"\n \"`pd.Series(values).dt.tz_localize(None)` \"\n \"instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n new_value = value.tz_localize(None)\n return maybe_unbox_datetimelike(new_value, dtype)\n else:\n raise\n return value\n\n\ndef construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:\n \"\"\"\n Transform any list-like object in a 1-dimensional numpy array of object\n dtype.\n\n Parameters\n ----------\n values : any iterable which has a len()\n\n Raises\n ------\n TypeError\n * If `values` does not have a len()\n\n Returns\n -------\n 1-dimensional numpy array of dtype object\n \"\"\"\n # numpy will try to interpret nested lists as further dimensions, hence\n # making a 1D array that contains list-likes is a bit tricky:\n result = np.empty(len(values), dtype=\"object\")\n result[:] = values\n return result\n\n\ndef maybe_cast_to_integer_array(\n arr: list | np.ndarray, dtype: np.dtype, copy: bool = False\n) -> np.ndarray:\n \"\"\"\n Takes any dtype and returns the casted version, raising for when data is\n incompatible with integer/unsigned integer dtypes.\n\n Parameters\n ----------\n arr : np.ndarray or list\n The array to cast.\n dtype : np.dtype\n The integer dtype to cast the array to.\n copy: bool, default False\n Whether to make a copy of the array before returning.\n\n Returns\n -------\n ndarray\n Array of integer or unsigned integer dtype.\n\n Raises\n ------\n OverflowError : the dtype is incompatible with the data\n ValueError : loss of precision has occurred during casting\n\n Examples\n --------\n If you try to coerce negative values to unsigned integers, it raises:\n\n >>> pd.Series([-1], dtype=\"uint64\")\n Traceback (most recent call last):\n ...\n OverflowError: Trying to coerce negative values to unsigned integers\n\n Also, if you try to coerce float values to integers, it raises:\n\n >>> pd.Series([1, 2, 3.5], dtype=\"int64\")\n Traceback (most recent call last):\n ...\n ValueError: Trying to coerce float values to integers\n \"\"\"\n assert is_integer_dtype(dtype)\n\n try:\n if not isinstance(arr, np.ndarray):\n casted = np.array(arr, dtype=dtype, copy=copy)\n else:\n casted = arr.astype(dtype, copy=copy)\n except OverflowError as err:\n raise OverflowError(\n \"The elements provided in the data cannot all be \"\n f\"casted to the dtype {dtype}\"\n ) from err\n\n if np.array_equal(arr, casted):\n return casted\n\n # We do this casting to allow for proper\n # data and dtype checking.\n #\n # We didn't do this earlier because NumPy\n # doesn't handle `uint64` correctly.\n arr = np.asarray(arr)\n\n if is_unsigned_integer_dtype(dtype) and (arr < 0).any():\n raise OverflowError(\"Trying to coerce negative values to unsigned integers\")\n\n if is_float_dtype(arr.dtype):\n if not np.isfinite(arr).all():\n raise IntCastingNaNError(\n \"Cannot convert non-finite values (NA or inf) to integer\"\n )\n raise ValueError(\"Trying to coerce float values to integers\")\n if is_object_dtype(arr.dtype):\n raise ValueError(\"Trying to coerce float values to integers\")\n\n if casted.dtype < arr.dtype:\n # GH#41734 e.g. [1, 200, 923442] and dtype=\"int8\" -> overflows\n warnings.warn(\n f\"Values are too large to be losslessly cast to {dtype}. \"\n \"In a future version this will raise OverflowError. 
To retain the \"\n f\"old behavior, use pd.Series(values).astype({dtype})\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return casted\n\n if arr.dtype.kind in [\"m\", \"M\"]:\n # test_constructor_maskedarray_nonfloat\n warnings.warn(\n f\"Constructing Series or DataFrame from {arr.dtype} values and \"\n f\"dtype={dtype} is deprecated and will raise in a future version. \"\n \"Use values.view(dtype) instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return casted\n\n # No known cases that get here, but raising explicitly to cover our bases.\n raise ValueError(f\"values cannot be losslessly cast to {dtype}\")\n\n\ndef convert_scalar_for_putitemlike(scalar: Scalar, dtype: np.dtype) -> Scalar:\n \"\"\"\n Convert datetimelike scalar if we are setting into a datetime64\n or timedelta64 ndarray.\n\n Parameters\n ----------\n scalar : scalar\n dtype : np.dtype\n\n Returns\n -------\n scalar\n \"\"\"\n if dtype.kind in [\"m\", \"M\"]:\n scalar = maybe_box_datetimelike(scalar, dtype)\n return maybe_unbox_datetimelike(scalar, dtype)\n else:\n validate_numeric_casting(dtype, scalar)\n return scalar\n\n\ndef validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None:\n \"\"\"\n Check that we can losslessly insert the given value into an array\n with the given dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n value : scalar\n\n Raises\n ------\n ValueError\n \"\"\"\n # error: Argument 1 to \"__call__\" of \"ufunc\" has incompatible type\n # \"Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]\";\n # expected \"Union[Union[int, float, complex, str, bytes, generic],\n # Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], _SupportsArray]\"\n if (\n issubclass(dtype.type, (np.integer, np.bool_))\n and is_float(value)\n and np.isnan(value) # type: ignore[arg-type]\n ):\n raise ValueError(\"Cannot assign nan to integer series\")\n\n elif dtype.kind in [\"i\", \"u\", \"f\", \"c\"]:\n if is_bool(value) or isinstance(value, np.timedelta64):\n # numpy will cast td64 to integer if we're not careful\n raise ValueError(\n f\"Cannot assign {type(value).__name__} to float/integer series\"\n )\n elif dtype.kind == \"b\":\n if is_scalar(value) and not is_bool(value):\n raise ValueError(f\"Cannot assign {type(value).__name__} to bool series\")\n\n\ndef can_hold_element(arr: ArrayLike, element: Any) -> bool:\n \"\"\"\n Can we do an inplace setitem with this element in an array with this dtype?\n\n Parameters\n ----------\n arr : np.ndarray or ExtensionArray\n element : Any\n\n Returns\n -------\n bool\n \"\"\"\n dtype = arr.dtype\n if not isinstance(dtype, np.dtype) or dtype.kind in [\"m\", \"M\"]:\n if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):\n # np.dtype here catches datetime64ns and timedelta64ns; we assume\n # in this case that we have DatetimeArray/TimedeltaArray\n arr = cast(\n \"PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray\", arr\n )\n try:\n arr._validate_setitem_value(element)\n return True\n except (ValueError, TypeError):\n return False\n\n # This is technically incorrect, but maintains the behavior of\n # ExtensionBlock._can_hold_element\n return True\n\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object]\")\n if dtype == object: # type: ignore[comparison-overlap]\n return True\n\n tipo = maybe_infer_dtype_type(element)\n\n if dtype.kind in [\"i\", \"u\"]:\n if isinstance(element, range):\n return 
_dtype_can_hold_range(element, dtype)\n\n if tipo is not None:\n if tipo.kind not in [\"i\", \"u\"]:\n if is_float(element) and element.is_integer():\n return True\n\n if isinstance(element, np.ndarray) and element.dtype.kind == \"f\":\n # If all can be losslessly cast to integers, then we can hold them\n # We do something similar in putmask_smart\n casted = element.astype(dtype)\n comp = casted == element\n return comp.all()\n\n # Anything other than integer we cannot hold\n return False\n elif dtype.itemsize < tipo.itemsize:\n return False\n elif not isinstance(tipo, np.dtype):\n # i.e. nullable IntegerDtype; we can put this into an ndarray\n # losslessly iff it has no NAs\n return not element._mask.any()\n\n return True\n\n # We have not inferred an integer from the dtype\n # check if we have a builtin int or a float equal to an int\n return is_integer(element) or (is_float(element) and element.is_integer())\n\n elif dtype.kind == \"f\":\n if tipo is not None:\n # TODO: itemsize check?\n if tipo.kind not in [\"f\", \"i\", \"u\"]:\n # Anything other than float/integer we cannot hold\n return False\n elif not isinstance(tipo, np.dtype):\n # i.e. nullable IntegerDtype or FloatingDtype;\n # we can put this into an ndarray losslessly iff it has no NAs\n return not element._mask.any()\n return True\n\n return lib.is_integer(element) or lib.is_float(element)\n\n elif dtype.kind == \"c\":\n if tipo is not None:\n return tipo.kind in [\"c\", \"f\", \"i\", \"u\"]\n return (\n lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element)\n )\n\n elif dtype.kind == \"b\":\n if tipo is not None:\n return tipo.kind == \"b\"\n return lib.is_bool(element)\n\n elif dtype.kind == \"S\":\n # TODO: test tests.frame.methods.test_replace tests get here,\n # need more targeted tests. xref phofl has a PR about this\n if tipo is not None:\n return tipo.kind == \"S\" and tipo.itemsize <= dtype.itemsize\n return isinstance(element, bytes) and len(element) <= dtype.itemsize\n\n raise NotImplementedError(dtype)\n\n\ndef _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:\n \"\"\"\n maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),\n but in many cases a range can be held by a smaller integer dtype.\n Check if this is one of those cases.\n \"\"\"\n if not len(rng):\n return True\n return np.can_cast(rng[0], dtype) and np.can_cast(rng[-1], dtype)\n" ]
[ [ "pandas.core.indexes.datetimes.DatetimeIndex", "pandas.to_datetime", "pandas.Series", "numpy.asarray", "pandas.DataFrame", "pandas.tests.plotting.common.TestPlotBase.setup_method", "pandas.core.indexes.period.Period", "pandas.core.indexes.datetimes.date_range", "numpy.random.randn", "pandas.core.indexes.timedeltas.timedelta_range", "pandas.isna", "pandas.core.indexes.period.PeriodIndex", "pandas.plotting._matplotlib.converter.get_datevalue", "numpy.random.randint", "matplotlib.pyplot.gca", "pandas._testing.assert_numpy_array_equal", "pandas._libs.tslibs.to_offset", "numpy.arange", "pandas._testing.makeTimeSeries", "pandas.core.indexes.period.period_range", "pandas.Index", "matplotlib.pyplot.gcf", "pandas._testing.makeDataFrame", "matplotlib.pyplot.close", "pandas._testing.assert_series_equal", "pandas._testing.assert_produces_warning", "matplotlib.pyplot.savefig", "pandas._testing.makePeriodSeries", "pandas._testing.close", "numpy.array", "numpy.random.RandomState", "pandas.plotting._matplotlib.converter._from_ordinal", "pandas.tseries.offsets.WeekOfMonth", "pandas._testing.ensure_clean", "matplotlib.pyplot.clf", "pandas.core.indexes.datetimes.bdate_range", "pandas._testing.makeTimeDataFrame", "numpy.fabs" ], [ "numpy.can_cast", "pandas.util._validators.validate_bool_kwarg", "pandas._libs.tslibs.Timestamp", "pandas.to_datetime", "numpy.asarray", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.common.is_dtype_equal", "numpy.issubdtype", "pandas.core.dtypes.dtypes.DatetimeTZDtype", "numpy.promote_types", "numpy.dtype", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.core.dtypes.common.is_complex_dtype", "pandas.core.dtypes.missing.notna", "pandas.core.dtypes.dtypes.IntervalDtype", "pandas._libs.lib.is_complex", "numpy.iinfo", "pandas._libs.tslibs.conversion.ensure_datetime64ns", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas.core.dtypes.common.ensure_object", "pandas.core.dtypes.inference.is_list_like", "numpy.min_scalar_type", "pandas.errors.IntCastingNaNError", "pandas.core.dtypes.common.is_unsigned_integer_dtype", "pandas.core.dtypes.common.is_numeric_dtype", "numpy.allclose", "pandas._libs.lib.is_integer", "numpy.full", "numpy.atleast_1d", "pandas.core.dtypes.common.is_complex", "pandas.core.dtypes.common.ensure_int64", "pandas._libs.tslibs.conversion.ensure_timedelta64ns", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_float", "pandas.core.dtypes.common.is_string_dtype", "pandas.core.arrays.timedeltas.TimedeltaArray._from_sequence", "numpy.putmask", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.dtypes.common.is_timedelta64_dtype", "numpy.isnan", "pandas._libs.lib.astype_intsafe", "pandas.core.dtypes.common.ensure_int8", "pandas._libs.lib.ensure_string_array", "pandas.core.dtypes.common.ensure_str", "numpy.timedelta64", "pandas.to_timedelta", "pandas.core.dtypes.common.ensure_int16", "pandas.core.construction.ensure_wrapped_if_datetimelike", "numpy.find_common_type", "numpy.array", "pandas.core.dtypes.dtypes.PeriodDtype", "pandas.core.dtypes.common.is_bool", "pandas.util._exceptions.find_stack_level", "pandas.core.dtypes.missing.is_valid_na_for_dtype", "pandas._libs.tslibs.Timedelta", "pandas.core.dtypes.common.is_bool_dtype", "numpy.array_equal", "pandas.core.arrays.datetimes.sequence_to_datetimes", "numpy.isfinite", "pandas._libs.lib.is_float", "pandas.core.dtypes.common.is_scalar", "pandas._libs.tslibs.timedeltas.array_to_timedelta64", 
"pandas._libs.lib.is_period", "pandas.core.dtypes.common.is_integer", "pandas.core.arrays.integer.INT_STR_TO_DTYPE.get", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.maybe_convert_objects", "pandas.core.dtypes.common.ensure_int32", "pandas._libs.lib.infer_dtype", "pandas._libs.lib.is_bool", "pandas._libs.lib.is_interval", "pandas._libs.lib.item_from_zerodim", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krassowski/jupyter-helpers
[ "9feb0af6563b56d02688c18e1a9a415d15d9b1a2" ]
[ "jupyter_helpers/table.py" ]
[ "import pandas as pd\nfrom IPython.core.display import display\n\n\ndef bordered_table(hide_headers=[], color='#ddd'):\n return [\n {'selector': 'th', 'props': [('text-align', 'center'), ('border', f'1px solid {color}')]},\n {'selector': 'td', 'props': [('border', f'1px solid {color}')]},\n *[\n {'selector': f'thead tr:nth-child({row})', 'props': [('display', 'none')]}\n for row in hide_headers\n ]\n ]\n\n\ndef display_table(table, n_rows=50, n_cols=None, long_names=-1):\n if not n_cols:\n n_cols = n_rows\n with pd.option_context(\n 'display.max_rows', n_rows,\n 'display.max_columns', n_cols,\n 'display.max_colwidth', long_names\n ):\n display(table)\n" ]
[ [ "pandas.option_context" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
chelffey/tilebot
[ "3fa4cc3ea7c03786fb8ac02458ef4e6d464603ec" ]
[ "src/tiler.py" ]
[ "# Purpose: takes a list of filenames AND/OR publically accessible urls. \n# Returns a tiled image file of tiles SIZExSIZE, separated by spaces of width \n# DIFF, in rows if length ROWSIZE. \n# files that can't be retrieved are returned blank. \n\nimport os\nimport numpy as np\nfrom PIL import Image\nimport urllib.request\nimport validators\n\n# global vars\nEMPTY = \"empty\"\n\n'''\nCrops given 'Image' object to the largest square possible. \nCenters the image. \n'''\ndef crop_square(im):\n # crops to square, based on smallest length.\n width, height = im.size\n side_length = min(width, height)\n width_pad = (width - side_length) // 2\n height_pad = (height - side_length) // 2\n left = width_pad\n top = height_pad\n right = width - width_pad\n bottom = height - height_pad \n return im.crop((left, top, right, bottom))\n\n\n'''\nPurpose: transparent-pads images to precisely correct size. Robustness for images that are too small, or one pixel off the correct size. \nInput: a = a numpy array representing thumbnail. \n Assumption: thumbnail x, y dim are NO GREATER than size. \nside = desired side length. \nReturns thumbnail of precisely (SIZE x SIZE x 4).\nPadding will CENTRE the thumbnail. \n'''\ndef pad_thumbnail(a, side):\n ax, ay, az = a.shape \n if (ax < side): # not tall enough. add row of (padx x y).\n padx = side - ax\n x1 = padx // 2\n x2 = x1 + padx % 2\n row1 = np.full((x1, ay, 4), [255, 255, 255, 0], np.uint8) \n row2 = np.full((x2, ay, 4), [255, 255, 255, 0], np.uint8) \n a = np.concatenate((row1, a, row2))\n elif (ax > side): # too tall, crop. \n cutoff = side - ax\n a = a[:cutoff]\n if (ay < side): # not wide enough. add col of (pady x side)\n pady = side - ay\n y1 = pady // 2\n y2 = y1 + pady % 2\n col1 = np.full((side, y1, 4), [255, 255, 255, 0], np.uint8) \n col2 = np.full((side, y2, 4), [255, 255, 255, 0], np.uint8) \n a = np.hstack((col1, a, col2))\n elif (ay > side): # too wide, crop. \n cutoff = side - ay\n a = a[:, :cutoff]\n return a\n\n\n'''\nOpens image file from local directory. \nReturns as an np array of thumbnail SIZE, in 4dim RGBA format. \n'''\ndef gen_thumbnail(filename, tileSize, default):\n if (filename == EMPTY):\n return default\n \n # save from web into folder if url. \n if (validators.url(filename)):\n try:\n urllib.request.urlretrieve(filename, \".temp_web_images/temp_image\")\n filename = \".temp_web_images/temp_image\"\n except:\n print(\"error: url could not be retrieved.\")\n return default # if image can't be retrieved. \n\n with Image.open(filename) as im:\n im = im.convert(\"RGBA\") # add transparency\n\n x, y = im.size # scale down to thumbnail.\n tsize = int(tileSize * (max(x, y) / min(x, y)))\n im.thumbnail((tsize, tsize), Image.ANTIALIAS) \n\n im = crop_square(im) # THIS LINE: toggle to change whether square or original aspect ratio.\n \n a = np.asarray(im) # create np array from values.\n a = pad_thumbnail(a, tileSize) # for robustness. \n \n # delete temp saved image\n if (filename == \".temp_web_images/temp_image\"):\n os.remove(\".temp_web_images/temp_image\")\n \n return a\n\n\n'''\nMain functionality. Converts list of filenames into a tiled grid of thumbnails.\nReturns as Image object. 
\n'''\ndef tile_images(files, tileSize, rowLength, space):\n # initialise transparent padding\n row_space = np.full((space, tileSize, 4), [255, 255, 255, 0], np.uint8) \n col_space = np.full((tileSize, space, 4), [255, 255, 255, 0], np.uint8) \n square = np.full((tileSize, tileSize, 4), [255, 255, 255, 0], np.uint8)\n row_div = np.full((space, tileSize*rowLength + space*(rowLength-1), 4), [255, 255, 255, 0], np.uint8)\n\n # initialise folder to save web images into \n if not os.path.exists('.temp_web_images'):\n os.makedirs('.temp_web_images')\n\n # reshape 1D file list into 2D structured grid of row length rowLength\n to_add = rowLength - (len(files) % rowLength)\n if to_add != rowLength:\n files.extend([EMPTY]*to_add)\n arr = np.array(files)\n newFiles = arr.reshape(len(files) // rowLength, rowLength)\n\n # create each row array and add to list.\n rowList = []\n for row in newFiles:\n thisRow = []\n for file in row:\n thisRow.extend([gen_thumbnail(file, tileSize, square), col_space])\n rowArr = np.hstack([np.array(i) for i in thisRow[:-1]])\n rowList.extend([rowArr, row_div])\n\n # concat row arrays into a single grid array\n arr = np.concatenate([np.array(i) for i in rowList[:-1]]) # elegant numpy approach: from https://stackoverflow.com/questions/10346336/list-of-lists-into-numpy-array\n im = Image.fromarray(arr)\n return im\n \n\n\n\nif __name__ == \"__main__\":\n print(\"hello world!! im tilebot\")\n\n files = [\n \"./pic/bamboo.jpg\",\n \"./pic/coconut.png\",\n \"./pic/fish.png\",\n \"./pic/shiro.jpg\",\n \"./pic/calico-cat.png\",\n \"./pic/ghost.png\",\n \"./pic/field.jpg\",\n \"./pic/blue.gif\",\n \"./pic/boy.jpg\"\n ]\n\n urls = [\n \"./pic/bamboo.jpg\",\n \"./pic/coconut.png\",\n \"./pic/fish.png\",\n \"./pic/shiro.jpg\",\n \"https://cdn.i-scmp.com/sites/default/files/styles/1200x800/public/d8/images/methode/2020/10/30/8caac9de-1a82-11eb-8f67-a484f6db61a1_image_hires_175647.jpg?itok=T-dFsg-A&v=1604051814\",\n \"https://www.nme.com/wp-content/uploads/2021/03/Genshin-Impact-miHoYo.jpg\",\n \"https://www.indiewire.com/wp-content/uploads/2020/12/genshin1.jpg\",\n \"./pic/calico-cat.png\",\n \"./pic/ghost.png\",\n \"./pic/field.jpg\",\n \"./pic/blue.gif\",\n \"./pic/boy.jpg\",\n \"https://blog.playstation.com/tachyon/2020/11/Featured-Image-Genshin-Impact-update-out-tomorrow.jpg?fit=1024,720\",\n \"https://cdn.vox-cdn.com/thumbor/pot2y4VQxXpzedEZ8eDMrFR2wLg=/0x308:7680x4320/1200x800/filters:focal(3413x728:4641x1956)/cdn.vox-cdn.com/uploads/chorus_image/image/67716030/ba84dbaad79d15323968a64863c1e069.0.jpg\",\n \"https://gamerbraves.sgp1.cdn.digitaloceanspaces.com/2020/01/arknights-feature-c.jpg\",\n \"https://webusstatic.yo-star.com/uy0news/ae/19c9d44c8cf7d7bc770ee588b52dc2e0.png\"\n ]\n\n # doesn't work - these urls aren't publically accessible. \n disc_urls = [\n \"https://cdn.discordapp.com/attachments/841255574330408981/841266535376486460/EzFyC5ZVcAA1-_m.jpg\",\n \"https://cdn.discordapp.com/attachments/841255574330408981/841266037214806046/Elu0GiWVkAEzrHm.png\",\n \"https://cdn.discordapp.com/attachments/841255574330408981/841265455237824512/tumblr_nayd2yGcBC1rscimho1_500.png\"\n ]\n\n tilesize = 136\n rowlength = 6\n spacing = 4\n\n im = tile_images(files, tilesize, rowlength, spacing)\n im.save(\"./pic/merge-GRID.png\", \"PNG\")\n\n im = tile_images(urls, tilesize, rowlength, spacing)\n im.save(\"./pic/url_merged_2.png\", \"PNG\")\n" ]
[ [ "numpy.hstack", "numpy.asarray", "numpy.full", "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guntbert/astropy
[ "f2d2add09e5b1638b2698f19a4d46fcca19e82be", "f2d2add09e5b1638b2698f19a4d46fcca19e82be", "f2d2add09e5b1638b2698f19a4d46fcca19e82be" ]
[ "astropy/visualization/wcsaxes/tests/test_display_world_coordinates.py", "astropy/io/fits/column.py", "astropy/io/votable/tests/vo_test.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom astropy.visualization.wcsaxes.core import WCSAxes\nimport matplotlib.pyplot as plt\nfrom matplotlib.backend_bases import KeyEvent\n\nfrom astropy.wcs import WCS\nfrom astropy.coordinates import FK5\nfrom astropy.time import Time\nfrom astropy.tests.image_tests import ignore_matplotlibrc\n\nfrom .test_images import BaseImageTests\n\n\nclass TestDisplayWorldCoordinate(BaseImageTests):\n\n @ignore_matplotlibrc\n def test_overlay_coords(self, tmpdir):\n wcs = WCS(self.msx_header)\n\n fig = plt.figure(figsize=(4, 4))\n canvas = fig.canvas\n\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)\n fig.add_axes(ax)\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test1.png').strpath)\n\n # Testing default displayed world coordinates\n string_world = ax._display_world_coords(0.523412, 0.518311)\n assert string_world == '0\\xb029\\'45\" -0\\xb029\\'20\" (world)'\n\n # Test pixel coordinates\n event1 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event1.key, guiEvent=event1)\n string_pixel = ax._display_world_coords(0.523412, 0.523412)\n assert string_pixel == \"0.523412 0.523412 (pixel)\"\n\n event3 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event3.key, guiEvent=event3)\n # Test that it still displays world coords when there are no overlay coords\n string_world2 = ax._display_world_coords(0.523412, 0.518311)\n assert string_world2 == '0\\xb029\\'45\" -0\\xb029\\'20\" (world)'\n\n overlay = ax.get_coords_overlay('fk5')\n\n # Regression test for bug that caused format to always be taken from\n # main world coordinates.\n overlay[0].set_major_formatter('d.ddd')\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test2.png').strpath)\n\n event4 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event4.key, guiEvent=event4)\n # Test that it displays the overlay world coordinates\n string_world3 = ax._display_world_coords(0.523412, 0.518311)\n\n assert string_world3 == '267.176\\xb0 -28\\xb045\\'56\" (world, overlay 1)'\n\n overlay = ax.get_coords_overlay(FK5())\n\n # Regression test for bug that caused format to always be taken from\n # main world coordinates.\n overlay[0].set_major_formatter('d.ddd')\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test3.png').strpath)\n\n event5 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event4.key, guiEvent=event4)\n # Test that it displays the overlay world coordinates\n string_world4 = ax._display_world_coords(0.523412, 0.518311)\n\n assert string_world4 == '267.176\\xb0 -28\\xb045\\'56\" (world, overlay 2)'\n\n overlay = ax.get_coords_overlay(FK5(equinox=Time(\"J2030\")))\n\n # Regression test for bug that caused format to always be taken from\n # main world coordinates.\n overlay[0].set_major_formatter('d.ddd')\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test4.png').strpath)\n\n event6 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event5.key, guiEvent=event6)\n # Test that it displays the overlay world coordinates\n string_world5 = ax._display_world_coords(0.523412, 0.518311)\n\n assert string_world5 == '267.652\\xb0 
-28\\xb046\\'23\" (world, overlay 3)'\n\n @ignore_matplotlibrc\n def test_cube_coords(self, tmpdir):\n wcs = WCS(self.cube_header)\n\n fig = plt.figure(figsize=(4, 4))\n canvas = fig.canvas\n\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))\n fig.add_axes(ax)\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test.png').strpath)\n\n # Testing default displayed world coordinates\n string_world = ax._display_world_coords(0.523412, 0.518311)\n assert string_world == '3h26m52.0s 30\\xb037\\'17\\\" 2563 (world)'\n\n # Test pixel coordinates\n event1 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event1.key, guiEvent=event1)\n string_pixel = ax._display_world_coords(0.523412, 0.523412)\n assert string_pixel == \"0.523412 0.523412 (pixel)\"\n\n @ignore_matplotlibrc\n def test_cube_coords_uncorr_slicing(self, tmpdir):\n\n # Regression test for a bug that occurred with coordinate formatting if\n # some dimensions were uncorrelated and sliced out.\n\n wcs = WCS(self.cube_header)\n\n fig = plt.figure(figsize=(4, 4))\n canvas = fig.canvas\n\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('x', 'y', 2))\n fig.add_axes(ax)\n\n # On some systems, fig.canvas.draw is not enough to force a draw, so we\n # save to a temporary file.\n fig.savefig(tmpdir.join('test.png').strpath)\n\n # Testing default displayed world coordinates\n string_world = ax._display_world_coords(0.523412, 0.518311)\n assert string_world == '3h26m56.6s 30\\xb018\\'19\\\" (world)'\n\n # Test pixel coordinates\n event1 = KeyEvent('test_pixel_coords', canvas, 'w')\n fig.canvas.key_press_event(event1.key, guiEvent=event1)\n string_pixel = ax._display_world_coords(0.523412, 0.523412)\n assert string_pixel == \"0.523412 0.523412 (pixel)\"\n", "# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\nimport copy\nimport operator\nimport re\nimport sys\nimport warnings\nimport weakref\nimport numbers\n\nfrom functools import reduce\nfrom collections import OrderedDict\nfrom contextlib import suppress\n\nimport numpy as np\nfrom numpy import char as chararray\n\nfrom .card import Card, CARD_LENGTH\nfrom .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,\n NotifierMixin)\nfrom .verify import VerifyError, VerifyWarning\n\nfrom astropy.utils import lazyproperty, isiterable, indent\nfrom astropy.utils.exceptions import AstropyUserWarning\n\n__all__ = ['Column', 'ColDefs', 'Delayed']\n\n\n# mapping from TFORM data type to numpy data type (code)\n# L: Logical (Boolean)\n# B: Unsigned Byte\n# I: 16-bit Integer\n# J: 32-bit Integer\n# K: 64-bit Integer\n# E: Single-precision Floating Point\n# D: Double-precision Floating Point\n# C: Single-precision Complex\n# M: Double-precision Complex\n# A: Character\nFITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',\n 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}\n\n# the inverse dictionary of the above\nNUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}\n# Normally booleans are represented as ints in Astropy, but if passed in a numpy\n# boolean array, that should be supported\nNUMPY2FITS['b1'] = 'L'\n# Add unsigned types, which will be stored as signed ints with a TZERO card.\nNUMPY2FITS['u2'] = 'I'\nNUMPY2FITS['u4'] = 'J'\nNUMPY2FITS['u8'] = 'K'\n# Add half precision floating point numbers which will be up-converted to\n# single precision.\nNUMPY2FITS['f2'] = 'E'\n\n# This is the order in which values are 
converted to FITS types\n# Note that only double precision floating point/complex are supported\nFORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']\n\n# Convert single precision floating point/complex to double precision.\nFITSUPCONVERTERS = {'E': 'D', 'C': 'M'}\n\n# mapping from ASCII table TFORM data type to numpy data type\n# A: Character\n# I: Integer (32-bit)\n# J: Integer (64-bit; non-standard)\n# F: Float (64-bit; fixed decimal notation)\n# E: Float (64-bit; exponential notation)\n# D: Float (64-bit; exponential notation, always 64-bit by convention)\nASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}\n\n# Maps FITS ASCII column format codes to the appropriate Python string\n# formatting codes for that type.\nASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}\n\n# For each ASCII table format code, provides a default width (and decimal\n# precision) for when one isn't given explicitly in the column format\nASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),\n 'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}\n\n# TDISPn for both ASCII and Binary tables\nTDISP_RE_DICT = {}\nTDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\\.{1}'\n r'(?P<precision>[0-9])+)+)|')\nTDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \\\n re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|')\nTDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \\\n TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \\\n re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)'\n r'(?:\\.{0,1}(?P<precision>[0-9]+))?))|')\nTDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \\\n TDISP_RE_DICT['D'] = \\\n re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\\.'\n r'(?P<precision>[0-9]+))+)'\n r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')\nTDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \\\n re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\\.{1}'\n r'(?P<precision>[0-9])+)+)')\n\n# mapping from TDISP format to python format\n# A: Character\n# L: Logical (Boolean)\n# I: 16-bit Integer\n# Can't predefine zero padding and space padding before hand without\n# knowing the value being formatted, so grabbing precision and using that\n# to zero pad, ignoring width. 
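The TDISP tables above pair a regular expression per display code with a Python format template. The following is a simplified, self-contained sketch of that idea (names such as tdisp_to_format are invented; it handles only a subset of codes and ignores the exponent and padding cases noted above):

import re

# Simplified TDISPn parser: code, width, optional precision.
_TDISP = re.compile(r'(?P<code>[AIFEDG])(?P<width>\d+)(?:\.(?P<precision>\d+))?\Z')

# Templates are themselves format strings, hence the doubled braces.
_PY_FMT = {'I': '{{:{w}d}}', 'F': '{{:{w}.{p}f}}', 'E': '{{:{w}.{p}e}}',
           'D': '{{:{w}.{p}e}}', 'G': '{{:{w}.{p}g}}', 'A': '{{:>{w}}}'}

def tdisp_to_format(tdisp):
    m = _TDISP.match(tdisp.strip().upper())
    if m is None:
        raise ValueError(f'Unsupported TDISP value: {tdisp!r}')
    template = _PY_FMT[m.group('code')]
    return template.format(w=m.group('width'), p=m.group('precision') or 0)

print(tdisp_to_format('F8.3').format(3.14159))   # '   3.142'
print(tdisp_to_format('I5').format(42))          # '   42'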
Same with B, O, and Z\n# B: Binary Integer\n# O: Octal Integer\n# Z: Hexadecimal Integer\n# F: Float (64-bit; fixed decimal notation)\n# EN: Float (engineering fortran format, exponential multiple of thee\n# ES: Float (scientific, same as EN but non-zero leading digit\n# E: Float, exponential notation\n# Can't get exponential restriction to work without knowing value\n# before hand, so just using width and precision, same with D, G, EN, and\n# ES formats\n# D: Double-precision Floating Point with exponential\n# (E but for double precision)\n# G: Double-precision Floating Point, may or may not show exponent\nTDISP_FMT_DICT = {'I' : '{{:{width}d}}',\n 'B' : '{{:{width}b}}',\n 'O' : '{{:{width}o}}',\n 'Z' : '{{:{width}x}}',\n 'F' : '{{:{width}.{precision}f}}',\n 'G' : '{{:{width}.{precision}g}}'}\nTDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'\nTDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \\\n TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] ='{{:{width}.{precision}e}}'\n\n# tuple of column/field definition common names and keyword names, make\n# sure to preserve the one-to-one correspondence when updating the list(s).\n# Use lists, instead of dictionaries so the names can be displayed in a\n# preferred order.\nKEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',\n 'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',\n 'TCRVL', 'TCDLT', 'TRPOS')\nKEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',\n 'disp', 'start', 'dim', 'coord_type', 'coord_unit',\n 'coord_ref_point', 'coord_ref_value', 'coord_inc',\n 'time_ref_pos')\n\"\"\"This is a list of the attributes that can be set on `Column` objects.\"\"\"\n\n\nKEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))\n\nATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))\n\n\n# TODO: Define a list of default comments to associate with each table keyword\n\n# TFORMn regular expression\nTFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'\n r'(?P<option>[!-~]*)', re.I)\n\n# TFORMn for ASCII tables; two different versions depending on whether\n# the format is floating-point or not; allows empty values for width\n# in which case defaults are used\nTFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'\n r'(?:(?P<formatf>[FED])'\n r'(?:(?P<widthf>[0-9]+)\\.'\n r'(?P<precision>[0-9]+))?)')\n\nTTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')\n\"\"\"\nRegular expression for valid table column names. See FITS Standard v3.0 section\n7.2.2.\n\"\"\"\n\n# table definition keyword regular expression\nTDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')\n\n# table dimension keyword regular expression (fairly flexible with whitespace)\nTDIM_RE = re.compile(r'\\(\\s*(?P<dims>(?:\\d+,\\s*)+\\s*\\d+)\\s*\\)\\s*')\n\n# value for ASCII table cell with value = TNULL\n# this can be reset by user.\nASCIITNULL = 0\n\n# The default placeholder to use for NULL values in ASCII tables when\n# converting from binary to ASCII tables\nDEFAULT_ASCII_TNULL = '---'\n\n\nclass Delayed:\n \"\"\"Delayed file-reading data.\"\"\"\n\n def __init__(self, hdu=None, field=None):\n self.hdu = weakref.proxy(hdu)\n self.field = field\n\n def __getitem__(self, key):\n # This forces the data for the HDU to be read, which will replace\n # the corresponding Delayed objects in the Tables Columns to be\n # transformed into ndarrays. 
It will also return the value of the\n # requested data element.\n return self.hdu.data[key][self.field]\n\n\nclass _BaseColumnFormat(str):\n \"\"\"\n Base class for binary table column formats (just called _ColumnFormat)\n and ASCII table column formats (_AsciiColumnFormat).\n \"\"\"\n\n def __eq__(self, other):\n if not other:\n return False\n\n if isinstance(other, str):\n if not isinstance(other, self.__class__):\n try:\n other = self.__class__(other)\n except ValueError:\n return False\n else:\n return False\n\n return self.canonical == other.canonical\n\n def __hash__(self):\n return hash(self.canonical)\n\n @lazyproperty\n def dtype(self):\n \"\"\"\n The Numpy dtype object created from the format's associated recformat.\n \"\"\"\n\n return np.dtype(self.recformat)\n\n @classmethod\n def from_column_format(cls, format):\n \"\"\"Creates a column format object from another column format object\n regardless of their type.\n\n That is, this can convert a _ColumnFormat to an _AsciiColumnFormat\n or vice versa at least in cases where a direct translation is possible.\n \"\"\"\n\n return cls.from_recformat(format.recformat)\n\n\nclass _ColumnFormat(_BaseColumnFormat):\n \"\"\"\n Represents a FITS binary table column format.\n\n This is an enhancement over using a normal string for the format, since the\n repeat count, format code, and option are available as separate attributes,\n and smart comparison is used. For example 1J == J.\n \"\"\"\n\n def __new__(cls, format):\n self = super().__new__(cls, format)\n self.repeat, self.format, self.option = _parse_tformat(format)\n self.format = self.format.upper()\n if self.format in ('P', 'Q'):\n # TODO: There should be a generic factory that returns either\n # _FormatP or _FormatQ as appropriate for a given TFORMn\n if self.format == 'P':\n recformat = _FormatP.from_tform(format)\n else:\n recformat = _FormatQ.from_tform(format)\n # Format of variable length arrays\n self.p_format = recformat.format\n else:\n self.p_format = None\n return self\n\n @classmethod\n def from_recformat(cls, recformat):\n \"\"\"Creates a column format from a Numpy record dtype format.\"\"\"\n\n return cls(_convert_format(recformat, reverse=True))\n\n @lazyproperty\n def recformat(self):\n \"\"\"Returns the equivalent Numpy record format string.\"\"\"\n\n return _convert_format(self)\n\n @lazyproperty\n def canonical(self):\n \"\"\"\n Returns a 'canonical' string representation of this format.\n\n This is in the proper form of rTa where T is the single character data\n type code, a is the optional part, and r is the repeat. If repeat == 1\n (the default) it is left out of this representation.\n \"\"\"\n\n if self.repeat == 1:\n repeat = ''\n else:\n repeat = str(self.repeat)\n\n return f'{repeat}{self.format}{self.option}'\n\n\nclass _AsciiColumnFormat(_BaseColumnFormat):\n \"\"\"Similar to _ColumnFormat but specifically for columns in ASCII tables.\n\n The formats of ASCII table columns and binary table columns are inherently\n incompatible in FITS. They don't support the same ranges and types of\n values, and even reuse format codes in subtly different ways. For example\n the format code 'Iw' in ASCII columns refers to any integer whose string\n representation is at most w characters wide, so 'I' can represent\n effectively any integer that will fit in a FITS columns. 
Whereas for\n binary tables 'I' very explicitly refers to a 16-bit signed integer.\n\n Conversions between the two column formats can be performed using the\n ``to/from_binary`` methods on this class, or the ``to/from_ascii``\n methods on the `_ColumnFormat` class. But again, not all conversions are\n possible and may result in a `ValueError`.\n \"\"\"\n\n def __new__(cls, format, strict=False):\n self = super().__new__(cls, format)\n self.format, self.width, self.precision = \\\n _parse_ascii_tformat(format, strict)\n\n # This is to support handling logical (boolean) data from binary tables\n # in an ASCII table\n self._pseudo_logical = False\n return self\n\n @classmethod\n def from_column_format(cls, format):\n inst = cls.from_recformat(format.recformat)\n # Hack\n if format.format == 'L':\n inst._pseudo_logical = True\n return inst\n\n @classmethod\n def from_recformat(cls, recformat):\n \"\"\"Creates a column format from a Numpy record dtype format.\"\"\"\n\n return cls(_convert_ascii_format(recformat, reverse=True))\n\n @lazyproperty\n def recformat(self):\n \"\"\"Returns the equivalent Numpy record format string.\"\"\"\n\n return _convert_ascii_format(self)\n\n @lazyproperty\n def canonical(self):\n \"\"\"\n Returns a 'canonical' string representation of this format.\n\n This is in the proper form of Tw.d where T is the single character data\n type code, w is the width in characters for this field, and d is the\n number of digits after the decimal place (for format codes 'E', 'F',\n and 'D' only).\n \"\"\"\n\n if self.format in ('E', 'F', 'D'):\n return f'{self.format}{self.width}.{self.precision}'\n\n return f'{self.format}{self.width}'\n\n\nclass _FormatX(str):\n \"\"\"For X format in binary tables.\"\"\"\n\n def __new__(cls, repeat=1):\n nbytes = ((repeat - 1) // 8) + 1\n # use an array, even if it is only ONE u1 (i.e. 
use tuple always)\n obj = super().__new__(cls, repr((nbytes,)) + 'u1')\n obj.repeat = repeat\n return obj\n\n def __getnewargs__(self):\n return (self.repeat,)\n\n @property\n def tform(self):\n return f'{self.repeat}X'\n\n\n# TODO: Table column formats need to be verified upon first reading the file;\n# as it is, an invalid P format will raise a VerifyError from some deep,\n# unexpected place\nclass _FormatP(str):\n \"\"\"For P format in variable length table.\"\"\"\n\n # As far as I can tell from my reading of the FITS standard, a type code is\n # *required* for P and Q formats; there is no default\n _format_re_template = (r'(?P<repeat>\\d+)?{}(?P<dtype>[LXBIJKAEDCM])'\n r'(?:\\((?P<max>\\d*)\\))?')\n _format_code = 'P'\n _format_re = re.compile(_format_re_template.format(_format_code))\n _descriptor_format = '2i4'\n\n def __new__(cls, dtype, repeat=None, max=None):\n obj = super().__new__(cls, cls._descriptor_format)\n obj.format = NUMPY2FITS[dtype]\n obj.dtype = dtype\n obj.repeat = repeat\n obj.max = max\n return obj\n\n def __getnewargs__(self):\n return (self.dtype, self.repeat, self.max)\n\n @classmethod\n def from_tform(cls, format):\n m = cls._format_re.match(format)\n if not m or m.group('dtype') not in FITS2NUMPY:\n raise VerifyError(f'Invalid column format: {format}')\n repeat = m.group('repeat')\n array_dtype = m.group('dtype')\n max = m.group('max')\n if not max:\n max = None\n return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)\n\n @property\n def tform(self):\n repeat = '' if self.repeat is None else self.repeat\n max = '' if self.max is None else self.max\n return f'{repeat}{self._format_code}{self.format}({max})'\n\n\nclass _FormatQ(_FormatP):\n \"\"\"Carries type description of the Q format for variable length arrays.\n\n The Q format is like the P format but uses 64-bit integers in the array\n descriptors, allowing for heaps stored beyond 2GB into a file.\n \"\"\"\n\n _format_code = 'Q'\n _format_re = re.compile(_FormatP._format_re_template.format(_format_code))\n _descriptor_format = '2i8'\n\n\nclass ColumnAttribute:\n \"\"\"\n Descriptor for attributes of `Column` that are associated with keywords\n in the FITS header and describe properties of the column as specified in\n the FITS standard.\n\n Each `ColumnAttribute` may have a ``validator`` method defined on it.\n This validates values set on this attribute to ensure that they meet the\n FITS standard. Invalid values will raise a warning and will not be used in\n formatting the column. The validator should take two arguments--the\n `Column` it is being assigned to, and the new value for the attribute, and\n it must raise an `AssertionError` if the value is invalid.\n\n The `ColumnAttribute` itself is a decorator that can be used to define the\n ``validator`` for each column attribute. For example::\n\n @ColumnAttribute('TTYPE')\n def name(col, name):\n if not isinstance(name, str):\n raise AssertionError\n\n The actual object returned by this decorator is the `ColumnAttribute`\n instance though, not the ``name`` function. As such ``name`` is not a\n method of the class it is defined in.\n\n The setter for `ColumnAttribute` also updates the header of any table\n HDU this column is attached to in order to reflect the change. 
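The docstring above describes a descriptor that doubles as a decorator for registering a per-attribute validator. A minimal, generic sketch of that pattern follows (ValidatedAttribute and Record are invented names; this is not the ColumnAttribute implementation itself):

class ValidatedAttribute:
    def __init__(self, keyword):
        self._keyword = keyword
        self._attr = '_' + keyword.lower()
        self._validator = None

    def __get__(self, obj, objtype=None):
        return self if obj is None else getattr(obj, self._attr, None)

    def __set__(self, obj, value):
        if self._validator is not None:
            self._validator(obj, value)   # may raise AssertionError
        setattr(obj, self._attr, value)

    def __call__(self, func):
        # Used as a decorator: the decorated function becomes the validator,
        # and the descriptor instance (not the function) stays on the class.
        self._validator = func
        return self

class Record:
    @ValidatedAttribute('NAME')
    def name(obj, value):
        if not isinstance(value, str):
            raise AssertionError('name must be a string')

r = Record()
r.name = 'TIME'     # passes validation
# r.name = 123      # would raise AssertionError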
The\n ``validator`` should ensure that the value is valid for inclusion in a FITS\n header.\n \"\"\"\n\n def __init__(self, keyword):\n self._keyword = keyword\n self._validator = None\n\n # The name of the attribute associated with this keyword is currently\n # determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be\n # make more flexible in the future, for example, to support custom\n # column attributes.\n self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword]\n\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n else:\n return getattr(obj, self._attr)\n\n def __set__(self, obj, value):\n if self._validator is not None:\n self._validator(obj, value)\n\n old_value = getattr(obj, self._attr, None)\n setattr(obj, self._attr, value)\n obj._notify('column_attribute_changed', obj, self._attr[1:], old_value,\n value)\n\n def __call__(self, func):\n \"\"\"\n Set the validator for this column attribute.\n\n Returns ``self`` so that this can be used as a decorator, as described\n in the docs for this class.\n \"\"\"\n\n self._validator = func\n\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}('{self._keyword}')\"\n\n\nclass Column(NotifierMixin):\n \"\"\"\n Class which contains the definition of one column, e.g. ``ttype``,\n ``tform``, etc. and the array containing values for the column.\n \"\"\"\n\n def __init__(self, name=None, format=None, unit=None, null=None,\n bscale=None, bzero=None, disp=None, start=None, dim=None,\n array=None, ascii=None, coord_type=None, coord_unit=None,\n coord_ref_point=None, coord_ref_value=None, coord_inc=None,\n time_ref_pos=None):\n \"\"\"\n Construct a `Column` by specifying attributes. All attributes\n except ``format`` can be optional; see :ref:`column_creation` and\n :ref:`creating_ascii_table` for more information regarding\n ``TFORM`` keyword.\n\n Parameters\n ----------\n name : str, optional\n column name, corresponding to ``TTYPE`` keyword\n\n format : str\n column format, corresponding to ``TFORM`` keyword\n\n unit : str, optional\n column unit, corresponding to ``TUNIT`` keyword\n\n null : str, optional\n null value, corresponding to ``TNULL`` keyword\n\n bscale : int-like, optional\n bscale value, corresponding to ``TSCAL`` keyword\n\n bzero : int-like, optional\n bzero value, corresponding to ``TZERO`` keyword\n\n disp : str, optional\n display format, corresponding to ``TDISP`` keyword\n\n start : int, optional\n column starting position (ASCII table only), corresponding\n to ``TBCOL`` keyword\n\n dim : str, optional\n column dimension corresponding to ``TDIM`` keyword\n\n array : iterable, optional\n a `list`, `numpy.ndarray` (or other iterable that can be used to\n initialize an ndarray) providing initial data for this column.\n The array will be automatically converted, if possible, to the data\n format of the column. 
In the case were non-trivial ``bscale``\n and/or ``bzero`` arguments are given, the values in the array must\n be the *physical* values--that is, the values of column as if the\n scaling has already been applied (the array stored on the column\n object will then be converted back to its storage values).\n\n ascii : bool, optional\n set `True` if this describes a column for an ASCII table; this\n may be required to disambiguate the column format\n\n coord_type : str, optional\n coordinate/axis type corresponding to ``TCTYP`` keyword\n\n coord_unit : str, optional\n coordinate/axis unit corresponding to ``TCUNI`` keyword\n\n coord_ref_point : int-like, optional\n pixel coordinate of the reference point corresponding to ``TCRPX``\n keyword\n\n coord_ref_value : int-like, optional\n coordinate value at reference point corresponding to ``TCRVL``\n keyword\n\n coord_inc : int-like, optional\n coordinate increment at reference point corresponding to ``TCDLT``\n keyword\n\n time_ref_pos : str, optional\n reference position for a time coordinate column corresponding to\n ``TRPOS`` keyword\n \"\"\"\n\n if format is None:\n raise ValueError('Must specify format to construct Column.')\n\n # any of the input argument (except array) can be a Card or just\n # a number/string\n kwargs = {'ascii': ascii}\n for attr in KEYWORD_ATTRIBUTES:\n value = locals()[attr] # get the argument's value\n\n if isinstance(value, Card):\n value = value.value\n\n kwargs[attr] = value\n\n valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)\n\n if invalid_kwargs:\n msg = ['The following keyword arguments to Column were invalid:']\n\n for val in invalid_kwargs.values():\n msg.append(indent(val[1]))\n\n raise VerifyError('\\n'.join(msg))\n\n for attr in KEYWORD_ATTRIBUTES:\n setattr(self, attr, valid_kwargs.get(attr))\n\n # TODO: Try to eliminate the following two special cases\n # for recformat and dim:\n # This is not actually stored as an attribute on columns for some\n # reason\n recformat = valid_kwargs['recformat']\n\n # The 'dim' keyword's original value is stored in self.dim, while\n # *only* the tuple form is stored in self._dims.\n self._dims = self.dim\n self.dim = dim\n\n # Awful hack to use for now to keep track of whether the column holds\n # pseudo-unsigned int data\n self._pseudo_unsigned_ints = False\n\n # if the column data is not ndarray, make it to be one, i.e.\n # input arrays can be just list or tuple, not required to be ndarray\n # does not include Object array because there is no guarantee\n # the elements in the object array are consistent.\n if not isinstance(array,\n (np.ndarray, chararray.chararray, Delayed)):\n try: # try to convert to a ndarray first\n if array is not None:\n array = np.array(array)\n except Exception:\n try: # then try to convert it to a strings array\n itemsize = int(recformat[1:])\n array = chararray.array(array, itemsize=itemsize)\n except ValueError:\n # then try variable length array\n # Note: This includes _FormatQ by inheritance\n if isinstance(recformat, _FormatP):\n array = _VLF(array, dtype=recformat.dtype)\n else:\n raise ValueError('Data is inconsistent with the '\n 'format `{}`.'.format(format))\n\n array = self._convert_to_valid_data_type(array)\n\n # We have required (through documentation) that arrays passed in to\n # this constructor are already in their physical values, so we make\n # note of that here\n if isinstance(array, np.ndarray):\n self._physical_values = True\n else:\n self._physical_values = False\n\n self._parent_fits_rec = None\n self.array 
= array\n\n def __repr__(self):\n text = ''\n for attr in KEYWORD_ATTRIBUTES:\n value = getattr(self, attr)\n if value is not None:\n text += attr + ' = ' + repr(value) + '; '\n return text[:-2]\n\n def __eq__(self, other):\n \"\"\"\n Two columns are equal if their name and format are the same. Other\n attributes aren't taken into account at this time.\n \"\"\"\n\n # According to the FITS standard column names must be case-insensitive\n a = (self.name.lower(), self.format)\n b = (other.name.lower(), other.format)\n return a == b\n\n def __hash__(self):\n \"\"\"\n Like __eq__, the hash of a column should be based on the unique column\n name and format, and be case-insensitive with respect to the column\n name.\n \"\"\"\n\n return hash((self.name.lower(), self.format))\n\n @property\n def array(self):\n \"\"\"\n The Numpy `~numpy.ndarray` associated with this `Column`.\n\n If the column was instantiated with an array passed to the ``array``\n argument, this will return that array. However, if the column is\n later added to a table, such as via `BinTableHDU.from_columns` as\n is typically the case, this attribute will be updated to reference\n the associated field in the table, which may no longer be the same\n array.\n \"\"\"\n\n # Ideally the .array attribute never would have existed in the first\n # place, or would have been internal-only. This is a legacy of the\n # older design from Astropy that needs to have continued support, for\n # now.\n\n # One of the main problems with this design was that it created a\n # reference cycle. When the .array attribute was updated after\n # creating a FITS_rec from the column (as explained in the docstring) a\n # reference cycle was created. This is because the code in BinTableHDU\n # (and a few other places) does essentially the following:\n #\n # data._coldefs = columns # The ColDefs object holding this Column\n # for col in columns:\n # col.array = data.field(col.name)\n #\n # This way each columns .array attribute now points to the field in the\n # table data. It's actually a pretty confusing interface (since it\n # replaces the array originally pointed to by .array), but it's the way\n # things have been for a long, long time.\n #\n # However, this results, in *many* cases, in a reference cycle.\n # Because the array returned by data.field(col.name), while sometimes\n # an array that owns its own data, is usually like a slice of the\n # original data. It has the original FITS_rec as the array .base.\n # This results in the following reference cycle (for the n-th column):\n #\n # data -> data._coldefs -> data._coldefs[n] ->\n # data._coldefs[n].array -> data._coldefs[n].array.base -> data\n #\n # Because ndarray objects do not handled by Python's garbage collector\n # the reference cycle cannot be broken. Therefore the FITS_rec's\n # refcount never goes to zero, its __del__ is never called, and its\n # memory is never freed. This didn't occur in *all* cases, but it did\n # occur in many cases.\n #\n # To get around this, Column.array is no longer a simple attribute\n # like it was previously. Now each Column has a ._parent_fits_rec\n # attribute which is a weakref to a FITS_rec object. Code that\n # previously assigned each col.array to field in a FITS_rec (as in\n # the example a few paragraphs above) is still used, however now\n # array.setter checks if a reference cycle will be created. 
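The constructor parameters documented above correspond to the public astropy.io.fits column API. A short usage sketch, with invented column names and data, including one variable-length column using the 'PJ()' format convention:

import numpy as np
from astropy.io import fits

c1 = fits.Column(name='TIME', format='D', unit='s',
                 array=np.array([0.0, 1.5, 3.0]))
c2 = fits.Column(name='COUNTS', format='J',
                 array=np.array([10, 12, 9], dtype=np.int32))
# 'PJ()' declares a variable-length array column of 32-bit integers,
# one ragged array per row.
c3 = fits.Column(name='SAMPLES', format='PJ()',
                 array=np.array([np.array([1, 2], dtype=np.int32),
                                 np.array([3], dtype=np.int32),
                                 np.array([4, 5, 6], dtype=np.int32)],
                                dtype=object))

hdu = fits.BinTableHDU.from_columns([c1, c2, c3])
print(hdu.columns)            # the ColDefs holding the three Columns
print(hdu.data['COUNTS'])     # field access through the FITS_rec
hdu.writeto('example_table.fits', overwrite=True)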
And if\n # so, instead of saving directly to the Column's __dict__, it creates\n # the ._prent_fits_rec weakref, and all lookups of the column's .array\n # go through that instead.\n #\n # This alone does not fully solve the problem. Because\n # _parent_fits_rec is a weakref, if the user ever holds a reference to\n # the Column, but deletes all references to the underlying FITS_rec,\n # the .array attribute would suddenly start returning None instead of\n # the array data. This problem is resolved on FITS_rec's end. See the\n # note in the FITS_rec._coldefs property for the rest of the story.\n\n # If the Columns's array is not a reference to an existing FITS_rec,\n # then it is just stored in self.__dict__; otherwise check the\n # _parent_fits_rec reference if it 's still available.\n if 'array' in self.__dict__:\n return self.__dict__['array']\n elif self._parent_fits_rec is not None:\n parent = self._parent_fits_rec()\n if parent is not None:\n return parent[self.name]\n else:\n return None\n\n @array.setter\n def array(self, array):\n # The following looks over the bases of the given array to check if it\n # has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs\n # contains this Column itself, and would create a reference cycle if we\n # stored the array directly in self.__dict__.\n # In this case it instead sets up the _parent_fits_rec weakref to the\n # underlying FITS_rec, so that array.getter can return arrays through\n # self._parent_fits_rec().field(self.name), rather than storing a\n # hard reference to the field like it used to.\n base = array\n while True:\n if (hasattr(base, '_coldefs') and\n isinstance(base._coldefs, ColDefs)):\n for col in base._coldefs:\n if col is self and self._parent_fits_rec is None:\n self._parent_fits_rec = weakref.ref(base)\n\n # Just in case the user already set .array to their own\n # array.\n if 'array' in self.__dict__:\n del self.__dict__['array']\n return\n\n if getattr(base, 'base', None) is not None:\n base = base.base\n else:\n break\n\n self.__dict__['array'] = array\n\n @array.deleter\n def array(self):\n try:\n del self.__dict__['array']\n except KeyError:\n pass\n\n self._parent_fits_rec = None\n\n @ColumnAttribute('TTYPE')\n def name(col, name):\n if name is None:\n # Allow None to indicate deleting the name, or to just indicate an\n # unspecified name (when creating a new Column).\n return\n\n # Check that the name meets the recommended standard--other column\n # names are *allowed*, but will be discouraged\n if isinstance(name, str) and not TTYPE_RE.match(name):\n warnings.warn(\n 'It is strongly recommended that column names contain only '\n 'upper and lower-case ASCII letters, digits, or underscores '\n 'for maximum compatibility with other software '\n '(got {!r}).'.format(name), VerifyWarning)\n\n # This ensures that the new name can fit into a single FITS card\n # without any special extension like CONTINUE cards or the like.\n if (not isinstance(name, str)\n or len(str(Card('TTYPE', name))) != CARD_LENGTH):\n raise AssertionError(\n 'Column name must be a string able to fit in a single '\n 'FITS card--typically this means a maximum of 68 '\n 'characters, though it may be fewer if the string '\n 'contains special characters like quotes.')\n\n @ColumnAttribute('TCTYP')\n def coord_type(col, coord_type):\n if coord_type is None:\n return\n\n if (not isinstance(coord_type, str)\n or len(coord_type) > 8):\n raise AssertionError(\n 'Coordinate/axis type must be a string of atmost 8 '\n 'characters.')\n\n 
@ColumnAttribute('TCUNI')\n def coord_unit(col, coord_unit):\n if (coord_unit is not None\n and not isinstance(coord_unit, str)):\n raise AssertionError(\n 'Coordinate/axis unit must be a string.')\n\n @ColumnAttribute('TCRPX')\n def coord_ref_point(col, coord_ref_point):\n if (coord_ref_point is not None\n and not isinstance(coord_ref_point, numbers.Real)):\n raise AssertionError(\n 'Pixel coordinate of the reference point must be '\n 'real floating type.')\n\n @ColumnAttribute('TCRVL')\n def coord_ref_value(col, coord_ref_value):\n if (coord_ref_value is not None\n and not isinstance(coord_ref_value, numbers.Real)):\n raise AssertionError(\n 'Coordinate value at reference point must be real '\n 'floating type.')\n\n @ColumnAttribute('TCDLT')\n def coord_inc(col, coord_inc):\n if (coord_inc is not None\n and not isinstance(coord_inc, numbers.Real)):\n raise AssertionError(\n 'Coordinate increment must be real floating type.')\n\n @ColumnAttribute('TRPOS')\n def time_ref_pos(col, time_ref_pos):\n if (time_ref_pos is not None\n and not isinstance(time_ref_pos, str)):\n raise AssertionError(\n 'Time reference position must be a string.')\n\n format = ColumnAttribute('TFORM')\n unit = ColumnAttribute('TUNIT')\n null = ColumnAttribute('TNULL')\n bscale = ColumnAttribute('TSCAL')\n bzero = ColumnAttribute('TZERO')\n disp = ColumnAttribute('TDISP')\n start = ColumnAttribute('TBCOL')\n dim = ColumnAttribute('TDIM')\n\n @lazyproperty\n def ascii(self):\n \"\"\"Whether this `Column` represents a column in an ASCII table.\"\"\"\n\n return isinstance(self.format, _AsciiColumnFormat)\n\n @lazyproperty\n def dtype(self):\n return self.format.dtype\n\n def copy(self):\n \"\"\"\n Return a copy of this `Column`.\n \"\"\"\n tmp = Column(format='I') # just use a throw-away format\n tmp.__dict__ = self.__dict__.copy()\n return tmp\n\n @staticmethod\n def _convert_format(format, cls):\n \"\"\"The format argument to this class's initializer may come in many\n forms. This uses the given column format class ``cls`` to convert\n to a format of that type.\n\n TODO: There should be an abc base class for column format classes\n \"\"\"\n\n # Short circuit in case we're already a _BaseColumnFormat--there is at\n # least one case in which this can happen\n if isinstance(format, _BaseColumnFormat):\n return format, format.recformat\n\n if format in NUMPY2FITS:\n with suppress(VerifyError):\n # legit recarray format?\n recformat = format\n format = cls.from_recformat(format)\n\n try:\n # legit FITS format?\n format = cls(format)\n recformat = format.recformat\n except VerifyError:\n raise VerifyError(f'Illegal format `{format}`.')\n\n return format, recformat\n\n @classmethod\n def _verify_keywords(cls, name=None, format=None, unit=None, null=None,\n bscale=None, bzero=None, disp=None, start=None,\n dim=None, ascii=None, coord_type=None, coord_unit=None,\n coord_ref_point=None, coord_ref_value=None,\n coord_inc=None, time_ref_pos=None):\n \"\"\"\n Given the keyword arguments used to initialize a Column, specifically\n those that typically read from a FITS header (so excluding array),\n verify that each keyword has a valid value.\n\n Returns a 2-tuple of dicts. The first maps valid keywords to their\n values. 
The second maps invalid keywords to a 2-tuple of their value,\n and a message explaining why they were found invalid.\n \"\"\"\n\n valid = {}\n invalid = {}\n\n format, recformat = cls._determine_formats(format, start, dim, ascii)\n valid.update(format=format, recformat=recformat)\n\n # Currently we don't have any validation for name, unit, bscale, or\n # bzero so include those by default\n # TODO: Add validation for these keywords, obviously\n for k, v in [('name', name), ('unit', unit), ('bscale', bscale),\n ('bzero', bzero)]:\n if v is not None and v != '':\n valid[k] = v\n\n # Validate null option\n # Note: Enough code exists that thinks empty strings are sensible\n # inputs for these options that we need to treat '' as None\n if null is not None and null != '':\n msg = None\n if isinstance(format, _AsciiColumnFormat):\n null = str(null)\n if len(null) > format.width:\n msg = (\n \"ASCII table null option (TNULLn) is longer than \"\n \"the column's character width and will be truncated \"\n \"(got {!r}).\".format(null))\n else:\n tnull_formats = ('B', 'I', 'J', 'K')\n\n if not _is_int(null):\n # Make this an exception instead of a warning, since any\n # non-int value is meaningless\n msg = (\n 'Column null option (TNULLn) must be an integer for '\n 'binary table columns (got {!r}). The invalid value '\n 'will be ignored for the purpose of formatting '\n 'the data in this column.'.format(null))\n\n elif not (format.format in tnull_formats or\n (format.format in ('P', 'Q') and\n format.p_format in tnull_formats)):\n # TODO: We should also check that TNULLn's integer value\n # is in the range allowed by the column's format\n msg = (\n 'Column null option (TNULLn) is invalid for binary '\n 'table columns of type {!r} (got {!r}). The invalid '\n 'value will be ignored for the purpose of formatting '\n 'the data in this column.'.format(format, null))\n\n if msg is None:\n valid['null'] = null\n else:\n invalid['null'] = (null, msg)\n\n # Validate the disp option\n # TODO: Add full parsing and validation of TDISPn keywords\n if disp is not None and disp != '':\n msg = None\n if not isinstance(disp, str):\n msg = (\n 'Column disp option (TDISPn) must be a string (got {!r}).'\n 'The invalid value will be ignored for the purpose of '\n 'formatting the data in this column.'.format(disp))\n\n elif (isinstance(format, _AsciiColumnFormat) and\n disp[0].upper() == 'L'):\n # disp is at least one character long and has the 'L' format\n # which is not recognized for ASCII tables\n msg = (\n \"Column disp option (TDISPn) may not use the 'L' format \"\n \"with ASCII table columns. The invalid value will be \"\n \"ignored for the purpose of formatting the data in this \"\n \"column.\")\n\n if msg is None:\n valid['disp'] = disp\n else:\n invalid['disp'] = (disp, msg)\n\n # Validate the start option\n if start is not None and start != '':\n msg = None\n if not isinstance(format, _AsciiColumnFormat):\n # The 'start' option only applies to ASCII columns\n msg = (\n 'Column start option (TBCOLn) is not allowed for binary '\n 'table columns (got {!r}). The invalid keyword will be '\n 'ignored for the purpose of formatting the data in this '\n 'column.'.format(start))\n else:\n try:\n start = int(start)\n except (TypeError, ValueError):\n pass\n\n if not _is_int(start) or start < 1:\n msg = (\n 'Column start option (TBCOLn) must be a positive integer '\n '(got {!r}). 
The invalid value will be ignored for the '\n 'purpose of formatting the data in this column.'.format(start))\n\n if msg is None:\n valid['start'] = start\n else:\n invalid['start'] = (start, msg)\n\n # Process TDIMn options\n # ASCII table columns can't have a TDIMn keyword associated with it;\n # for now we just issue a warning and ignore it.\n # TODO: This should be checked by the FITS verification code\n if dim is not None and dim != '':\n msg = None\n dims_tuple = tuple()\n # NOTE: If valid, the dim keyword's value in the the valid dict is\n # a tuple, not the original string; if invalid just the original\n # string is returned\n if isinstance(format, _AsciiColumnFormat):\n msg = (\n 'Column dim option (TDIMn) is not allowed for ASCII table '\n 'columns (got {!r}). The invalid keyword will be ignored '\n 'for the purpose of formatting this column.'.format(dim))\n\n elif isinstance(dim, str):\n dims_tuple = _parse_tdim(dim)\n elif isinstance(dim, tuple):\n dims_tuple = dim\n else:\n msg = (\n \"`dim` argument must be a string containing a valid value \"\n \"for the TDIMn header keyword associated with this column, \"\n \"or a tuple containing the C-order dimensions for the \"\n \"column. The invalid value will be ignored for the purpose \"\n \"of formatting this column.\")\n\n if dims_tuple:\n if reduce(operator.mul, dims_tuple) > format.repeat:\n msg = (\n \"The repeat count of the column format {!r} for column {!r} \"\n \"is fewer than the number of elements per the TDIM \"\n \"argument {!r}. The invalid TDIMn value will be ignored \"\n \"for the purpose of formatting this column.\".format(\n name, format, dim))\n\n if msg is None:\n valid['dim'] = dims_tuple\n else:\n invalid['dim'] = (dim, msg)\n\n if coord_type is not None and coord_type != '':\n msg = None\n if not isinstance(coord_type, str):\n msg = (\n \"Coordinate/axis type option (TCTYPn) must be a string \"\n \"(got {!r}). The invalid keyword will be ignored for the \"\n \"purpose of formatting this column.\".format(coord_type))\n elif len(coord_type) > 8:\n msg = (\n \"Coordinate/axis type option (TCTYPn) must be a string \"\n \"of atmost 8 characters (got {!r}). The invalid keyword \"\n \"will be ignored for the purpose of formatting this \"\n \"column.\".format(coord_type))\n\n if msg is None:\n valid['coord_type'] = coord_type\n else:\n invalid['coord_type'] = (coord_type, msg)\n\n if coord_unit is not None and coord_unit != '':\n msg = None\n if not isinstance(coord_unit, str):\n msg = (\n \"Coordinate/axis unit option (TCUNIn) must be a string \"\n \"(got {!r}). The invalid keyword will be ignored for the \"\n \"purpose of formatting this column.\".format(coord_unit))\n\n if msg is None:\n valid['coord_unit'] = coord_unit\n else:\n invalid['coord_unit'] = (coord_unit, msg)\n\n for k, v in [('coord_ref_point', coord_ref_point),\n ('coord_ref_value', coord_ref_value),\n ('coord_inc', coord_inc)]:\n if v is not None and v != '':\n msg = None\n if not isinstance(v, numbers.Real):\n msg = (\n \"Column {} option ({}n) must be a real floating type (got {!r}). \"\n \"The invalid value will be ignored for the purpose of formatting \"\n \"the data in this column.\".format(k, ATTRIBUTE_TO_KEYWORD[k], v))\n\n if msg is None:\n valid[k] = v\n else:\n invalid[k] = (v, msg)\n\n if time_ref_pos is not None and time_ref_pos != '':\n msg=None\n if not isinstance(time_ref_pos, str):\n msg = (\n \"Time coordinate reference position option (TRPOSn) must be \"\n \"a string (got {!r}). 
The invalid keyword will be ignored for \"\n \"the purpose of formatting this column.\".format(time_ref_pos))\n\n if msg is None:\n valid['time_ref_pos'] = time_ref_pos\n else:\n invalid['time_ref_pos'] = (time_ref_pos, msg)\n\n return valid, invalid\n\n @classmethod\n def _determine_formats(cls, format, start, dim, ascii):\n \"\"\"\n Given a format string and whether or not the Column is for an\n ASCII table (ascii=None means unspecified, but lean toward binary table\n where ambiguous) create an appropriate _BaseColumnFormat instance for\n the column's format, and determine the appropriate recarray format.\n\n The values of the start and dim keyword arguments are also useful, as\n the former is only valid for ASCII tables and the latter only for\n BINARY tables.\n \"\"\"\n\n # If the given format string is unambiguously a Numpy dtype or one of\n # the Numpy record format type specifiers supported by Astropy then that\n # should take priority--otherwise assume it is a FITS format\n if isinstance(format, np.dtype):\n format, _, _ = _dtype_to_recformat(format)\n\n # check format\n if ascii is None and not isinstance(format, _BaseColumnFormat):\n # We're just give a string which could be either a Numpy format\n # code, or a format for a binary column array *or* a format for an\n # ASCII column array--there may be many ambiguities here. Try our\n # best to guess what the user intended.\n format, recformat = cls._guess_format(format, start, dim)\n elif not ascii and not isinstance(format, _BaseColumnFormat):\n format, recformat = cls._convert_format(format, _ColumnFormat)\n elif ascii and not isinstance(format, _AsciiColumnFormat):\n format, recformat = cls._convert_format(format,\n _AsciiColumnFormat)\n else:\n # The format is already acceptable and unambiguous\n recformat = format.recformat\n\n return format, recformat\n\n @classmethod\n def _guess_format(cls, format, start, dim):\n if start and dim:\n # This is impossible; this can't be a valid FITS column\n raise ValueError(\n 'Columns cannot have both a start (TCOLn) and dim '\n '(TDIMn) option, since the former is only applies to '\n 'ASCII tables, and the latter is only valid for binary '\n 'tables.')\n elif start:\n # Only ASCII table columns can have a 'start' option\n guess_format = _AsciiColumnFormat\n elif dim:\n # Only binary tables can have a dim option\n guess_format = _ColumnFormat\n else:\n # If the format is *technically* a valid binary column format\n # (i.e. 
it has a valid format code followed by arbitrary\n # \"optional\" codes), but it is also strictly a valid ASCII\n # table format, then assume an ASCII table column was being\n # requested (the more likely case, after all).\n with suppress(VerifyError):\n format = _AsciiColumnFormat(format, strict=True)\n\n # A safe guess which reflects the existing behavior of previous\n # Astropy versions\n guess_format = _ColumnFormat\n\n try:\n format, recformat = cls._convert_format(format, guess_format)\n except VerifyError:\n # For whatever reason our guess was wrong (for example if we got\n # just 'F' that's not a valid binary format, but it an ASCII format\n # code albeit with the width/precision omitted\n guess_format = (_AsciiColumnFormat\n if guess_format is _ColumnFormat\n else _ColumnFormat)\n # If this fails too we're out of options--it is truly an invalid\n # format, or at least not supported\n format, recformat = cls._convert_format(format, guess_format)\n\n return format, recformat\n\n def _convert_to_valid_data_type(self, array):\n # Convert the format to a type we understand\n if isinstance(array, Delayed):\n return array\n elif array is None:\n return array\n else:\n format = self.format\n dims = self._dims\n\n if dims:\n shape = dims[:-1] if 'A' in format else dims\n shape = (len(array),) + shape\n array = array.reshape(shape)\n\n if 'P' in format or 'Q' in format:\n return array\n elif 'A' in format:\n if array.dtype.char in 'SU':\n if dims:\n # The 'last' dimension (first in the order given\n # in the TDIMn keyword itself) is the number of\n # characters in each string\n fsize = dims[-1]\n else:\n fsize = np.dtype(format.recformat).itemsize\n return chararray.array(array, itemsize=fsize, copy=False)\n else:\n return _convert_array(array, np.dtype(format.recformat))\n elif 'L' in format:\n # boolean needs to be scaled back to storage values ('T', 'F')\n if array.dtype == np.dtype('bool'):\n return np.where(array == np.False_, ord('F'), ord('T'))\n else:\n return np.where(array == 0, ord('F'), ord('T'))\n elif 'X' in format:\n return _convert_array(array, np.dtype('uint8'))\n else:\n # Preserve byte order of the original array for now; see #77\n numpy_format = array.dtype.byteorder + format.recformat\n\n # Handle arrays passed in as unsigned ints as pseudo-unsigned\n # int arrays; blatantly tacked in here for now--we need columns\n # to have explicit knowledge of whether they treated as\n # pseudo-unsigned\n bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31),\n 8: np.uint64(2**63)}\n if (array.dtype.kind == 'u' and\n array.dtype.itemsize in bzeros and\n self.bscale in (1, None, '') and\n self.bzero == bzeros[array.dtype.itemsize]):\n # Basically the array is uint, has scale == 1.0, and the\n # bzero is the appropriate value for a pseudo-unsigned\n # integer of the input dtype, then go ahead and assume that\n # uint is assumed\n numpy_format = numpy_format.replace('i', 'u')\n self._pseudo_unsigned_ints = True\n\n # The .base here means we're dropping the shape information,\n # which is only used to format recarray fields, and is not\n # useful for converting input arrays to the correct data type\n dtype = np.dtype(numpy_format).base\n\n return _convert_array(array, dtype)\n\n\nclass ColDefs(NotifierMixin):\n \"\"\"\n Column definitions class.\n\n It has attributes corresponding to the `Column` attributes\n (e.g. `ColDefs` has the attribute ``names`` while `Column`\n has ``name``). 
Each attribute in `ColDefs` is a list of\n corresponding attribute values from all `Column` objects.\n \"\"\"\n\n _padding_byte = '\\x00'\n _col_format_cls = _ColumnFormat\n\n def __new__(cls, input, ascii=False):\n klass = cls\n\n if (hasattr(input, '_columns_type') and\n issubclass(input._columns_type, ColDefs)):\n klass = input._columns_type\n elif (hasattr(input, '_col_format_cls') and\n issubclass(input._col_format_cls, _AsciiColumnFormat)):\n klass = _AsciiColDefs\n\n if ascii: # force ASCII if this has been explicitly requested\n klass = _AsciiColDefs\n\n return object.__new__(klass)\n\n def __getnewargs__(self):\n return (self._arrays,)\n\n def __init__(self, input, ascii=False):\n \"\"\"\n Parameters\n ----------\n\n input : sequence of `Column`, `ColDefs`, other\n An existing table HDU, an existing `ColDefs`, or any multi-field\n Numpy array or `numpy.recarray`.\n\n ascii : bool\n Use True to ensure that ASCII table columns are used.\n\n \"\"\"\n from .hdu.table import _TableBaseHDU\n from .fitsrec import FITS_rec\n\n if isinstance(input, ColDefs):\n self._init_from_coldefs(input)\n elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and\n input._coldefs):\n # If given a FITS_rec object we can directly copy its columns, but\n # only if its columns have already been defined, otherwise this\n # will loop back in on itself and blow up\n self._init_from_coldefs(input._coldefs)\n elif isinstance(input, np.ndarray) and input.dtype.fields is not None:\n # Construct columns from the fields of a record array\n self._init_from_array(input)\n elif isiterable(input):\n # if the input is a list of Columns\n self._init_from_sequence(input)\n elif isinstance(input, _TableBaseHDU):\n # Construct columns from fields in an HDU header\n self._init_from_table(input)\n else:\n raise TypeError('Input to ColDefs must be a table HDU, a list '\n 'of Columns, or a record/field array.')\n\n # Listen for changes on all columns\n for col in self.columns:\n col._add_listener(self)\n\n def _init_from_coldefs(self, coldefs):\n \"\"\"Initialize from an existing ColDefs object (just copy the\n columns and convert their formats if necessary).\n \"\"\"\n\n self.columns = [self._copy_column(col) for col in coldefs]\n\n def _init_from_sequence(self, columns):\n for idx, col in enumerate(columns):\n if not isinstance(col, Column):\n raise TypeError('Element {} in the ColDefs input is not a '\n 'Column.'.format(idx))\n\n self._init_from_coldefs(columns)\n\n def _init_from_array(self, array):\n self.columns = []\n for idx in range(len(array.dtype)):\n cname = array.dtype.names[idx]\n ftype = array.dtype.fields[cname][0]\n format = self._col_format_cls.from_recformat(ftype)\n\n # Determine the appropriate dimensions for items in the column\n # (typically just 1D)\n dim = array.dtype[idx].shape[::-1]\n if dim and (len(dim) > 1 or 'A' in format):\n if 'A' in format:\n # n x m string arrays must include the max string\n # length in their dimensions (e.g. 
l x n x m)\n dim = (array.dtype[idx].base.itemsize,) + dim\n dim = repr(dim).replace(' ', '')\n else:\n dim = None\n\n # Check for unsigned ints.\n bzero = None\n if ftype.base.kind == 'u':\n if 'I' in format:\n bzero = np.uint16(2**15)\n elif 'J' in format:\n bzero = np.uint32(2**31)\n elif 'K' in format:\n bzero = np.uint64(2**63)\n\n c = Column(name=cname, format=format,\n array=array.view(np.ndarray)[cname], bzero=bzero,\n dim=dim)\n self.columns.append(c)\n\n def _init_from_table(self, table):\n hdr = table._header\n nfields = hdr['TFIELDS']\n\n # go through header keywords to pick out column definition keywords\n # definition dictionaries for each field\n col_keywords = [{} for i in range(nfields)]\n for keyword, value in hdr.items():\n key = TDEF_RE.match(keyword)\n try:\n keyword = key.group('label')\n except Exception:\n continue # skip if there is no match\n if keyword in KEYWORD_NAMES:\n col = int(key.group('num'))\n if 0 < col <= nfields:\n attr = KEYWORD_TO_ATTRIBUTE[keyword]\n if attr == 'format':\n # Go ahead and convert the format value to the\n # appropriate ColumnFormat container now\n value = self._col_format_cls(value)\n col_keywords[col - 1][attr] = value\n\n # Verify the column keywords and display any warnings if necessary;\n # we only want to pass on the valid keywords\n for idx, kwargs in enumerate(col_keywords):\n valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)\n for val in invalid_kwargs.values():\n warnings.warn(\n 'Invalid keyword for column {}: {}'.format(idx + 1, val[1]),\n VerifyWarning)\n # Special cases for recformat and dim\n # TODO: Try to eliminate the need for these special cases\n del valid_kwargs['recformat']\n if 'dim' in valid_kwargs:\n valid_kwargs['dim'] = kwargs['dim']\n col_keywords[idx] = valid_kwargs\n\n # data reading will be delayed\n for col in range(nfields):\n col_keywords[col]['array'] = Delayed(table, col)\n\n # now build the columns\n self.columns = [Column(**attrs) for attrs in col_keywords]\n\n # Add the table HDU is a listener to changes to the columns\n # (either changes to individual columns, or changes to the set of\n # columns (add/remove/etc.))\n self._add_listener(table)\n\n def __copy__(self):\n return self.__class__(self)\n\n def __deepcopy__(self, memo):\n return self.__class__([copy.deepcopy(c, memo) for c in self.columns])\n\n def _copy_column(self, column):\n \"\"\"Utility function used currently only by _init_from_coldefs\n to help convert columns from binary format to ASCII format or vice\n versa if necessary (otherwise performs a straight copy).\n \"\"\"\n\n if isinstance(column.format, self._col_format_cls):\n # This column has a FITS format compatible with this column\n # definitions class (that is ascii or binary)\n return column.copy()\n\n new_column = column.copy()\n\n # Try to use the Numpy recformat as the equivalency between the\n # two formats; if that conversion can't be made then these\n # columns can't be transferred\n # TODO: Catch exceptions here and raise an explicit error about\n # column format conversion\n new_column.format = self._col_format_cls.from_column_format(\n column.format)\n\n # Handle a few special cases of column format options that are not\n # compatible between ASCII an binary tables\n # TODO: This is sort of hacked in right now; we really need\n # separate classes for ASCII and Binary table Columns, and they\n # should handle formatting issues like these\n if not isinstance(new_column.format, _AsciiColumnFormat):\n # the column is a binary table column...\n 
new_column.start = None\n if new_column.null is not None:\n # We can't just \"guess\" a value to represent null\n # values in the new column, so just disable this for\n # now; users may modify it later\n new_column.null = None\n else:\n # the column is an ASCII table column...\n if new_column.null is not None:\n new_column.null = DEFAULT_ASCII_TNULL\n if (new_column.disp is not None and\n new_column.disp.upper().startswith('L')):\n # ASCII columns may not use the logical data display format;\n # for now just drop the TDISPn option for this column as we\n # don't have a systematic conversion of boolean data to ASCII\n # tables yet\n new_column.disp = None\n\n return new_column\n\n def __getattr__(self, name):\n \"\"\"\n Automatically returns the values for the given keyword attribute for\n all `Column`s in this list.\n\n Implements for example self.units, self.formats, etc.\n \"\"\"\n cname = name[:-1]\n if cname in KEYWORD_ATTRIBUTES and name[-1] == 's':\n attr = []\n for col in self.columns:\n val = getattr(col, cname)\n attr.append(val if val is not None else '')\n return attr\n raise AttributeError(name)\n\n @lazyproperty\n def dtype(self):\n # Note: This previously returned a dtype that just used the raw field\n # widths based on the format's repeat count, and did not incorporate\n # field *shapes* as provided by TDIMn keywords.\n # Now this incorporates TDIMn from the start, which makes *this* method\n # a little more complicated, but simplifies code elsewhere (for example\n # fields will have the correct shapes even in the raw recarray).\n formats = []\n offsets = [0]\n\n for format_, dim in zip(self.formats, self._dims):\n dt = format_.dtype\n\n if len(offsets) < len(self.formats):\n # Note: the size of the *original* format_ may be greater than\n # one would expect from the number of elements determined by\n # dim. 
The FITS format allows this--the rest of the field is\n # filled with undefined values.\n offsets.append(offsets[-1] + dt.itemsize)\n\n if dim:\n if format_.format == 'A':\n dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))\n else:\n dt = np.dtype((dt.base, dim))\n\n formats.append(dt)\n\n return np.dtype({'names': self.names,\n 'formats': formats,\n 'offsets': offsets})\n\n @lazyproperty\n def names(self):\n return [col.name for col in self.columns]\n\n @lazyproperty\n def formats(self):\n return [col.format for col in self.columns]\n\n @lazyproperty\n def _arrays(self):\n return [col.array for col in self.columns]\n\n @lazyproperty\n def _recformats(self):\n return [fmt.recformat for fmt in self.formats]\n\n @lazyproperty\n def _dims(self):\n \"\"\"Returns the values of the TDIMn keywords parsed into tuples.\"\"\"\n\n return [col._dims for col in self.columns]\n\n def __getitem__(self, key):\n if isinstance(key, str):\n key = _get_index(self.names, key)\n\n x = self.columns[key]\n if _is_int(key):\n return x\n else:\n return ColDefs(x)\n\n def __len__(self):\n return len(self.columns)\n\n def __repr__(self):\n rep = 'ColDefs('\n if hasattr(self, 'columns') and self.columns:\n # The hasattr check is mostly just useful in debugging sessions\n # where self.columns may not be defined yet\n rep += '\\n '\n rep += '\\n '.join([repr(c) for c in self.columns])\n rep += '\\n'\n rep += ')'\n return rep\n\n def __add__(self, other, option='left'):\n if isinstance(other, Column):\n b = [other]\n elif isinstance(other, ColDefs):\n b = list(other.columns)\n else:\n raise TypeError('Wrong type of input.')\n if option == 'left':\n tmp = list(self.columns) + b\n else:\n tmp = b + list(self.columns)\n return ColDefs(tmp)\n\n def __radd__(self, other):\n return self.__add__(other, 'right')\n\n def __sub__(self, other):\n if not isinstance(other, (list, tuple)):\n other = [other]\n _other = [_get_index(self.names, key) for key in other]\n indx = list(range(len(self)))\n for x in _other:\n indx.remove(x)\n tmp = [self[i] for i in indx]\n return ColDefs(tmp)\n\n def _update_column_attribute_changed(self, column, attr, old_value,\n new_value):\n \"\"\"\n Handle column attribute changed notifications from columns that are\n members of this `ColDefs`.\n\n `ColDefs` itself does not currently do anything with this, and just\n bubbles the notification up to any listening table HDUs that may need\n to update their headers, etc. 
However, this also informs the table of\n the numerical index of the column that changed.\n \"\"\"\n\n idx = 0\n for idx, col in enumerate(self.columns):\n if col is column:\n break\n\n if attr == 'name':\n del self.names\n elif attr == 'format':\n del self.formats\n\n self._notify('column_attribute_changed', column, idx, attr, old_value,\n new_value)\n\n def add_col(self, column):\n \"\"\"\n Append one `Column` to the column definition.\n \"\"\"\n\n if not isinstance(column, Column):\n raise AssertionError\n\n self._arrays.append(column.array)\n # Obliterate caches of certain things\n del self.dtype\n del self._recformats\n del self._dims\n del self.names\n del self.formats\n\n self.columns.append(column)\n\n # Listen for changes on the new column\n column._add_listener(self)\n\n # If this ColDefs is being tracked by a Table, inform the\n # table that its data is now invalid.\n self._notify('column_added', self, column)\n return self\n\n def del_col(self, col_name):\n \"\"\"\n Delete (the definition of) one `Column`.\n\n col_name : str or int\n The column's name or index\n \"\"\"\n\n indx = _get_index(self.names, col_name)\n col = self.columns[indx]\n\n del self._arrays[indx]\n # Obliterate caches of certain things\n del self.dtype\n del self._recformats\n del self._dims\n del self.names\n del self.formats\n\n del self.columns[indx]\n\n col._remove_listener(self)\n\n # If this ColDefs is being tracked by a table HDU, inform the HDU (or\n # any other listeners) that the column has been removed\n # Just send a reference to self, and the index of the column that was\n # removed\n self._notify('column_removed', self, indx)\n return self\n\n def change_attrib(self, col_name, attrib, new_value):\n \"\"\"\n Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.\n\n Parameters\n ----------\n col_name : str or int\n The column name or index to change\n\n attrib : str\n The attribute name\n\n new_value : object\n The new value for the attribute\n \"\"\"\n\n setattr(self[col_name], attrib, new_value)\n\n def change_name(self, col_name, new_name):\n \"\"\"\n Change a `Column`'s name.\n\n Parameters\n ----------\n col_name : str\n The current name of the column\n\n new_name : str\n The new name of the column\n \"\"\"\n\n if new_name != col_name and new_name in self.names:\n raise ValueError(f'New name {new_name} already exists.')\n else:\n self.change_attrib(col_name, 'name', new_name)\n\n def change_unit(self, col_name, new_unit):\n \"\"\"\n Change a `Column`'s unit.\n\n Parameters\n ----------\n col_name : str or int\n The column name or index\n\n new_unit : str\n The new unit for the column\n \"\"\"\n\n self.change_attrib(col_name, 'unit', new_unit)\n\n def info(self, attrib='all', output=None):\n \"\"\"\n Get attribute(s) information of the column definition.\n\n Parameters\n ----------\n attrib : str\n Can be one or more of the attributes listed in\n ``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is\n ``\"all\"`` which will print out all attributes. It forgives plurals\n and blanks. If there are two or more attribute names, they must be\n separated by comma(s).\n\n output : file, optional\n File-like object to output to. 
Outputs to stdout by default.\n If `False`, returns the attributes as a `dict` instead.\n\n Notes\n -----\n This function doesn't return anything by default; it just prints to\n stdout.\n \"\"\"\n\n if output is None:\n output = sys.stdout\n\n if attrib.strip().lower() in ['all', '']:\n lst = KEYWORD_ATTRIBUTES\n else:\n lst = attrib.split(',')\n for idx in range(len(lst)):\n lst[idx] = lst[idx].strip().lower()\n if lst[idx][-1] == 's':\n lst[idx] = list[idx][:-1]\n\n ret = {}\n\n for attr in lst:\n if output:\n if attr not in KEYWORD_ATTRIBUTES:\n output.write(\"'{}' is not an attribute of the column \"\n \"definitions.\\n\".format(attr))\n continue\n output.write(f\"{attr}:\\n\")\n output.write(' {}\\n'.format(getattr(self, attr + 's')))\n else:\n ret[attr] = getattr(self, attr + 's')\n\n if not output:\n return ret\n\n\nclass _AsciiColDefs(ColDefs):\n \"\"\"ColDefs implementation for ASCII tables.\"\"\"\n\n _padding_byte = ' '\n _col_format_cls = _AsciiColumnFormat\n\n def __init__(self, input, ascii=True):\n super().__init__(input)\n\n # if the format of an ASCII column has no width, add one\n if not isinstance(input, _AsciiColDefs):\n self._update_field_metrics()\n else:\n for idx, s in enumerate(input.starts):\n self.columns[idx].start = s\n\n self._spans = input.spans\n self._width = input._width\n\n @lazyproperty\n def dtype(self):\n dtype = {}\n\n for j in range(len(self)):\n data_type = 'S' + str(self.spans[j])\n dtype[self.names[j]] = (data_type, self.starts[j] - 1)\n\n return np.dtype(dtype)\n\n @property\n def spans(self):\n \"\"\"A list of the widths of each field in the table.\"\"\"\n\n return self._spans\n\n @lazyproperty\n def _recformats(self):\n if len(self) == 1:\n widths = []\n else:\n widths = [y - x for x, y in pairwise(self.starts)]\n\n # Widths is the width of each field *including* any space between\n # fields; this is so that we can map the fields to string records in a\n # Numpy recarray\n widths.append(self._width - self.starts[-1] + 1)\n return ['a' + str(w) for w in widths]\n\n def add_col(self, column):\n super().add_col(column)\n self._update_field_metrics()\n\n def del_col(self, col_name):\n super().del_col(col_name)\n self._update_field_metrics()\n\n def _update_field_metrics(self):\n \"\"\"\n Updates the list of the start columns, the list of the widths of each\n field, and the total width of each record in the table.\n \"\"\"\n\n spans = [0] * len(self.columns)\n end_col = 0 # Refers to the ASCII text column, not the table col\n for idx, col in enumerate(self.columns):\n width = col.format.width\n\n # Update the start columns and column span widths taking into\n # account the case that the starting column of a field may not\n # be the column immediately after the previous field\n if not col.start:\n col.start = end_col + 1\n end_col = col.start + width - 1\n spans[idx] = width\n\n self._spans = spans\n self._width = end_col\n\n\n# Utilities\n\n\nclass _VLF(np.ndarray):\n \"\"\"Variable length field object.\"\"\"\n\n def __new__(cls, input, dtype='a'):\n \"\"\"\n Parameters\n ----------\n input\n a sequence of variable-sized elements.\n \"\"\"\n\n if dtype == 'a':\n try:\n # this handles ['abc'] and [['a','b','c']]\n # equally, beautiful!\n input = [chararray.array(x, itemsize=1) for x in input]\n except Exception:\n raise ValueError(\n f'Inconsistent input data array: {input}')\n\n a = np.array(input, dtype=object)\n self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a,\n dtype=object)\n self.max = 0\n self.element_dtype = dtype\n return 
self\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n self.max = obj.max\n self.element_dtype = obj.element_dtype\n\n def __setitem__(self, key, value):\n \"\"\"\n To make sure the new item has consistent data type to avoid\n misalignment.\n \"\"\"\n\n if isinstance(value, np.ndarray) and value.dtype == self.dtype:\n pass\n elif isinstance(value, chararray.chararray) and value.itemsize == 1:\n pass\n elif self.element_dtype == 'a':\n value = chararray.array(value, itemsize=1)\n else:\n value = np.array(value, dtype=self.element_dtype)\n np.ndarray.__setitem__(self, key, value)\n self.max = max(self.max, len(value))\n\n\ndef _get_index(names, key):\n \"\"\"\n Get the index of the ``key`` in the ``names`` list.\n\n The ``key`` can be an integer or string. If integer, it is the index\n in the list. If string,\n\n a. Field (column) names are case sensitive: you can have two\n different columns called 'abc' and 'ABC' respectively.\n\n b. When you *refer* to a field (presumably with the field\n method), it will try to match the exact name first, so in\n the example in (a), field('abc') will get the first field,\n and field('ABC') will get the second field.\n\n If there is no exact name matched, it will try to match the\n name with case insensitivity. So, in the last example,\n field('Abc') will cause an exception since there is no unique\n mapping. If there is a field named \"XYZ\" and no other field\n name is a case variant of \"XYZ\", then field('xyz'),\n field('Xyz'), etc. will get this field.\n \"\"\"\n\n if _is_int(key):\n indx = int(key)\n elif isinstance(key, str):\n # try to find exact match first\n try:\n indx = names.index(key.rstrip())\n except ValueError:\n # try to match case-insentively,\n _key = key.lower().rstrip()\n names = [n.lower().rstrip() for n in names]\n count = names.count(_key) # occurrence of _key in names\n if count == 1:\n indx = names.index(_key)\n elif count == 0:\n raise KeyError(f\"Key '{key}' does not exist.\")\n else: # multiple match\n raise KeyError(f\"Ambiguous key name '{key}'.\")\n else:\n raise KeyError(f\"Illegal key '{key!r}'.\")\n\n return indx\n\n\ndef _unwrapx(input, output, repeat):\n \"\"\"\n Unwrap the X format column into a Boolean array.\n\n Parameters\n ----------\n input\n input ``Uint8`` array of shape (`s`, `nbytes`)\n\n output\n output Boolean array of shape (`s`, `repeat`)\n\n repeat\n number of bits\n \"\"\"\n\n pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8')\n nbytes = ((repeat - 1) // 8) + 1\n for i in range(nbytes):\n _min = i * 8\n _max = min((i + 1) * 8, repeat)\n for j in range(_min, _max):\n output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])\n\n\ndef _wrapx(input, output, repeat):\n \"\"\"\n Wrap the X format column Boolean array into an ``UInt8`` array.\n\n Parameters\n ----------\n input\n input Boolean array of shape (`s`, `repeat`)\n\n output\n output ``Uint8`` array of shape (`s`, `nbytes`)\n\n repeat\n number of bits\n \"\"\"\n\n output[...] 
= 0 # reset the output\n nbytes = ((repeat - 1) // 8) + 1\n unused = nbytes * 8 - repeat\n for i in range(nbytes):\n _min = i * 8\n _max = min((i + 1) * 8, repeat)\n for j in range(_min, _max):\n if j != _min:\n np.left_shift(output[..., i], 1, output[..., i])\n np.add(output[..., i], input[..., j], output[..., i])\n\n # shift the unused bits\n np.left_shift(output[..., i], unused, output[..., i])\n\n\ndef _makep(array, descr_output, format, nrows=None):\n \"\"\"\n Construct the P (or Q) format column array, both the data descriptors and\n the data. It returns the output \"data\" array of data type `dtype`.\n\n The descriptor location will have a zero offset for all columns\n after this call. The final offset will be calculated when the file\n is written.\n\n Parameters\n ----------\n array\n input object array\n\n descr_output\n output \"descriptor\" array of data type int32 (for P format arrays) or\n int64 (for Q format arrays)--must be nrows long in its first dimension\n\n format\n the _FormatP object representing the format of the variable array\n\n nrows : int, optional\n number of rows to create in the column; defaults to the number of rows\n in the input array\n \"\"\"\n\n # TODO: A great deal of this is redundant with FITS_rec._convert_p; see if\n # we can merge the two somehow.\n\n _offset = 0\n\n if not nrows:\n nrows = len(array)\n\n data_output = _VLF([None] * nrows, dtype=format.dtype)\n\n if format.dtype == 'a':\n _nbytes = 1\n else:\n _nbytes = np.array([], dtype=format.dtype).itemsize\n\n for idx in range(nrows):\n if idx < len(array):\n rowval = array[idx]\n else:\n if format.dtype == 'a':\n rowval = ' ' * data_output.max\n else:\n rowval = [0] * data_output.max\n if format.dtype == 'a':\n data_output[idx] = chararray.array(encode_ascii(rowval),\n itemsize=1)\n else:\n data_output[idx] = np.array(rowval, dtype=format.dtype)\n\n descr_output[idx, 0] = len(data_output[idx])\n descr_output[idx, 1] = _offset\n _offset += len(data_output[idx]) * _nbytes\n\n return data_output\n\n\ndef _parse_tformat(tform):\n \"\"\"Parse ``TFORMn`` keyword for a binary table into a\n ``(repeat, format, option)`` tuple.\n \"\"\"\n\n try:\n (repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()\n except Exception:\n # TODO: Maybe catch this error use a default type (bytes, maybe?) for\n # unrecognized column types. 
As long as we can determine the correct\n # byte width somehow..\n raise VerifyError(f'Format {tform!r} is not recognized.')\n\n if repeat == '':\n repeat = 1\n else:\n repeat = int(repeat)\n\n return (repeat, format.upper(), option)\n\n\ndef _parse_ascii_tformat(tform, strict=False):\n \"\"\"\n Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,\n precision)`` tuple (the latter is always zero unless format is one of 'E',\n 'F', or 'D').\n \"\"\"\n\n match = TFORMAT_ASCII_RE.match(tform.strip())\n if not match:\n raise VerifyError(f'Format {tform!r} is not recognized.')\n\n # Be flexible on case\n format = match.group('format')\n if format is None:\n # Floating point format\n format = match.group('formatf').upper()\n width = match.group('widthf')\n precision = match.group('precision')\n if width is None or precision is None:\n if strict:\n raise VerifyError('Format {!r} is not unambiguously an ASCII '\n 'table format.')\n else:\n width = 0 if width is None else width\n precision = 1 if precision is None else precision\n else:\n format = format.upper()\n width = match.group('width')\n if width is None:\n if strict:\n raise VerifyError('Format {!r} is not unambiguously an ASCII '\n 'table format.')\n else:\n # Just use a default width of 0 if unspecified\n width = 0\n precision = 0\n\n def convert_int(val):\n msg = ('Format {!r} is not valid--field width and decimal precision '\n 'must be integers.')\n try:\n val = int(val)\n except (ValueError, TypeError):\n raise VerifyError(msg.format(tform))\n\n return val\n\n if width and precision:\n # This should only be the case for floating-point formats\n width, precision = convert_int(width), convert_int(precision)\n elif width:\n # Just for integer/string formats; ignore precision\n width = convert_int(width)\n else:\n # For any format, if width was unspecified use the set defaults\n width, precision = ASCII_DEFAULT_WIDTHS[format]\n\n if width <= 0:\n raise VerifyError(\"Format {!r} not valid--field width must be a \"\n \"positive integeter.\".format(tform))\n\n if precision >= width:\n raise VerifyError(\"Format {!r} not valid--the number of decimal digits \"\n \"must be less than the format's total \"\n \"width {}.\".format(tform, width))\n\n return format, width, precision\n\n\ndef _parse_tdim(tdim):\n \"\"\"Parse the ``TDIM`` value into a tuple (may return an empty tuple if\n the value ``TDIM`` value is empty or invalid).\n \"\"\"\n\n m = tdim and TDIM_RE.match(tdim)\n if m:\n dims = m.group('dims')\n return tuple(int(d.strip()) for d in dims.split(','))[::-1]\n\n # Ignore any dim values that don't specify a multidimensional column\n return tuple()\n\n\ndef _scalar_to_format(value):\n \"\"\"\n Given a scalar value or string, returns the minimum FITS column format\n that can represent that value. 
'minimum' is defined by the order given in\n FORMATORDER.\n \"\"\"\n\n # First, if value is a string, try to convert to the appropriate scalar\n # value\n for type_ in (int, float, complex):\n try:\n value = type_(value)\n break\n except ValueError:\n continue\n\n numpy_dtype_str = np.min_scalar_type(value).str\n numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness\n\n try:\n fits_format = NUMPY2FITS[numpy_dtype_str]\n return FITSUPCONVERTERS.get(fits_format, fits_format)\n except KeyError:\n return \"A\" + str(len(value))\n\n\ndef _cmp_recformats(f1, f2):\n \"\"\"\n Compares two numpy recformats using the ordering given by FORMATORDER.\n \"\"\"\n\n if f1[0] == 'a' and f2[0] == 'a':\n return cmp(int(f1[1:]), int(f2[1:]))\n else:\n f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]\n return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))\n\n\ndef _convert_fits2record(format):\n \"\"\"\n Convert FITS format spec to record format spec.\n \"\"\"\n\n repeat, dtype, option = _parse_tformat(format)\n\n if dtype in FITS2NUMPY:\n if dtype == 'A':\n output_format = FITS2NUMPY[dtype] + str(repeat)\n # to accommodate both the ASCII table and binary table column\n # format spec, i.e. A7 in ASCII table is the same as 7A in\n # binary table, so both will produce 'a7'.\n # Technically the FITS standard does not allow this but it's a very\n # common mistake\n if format.lstrip()[0] == 'A' and option != '':\n # make sure option is integer\n output_format = FITS2NUMPY[dtype] + str(int(option))\n else:\n repeat_str = ''\n if repeat != 1:\n repeat_str = str(repeat)\n output_format = repeat_str + FITS2NUMPY[dtype]\n\n elif dtype == 'X':\n output_format = _FormatX(repeat)\n elif dtype == 'P':\n output_format = _FormatP.from_tform(format)\n elif dtype == 'Q':\n output_format = _FormatQ.from_tform(format)\n elif dtype == 'F':\n output_format = 'f8'\n else:\n raise ValueError(f'Illegal format `{format}`.')\n\n return output_format\n\n\ndef _convert_record2fits(format):\n \"\"\"\n Convert record format spec to FITS format spec.\n \"\"\"\n\n recformat, kind, dtype = _dtype_to_recformat(format)\n shape = dtype.shape\n itemsize = dtype.base.itemsize\n if dtype.char == 'U':\n # Unicode dtype--itemsize is 4 times actual ASCII character length,\n # which what matters for FITS column formats\n # Use dtype.base--dtype may be a multi-dimensional dtype\n itemsize = itemsize // 4\n\n option = str(itemsize)\n\n ndims = len(shape)\n repeat = 1\n if ndims > 0:\n nel = np.array(shape, dtype='i8').prod()\n if nel > 1:\n repeat = nel\n\n if kind == 'a':\n # This is a kludge that will place string arrays into a\n # single field, so at least we won't lose data. Need to\n # use a TDIM keyword to fix this, declaring as (slength,\n # dim1, dim2, ...) as mwrfits does\n\n ntot = int(repeat) * int(option)\n\n output_format = str(ntot) + 'A'\n elif recformat in NUMPY2FITS: # record format\n if repeat != 1:\n repeat = str(repeat)\n else:\n repeat = ''\n output_format = repeat + NUMPY2FITS[recformat]\n else:\n raise ValueError(f'Illegal format `{format}`.')\n\n return output_format\n\n\ndef _dtype_to_recformat(dtype):\n \"\"\"\n Utility function for converting a dtype object or string that instantiates\n a dtype (e.g. 
'float32') into one of the two character Numpy format codes\n that have been traditionally used by Astropy.\n\n In particular, use of 'a' to refer to character data is long since\n deprecated in Numpy, but Astropy remains heavily invested in its use\n (something to try to get away from sooner rather than later).\n \"\"\"\n\n if not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n\n kind = dtype.base.kind\n\n if kind in ('U', 'S'):\n recformat = kind = 'a'\n else:\n itemsize = dtype.base.itemsize\n recformat = kind + str(itemsize)\n\n return recformat, kind, dtype\n\n\ndef _convert_format(format, reverse=False):\n \"\"\"\n Convert FITS format spec to record format spec. Do the opposite if\n reverse=True.\n \"\"\"\n\n if reverse:\n return _convert_record2fits(format)\n else:\n return _convert_fits2record(format)\n\n\ndef _convert_ascii_format(format, reverse=False):\n \"\"\"Convert ASCII table format spec to record format spec.\"\"\"\n\n if reverse:\n recformat, kind, dtype = _dtype_to_recformat(format)\n itemsize = dtype.itemsize\n\n if kind == 'a':\n return 'A' + str(itemsize)\n elif NUMPY2FITS.get(recformat) == 'L':\n # Special case for logical/boolean types--for ASCII tables we\n # represent these as single character columns containing 'T' or 'F'\n # (a la the storage format for Logical columns in binary tables)\n return 'A1'\n elif kind == 'i':\n # Use for the width the maximum required to represent integers\n # of that byte size plus 1 for signs, but use a minimum of the\n # default width (to keep with existing behavior)\n width = 1 + len(str(2 ** (itemsize * 8)))\n width = max(width, ASCII_DEFAULT_WIDTHS['I'][0])\n return 'I' + str(width)\n elif kind == 'f':\n # This is tricky, but go ahead and use D if float-64, and E\n # if float-32 with their default widths\n if itemsize >= 8:\n format = 'D'\n else:\n format = 'E'\n width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])\n return format + width\n # TODO: There may be reasonable ways to represent other Numpy types so\n # let's see what other possibilities there are besides just 'a', 'i',\n # and 'f'. If it doesn't have a reasonable ASCII representation then\n # raise an exception\n else:\n format, width, precision = _parse_ascii_tformat(format)\n\n # This gives a sensible \"default\" dtype for a given ASCII\n # format code\n recformat = ASCII2NUMPY[format]\n\n # The following logic is taken from CFITSIO:\n # For integers, if the width <= 4 we can safely use 16-bit ints for all\n # values [for the non-standard J format code just always force 64-bit]\n if format == 'I' and width <= 4:\n recformat = 'i2'\n elif format == 'A':\n recformat += str(width)\n\n return recformat\n\n\ndef _parse_tdisp_format(tdisp):\n \"\"\"\n Parse the ``TDISPn`` keywords for ASCII and binary tables into a\n ``(format, width, precision, exponential)`` tuple (the TDISP values\n for ASCII and binary are identical except for 'Lw',\n which is only present in BINTABLE extensions\n\n Parameters\n ----------\n tdisp: str\n TDISPn FITS Header keyword. 
Used to specify display formatting.\n\n Returns\n -------\n formatc: str\n The format characters from TDISPn\n width: str\n The width int value from TDISPn\n precision: str\n The precision int value from TDISPn\n exponential: str\n The exponential int value from TDISPn\n\n \"\"\"\n\n # Use appropriate regex for format type\n tdisp = tdisp.strip()\n fmt_key = tdisp[0] if tdisp[0] !='E' or tdisp[1] not in 'NS' else tdisp[:2]\n try:\n tdisp_re = TDISP_RE_DICT[fmt_key]\n except KeyError:\n raise VerifyError(f'Format {tdisp} is not recognized.')\n\n match = tdisp_re.match(tdisp.strip())\n if not match or match.group('formatc') is None:\n raise VerifyError(f'Format {tdisp} is not recognized.')\n\n formatc = match.group('formatc')\n width = match.group('width')\n precision = None\n exponential = None\n\n # Some formats have precision and exponential\n if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'):\n precision = match.group('precision')\n if precision is None:\n precision = 1\n if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'):\n exponential = match.group('exponential')\n if exponential is None:\n exponential = 1\n\n # Once parsed, check format dict to do conversion to a formatting string\n return formatc, width, precision, exponential\n\n\ndef _fortran_to_python_format(tdisp):\n \"\"\"\n Turn the TDISPn fortran format pieces into a final Python format string.\n See the format_type definitions above the TDISP_FMT_DICT. If codes is\n changed to take advantage of the exponential specification, will need to\n add it as another input parameter.\n\n Parameters\n ----------\n tdisp: str\n TDISPn FITS Header keyword. Used to specify display formatting.\n\n Returns\n -------\n format_string: str\n The TDISPn keyword string translated into a Python format string.\n \"\"\"\n format_type, width, precision, exponential = _parse_tdisp_format(tdisp)\n\n try:\n fmt = TDISP_FMT_DICT[format_type]\n return fmt.format(width=width, precision=precision)\n\n except KeyError:\n raise VerifyError(f'Format {format_type} is not recognized.')\n\n\ndef python_to_tdisp(format_string, logical_dtype = False):\n \"\"\"\n Turn the Python format string to a TDISP FITS compliant format string. Not\n all formats convert. these will cause a Warning and return None.\n\n Parameters\n ----------\n format_string: str\n TDISPn FITS Header keyword. Used to specify display formatting.\n logical_dtype: bool\n True is this format type should be a logical type, 'L'. 
Needs special\n handeling.\n\n Returns\n -------\n tdsip_string: str\n The TDISPn keyword string translated into a Python format string.\n \"\"\"\n\n fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z',\n 'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E',\n 'E': 'E'}\n\n if format_string in [None, \"\", \"{}\"]:\n return None\n\n # Strip out extra format characters that aren't a type or a width/precision\n if format_string[0] == '{' and format_string != \"{}\":\n fmt_str = format_string.lstrip(\"{:\").rstrip('}')\n elif format_string[0] == '%':\n fmt_str = format_string.lstrip(\"%\")\n else:\n fmt_str = format_string\n\n precision, sep = '', ''\n\n # Character format, only translate right aligned, and don't take zero fills\n if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':\n ftype = fmt_to_tdisp['a']\n width = fmt_str[1:]\n\n elif fmt_str[-1] == 's' and fmt_str != 's':\n ftype = fmt_to_tdisp['a']\n width = fmt_str[:-1].lstrip('0')\n\n # Number formats, don't take zero fills\n elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':\n ftype = fmt_to_tdisp[fmt_str[-1]]\n fmt_str = fmt_str[:-1]\n\n # If format has a \".\" split out the width and precision\n if '.' in fmt_str:\n width, precision = fmt_str.split('.')\n sep = '.'\n if width == \"\":\n ascii_key = ftype if ftype != 'G' else 'F'\n width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] -\n ASCII_DEFAULT_WIDTHS[ascii_key][1]))\n # Otherwise we just have a width\n else:\n width = fmt_str\n\n else:\n warnings.warn('Format {} cannot be mapped to the accepted '\n 'TDISPn keyword values. Format will not be '\n 'moved into TDISPn keyword.'.format(format_string),\n AstropyUserWarning)\n return None\n\n # Catch logical data type, set the format type back to L in this case\n if logical_dtype:\n ftype = 'L'\n\n return ftype + width + sep + precision\n", "# -*- coding: utf-8 -*-\n\n\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nThis is a set of regression tests for vo.\n\"\"\"\n\n\n# STDLIB\nimport difflib\nimport io\nimport pathlib\nimport sys\nimport gzip\nfrom unittest import mock\n\n# THIRD-PARTY\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\n# LOCAL\nfrom astropy.io.votable.table import parse, parse_single_table, validate\nfrom astropy.io.votable import tree\nfrom astropy.io.votable.exceptions import VOTableSpecError, VOWarning\nfrom astropy.io.votable.xmlutil import validate_schema\nfrom astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames\nfrom astropy.tests.helper import raises, catch_warnings\n\n# Determine the kind of float formatting in this build of Python\nif hasattr(sys, 'float_repr_style'):\n legacy_float_repr = (sys.float_repr_style == 'legacy')\nelse:\n legacy_float_repr = sys.platform.startswith('win')\n\n\ndef assert_validate_schema(filename, version):\n if sys.platform.startswith('win'):\n return\n\n try:\n rc, stdout, stderr = validate_schema(filename, version)\n except OSError:\n # If xmllint is not installed, we want the test to pass anyway\n return\n assert rc == 0, 'File did not validate against VOTable schema'\n\n\ndef test_parse_single_table():\n table = parse_single_table(get_pkg_data_filename('data/regression.xml'))\n assert isinstance(table, tree.Table)\n assert len(table.array) == 5\n\n\ndef test_parse_single_table2():\n table2 = parse_single_table(get_pkg_data_filename('data/regression.xml'),\n table_number=1)\n assert isinstance(table2, tree.Table)\n 
assert len(table2.array) == 1\n assert len(table2.array.dtype.names) == 28\n\n\n@raises(IndexError)\ndef test_parse_single_table3():\n parse_single_table(get_pkg_data_filename('data/regression.xml'),\n table_number=3)\n\n\ndef _test_regression(tmpdir, _python_based=False, binary_mode=1):\n # Read the VOTABLE\n votable = parse(get_pkg_data_filename('data/regression.xml'),\n _debug_python_based_parser=_python_based)\n table = votable.get_first_table()\n\n dtypes = [\n (('string test', 'string_test'), '|O8'),\n (('fixed string test', 'string_test_2'), '|S10'),\n ('unicode_test', '|O8'),\n (('unicode test', 'fixed_unicode_test'), '<U10'),\n (('string array test', 'string_array_test'), '|S4'),\n ('unsignedByte', '|u1'),\n ('short', '<i2'),\n ('int', '<i4'),\n ('long', '<i8'),\n ('double', '<f8'),\n ('float', '<f4'),\n ('array', '|O8'),\n ('bit', '|b1'),\n ('bitarray', '|b1', (3, 2)),\n ('bitvararray', '|O8'),\n ('bitvararray2', '|O8'),\n ('floatComplex', '<c8'),\n ('doubleComplex', '<c16'),\n ('doubleComplexArray', '|O8'),\n ('doubleComplexArrayFixed', '<c16', (2,)),\n ('boolean', '|b1'),\n ('booleanArray', '|b1', (4,)),\n ('nulls', '<i4'),\n ('nulls_array', '<i4', (2, 2)),\n ('precision1', '<f8'),\n ('precision2', '<f8'),\n ('doublearray', '|O8'),\n ('bitarray2', '|b1', (16,))\n ]\n if sys.byteorder == 'big':\n new_dtypes = []\n for dtype in dtypes:\n dtype = list(dtype)\n dtype[1] = dtype[1].replace('<', '>')\n new_dtypes.append(tuple(dtype))\n dtypes = new_dtypes\n assert table.array.dtype == dtypes\n\n votable.to_xml(str(tmpdir.join(\"regression.tabledata.xml\")),\n _debug_python_based_parser=_python_based)\n assert_validate_schema(str(tmpdir.join(\"regression.tabledata.xml\")),\n votable.version)\n\n if binary_mode == 1:\n votable.get_first_table().format = 'binary'\n votable.version = '1.1'\n elif binary_mode == 2:\n votable.get_first_table()._config['version_1_3_or_later'] = True\n votable.get_first_table().format = 'binary2'\n votable.version = '1.3'\n\n # Also try passing a file handle\n with open(str(tmpdir.join(\"regression.binary.xml\")), \"wb\") as fd:\n votable.to_xml(fd, _debug_python_based_parser=_python_based)\n assert_validate_schema(str(tmpdir.join(\"regression.binary.xml\")),\n votable.version)\n # Also try passing a file handle\n with open(str(tmpdir.join(\"regression.binary.xml\")), \"rb\") as fd:\n votable2 = parse(fd, _debug_python_based_parser=_python_based)\n votable2.get_first_table().format = 'tabledata'\n votable2.to_xml(str(tmpdir.join(\"regression.bin.tabledata.xml\")),\n _astropy_version=\"testing\",\n _debug_python_based_parser=_python_based)\n assert_validate_schema(str(tmpdir.join(\"regression.bin.tabledata.xml\")),\n votable.version)\n\n with open(\n get_pkg_data_filename(\n 'data/regression.bin.tabledata.truth.{}.xml'.format(\n votable.version)),\n 'rt', encoding='utf-8') as fd:\n truth = fd.readlines()\n with open(str(tmpdir.join(\"regression.bin.tabledata.xml\")),\n 'rt', encoding='utf-8') as fd:\n output = fd.readlines()\n\n # If the lines happen to be different, print a diff\n # This is convenient for debugging\n sys.stdout.writelines(\n difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))\n\n assert truth == output\n\n # Test implicit gzip saving\n votable2.to_xml(\n str(tmpdir.join(\"regression.bin.tabledata.xml.gz\")),\n _astropy_version=\"testing\",\n _debug_python_based_parser=_python_based)\n with gzip.GzipFile(\n str(tmpdir.join(\"regression.bin.tabledata.xml.gz\")), 'rb') as gzfd:\n output = gzfd.readlines()\n output = 
[x.decode('utf-8').rstrip() for x in output]\n truth = [x.rstrip() for x in truth]\n\n assert truth == output\n\n\[email protected]('legacy_float_repr')\ndef test_regression(tmpdir):\n _test_regression(tmpdir, False)\n\n\[email protected]('legacy_float_repr')\ndef test_regression_python_based_parser(tmpdir):\n _test_regression(tmpdir, True)\n\n\[email protected]('legacy_float_repr')\ndef test_regression_binary2(tmpdir):\n _test_regression(tmpdir, False, 2)\n\n\nclass TestFixups:\n def setup_class(self):\n self.table = parse(get_pkg_data_filename('data/regression.xml')).get_first_table()\n self.array = self.table.array\n self.mask = self.table.array.mask\n\n def test_implicit_id(self):\n assert_array_equal(self.array['string_test_2'],\n self.array['fixed string test'])\n\n\nclass TestReferences:\n def setup_class(self):\n self.votable = parse(get_pkg_data_filename('data/regression.xml'))\n self.table = self.votable.get_first_table()\n self.array = self.table.array\n self.mask = self.table.array.mask\n\n def test_fieldref(self):\n fieldref = self.table.groups[1].entries[0]\n assert isinstance(fieldref, tree.FieldRef)\n assert fieldref.get_ref().name == 'boolean'\n assert fieldref.get_ref().datatype == 'boolean'\n\n def test_paramref(self):\n paramref = self.table.groups[0].entries[0]\n assert isinstance(paramref, tree.ParamRef)\n assert paramref.get_ref().name == 'INPUT'\n assert paramref.get_ref().datatype == 'float'\n\n def test_iter_fields_and_params_on_a_group(self):\n assert len(list(self.table.groups[1].iter_fields_and_params())) == 2\n\n def test_iter_groups_on_a_group(self):\n assert len(list(self.table.groups[1].iter_groups())) == 1\n\n def test_iter_groups(self):\n # Because of the ref'd table, there are more logical groups\n # than actually exist in the file\n assert len(list(self.votable.iter_groups())) == 9\n\n def test_ref_table(self):\n tables = list(self.votable.iter_tables())\n for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):\n assert_array_equal(x, y)\n\n def test_iter_coosys(self):\n assert len(list(self.votable.iter_coosys())) == 1\n\n\ndef test_select_columns_by_index():\n columns = [0, 5, 13]\n table = parse(\n get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table()\n array = table.array\n mask = table.array.mask\n assert array['string_test'][0] == b\"String & test\"\n columns = ['string_test', 'unsignedByte', 'bitarray']\n for c in columns:\n assert not np.all(mask[c])\n assert np.all(mask['unicode_test'])\n\n\ndef test_select_columns_by_name():\n columns = ['string_test', 'unsignedByte', 'bitarray']\n table = parse(\n get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table()\n array = table.array\n mask = table.array.mask\n assert array['string_test'][0] == b\"String & test\"\n for c in columns:\n assert not np.all(mask[c])\n assert np.all(mask['unicode_test'])\n\n\nclass TestParse:\n def setup_class(self):\n self.votable = parse(get_pkg_data_filename('data/regression.xml'))\n self.table = self.votable.get_first_table()\n self.array = self.table.array\n self.mask = self.table.array.mask\n\n def test_string_test(self):\n assert issubclass(self.array['string_test'].dtype.type,\n np.object_)\n assert_array_equal(\n self.array['string_test'],\n [b'String & test', b'String &amp; test', b'XXXX',\n b'', b''])\n\n def test_fixed_string_test(self):\n assert issubclass(self.array['string_test_2'].dtype.type,\n np.string_)\n assert_array_equal(\n self.array['string_test_2'],\n [b'Fixed stri', b'0123456789', 
b'XXXX', b'', b''])\n\n def test_unicode_test(self):\n assert issubclass(self.array['unicode_test'].dtype.type,\n np.object_)\n assert_array_equal(self.array['unicode_test'],\n [\"Ceçi n'est pas un pipe\",\n 'வணக்கம்',\n 'XXXX', '', ''])\n\n def test_fixed_unicode_test(self):\n assert issubclass(self.array['fixed_unicode_test'].dtype.type,\n np.unicode_)\n assert_array_equal(self.array['fixed_unicode_test'],\n [\"Ceçi n'est\",\n 'வணக்கம்',\n '0123456789', '', ''])\n\n def test_unsignedByte(self):\n assert issubclass(self.array['unsignedByte'].dtype.type,\n np.uint8)\n assert_array_equal(self.array['unsignedByte'],\n [128, 255, 0, 255, 255])\n assert not np.any(self.mask['unsignedByte'])\n\n def test_short(self):\n assert issubclass(self.array['short'].dtype.type,\n np.int16)\n assert_array_equal(self.array['short'],\n [4096, 32767, -4096, 32767, 32767])\n assert not np.any(self.mask['short'])\n\n def test_int(self):\n assert issubclass(self.array['int'].dtype.type,\n np.int32)\n assert_array_equal(\n self.array['int'],\n [268435456, 2147483647, -268435456, 268435455, 123456789])\n assert_array_equal(self.mask['int'],\n [False, False, False, False, True])\n\n def test_long(self):\n assert issubclass(self.array['long'].dtype.type,\n np.int64)\n assert_array_equal(\n self.array['long'],\n [922337203685477, 123456789, -1152921504606846976,\n 1152921504606846975, 123456789])\n assert_array_equal(self.mask['long'],\n [False, True, False, False, True])\n\n def test_double(self):\n assert issubclass(self.array['double'].dtype.type,\n np.float64)\n assert_array_equal(self.array['double'],\n [8.9990234375, 0.0, np.inf, np.nan, -np.inf])\n assert_array_equal(self.mask['double'],\n [False, False, False, True, False])\n\n def test_float(self):\n assert issubclass(self.array['float'].dtype.type,\n np.float32)\n assert_array_equal(self.array['float'],\n [1.0, 0.0, np.inf, np.inf, np.nan])\n assert_array_equal(self.mask['float'],\n [False, False, False, False, True])\n\n def test_array(self):\n assert issubclass(self.array['array'].dtype.type,\n np.object_)\n match = [[],\n [[42, 32], [12, 32]],\n [[12, 34], [56, 78], [87, 65], [43, 21]],\n [[-1, 23]],\n [[31, -1]]]\n for a, b in zip(self.array['array'], match):\n # assert issubclass(a.dtype.type, np.int64)\n # assert a.shape[1] == 2\n for a0, b0 in zip(a, b):\n assert issubclass(a0.dtype.type, np.int64)\n assert_array_equal(a0, b0)\n assert self.array.data['array'][3].mask[0][0]\n assert self.array.data['array'][4].mask[0][1]\n\n def test_bit(self):\n assert issubclass(self.array['bit'].dtype.type,\n np.bool_)\n assert_array_equal(self.array['bit'],\n [True, False, True, False, False])\n\n def test_bit_mask(self):\n assert_array_equal(self.mask['bit'],\n [False, False, False, False, True])\n\n def test_bitarray(self):\n assert issubclass(self.array['bitarray'].dtype.type,\n np.bool_)\n assert self.array['bitarray'].shape == (5, 3, 2)\n assert_array_equal(self.array['bitarray'],\n [[[True, False],\n [True, True],\n [False, True]],\n\n [[False, True],\n [False, False],\n [True, True]],\n\n [[True, True],\n [True, False],\n [False, False]],\n\n [[False, False],\n [False, False],\n [False, False]],\n\n [[False, False],\n [False, False],\n [False, False]]])\n\n def test_bitarray_mask(self):\n assert_array_equal(self.mask['bitarray'],\n [[[False, False],\n [False, False],\n [False, False]],\n\n [[False, False],\n [False, False],\n [False, False]],\n\n [[False, False],\n [False, False],\n [False, False]],\n\n [[True, True],\n [True, True],\n [True, True]],\n\n 
[[True, True],\n [True, True],\n [True, True]]])\n\n def test_bitvararray(self):\n assert issubclass(self.array['bitvararray'].dtype.type,\n np.object_)\n match = [[True, True, True],\n [False, False, False, False, False],\n [True, False, True, False, True],\n [], []]\n for a, b in zip(self.array['bitvararray'], match):\n assert_array_equal(a, b)\n match_mask = [[False, False, False],\n [False, False, False, False, False],\n [False, False, False, False, False],\n False, False]\n for a, b in zip(self.array['bitvararray'], match_mask):\n assert_array_equal(a.mask, b)\n\n def test_bitvararray2(self):\n assert issubclass(self.array['bitvararray2'].dtype.type,\n np.object_)\n match = [[],\n\n [[[False, True],\n [False, False],\n [True, False]],\n [[True, False],\n [True, False],\n [True, False]]],\n\n [[[True, True],\n [True, True],\n [True, True]]],\n\n [],\n\n []]\n for a, b in zip(self.array['bitvararray2'], match):\n for a0, b0 in zip(a, b):\n assert a0.shape == (3, 2)\n assert issubclass(a0.dtype.type, np.bool_)\n assert_array_equal(a0, b0)\n\n def test_floatComplex(self):\n assert issubclass(self.array['floatComplex'].dtype.type,\n np.complex64)\n assert_array_equal(self.array['floatComplex'],\n [np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])\n assert_array_equal(self.mask['floatComplex'],\n [True, False, False, True, True])\n\n def test_doubleComplex(self):\n assert issubclass(self.array['doubleComplex'].dtype.type,\n np.complex128)\n assert_array_equal(\n self.array['doubleComplex'],\n [np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])\n assert_array_equal(self.mask['doubleComplex'],\n [True, False, False, True, True])\n\n def test_doubleComplexArray(self):\n assert issubclass(self.array['doubleComplexArray'].dtype.type,\n np.object_)\n assert ([len(x) for x in self.array['doubleComplexArray']] ==\n [0, 2, 2, 0, 0])\n\n def test_boolean(self):\n assert issubclass(self.array['boolean'].dtype.type,\n np.bool_)\n assert_array_equal(self.array['boolean'],\n [True, False, True, False, False])\n\n def test_boolean_mask(self):\n assert_array_equal(self.mask['boolean'],\n [False, False, False, False, True])\n\n def test_boolean_array(self):\n assert issubclass(self.array['booleanArray'].dtype.type,\n np.bool_)\n assert_array_equal(self.array['booleanArray'],\n [[True, True, True, True],\n [True, True, False, True],\n [True, True, False, True],\n [False, False, False, False],\n [False, False, False, False]])\n\n def test_boolean_array_mask(self):\n assert_array_equal(self.mask['booleanArray'],\n [[False, False, False, False],\n [False, False, False, False],\n [False, False, True, False],\n [True, True, True, True],\n [True, True, True, True]])\n\n def test_nulls(self):\n assert_array_equal(self.array['nulls'],\n [0, -9, 2, -9, -9])\n assert_array_equal(self.mask['nulls'],\n [False, True, False, True, True])\n\n def test_nulls_array(self):\n assert_array_equal(self.array['nulls_array'],\n [[[-9, -9], [-9, -9]],\n [[0, 1], [2, 3]],\n [[-9, 0], [-9, 1]],\n [[0, -9], [1, -9]],\n [[-9, -9], [-9, -9]]])\n assert_array_equal(self.mask['nulls_array'],\n [[[True, True],\n [True, True]],\n\n [[False, False],\n [False, False]],\n\n [[True, False],\n [True, False]],\n\n [[False, True],\n [False, True]],\n\n [[True, True],\n [True, True]]])\n\n def test_double_array(self):\n assert issubclass(self.array['doublearray'].dtype.type,\n np.object_)\n assert len(self.array['doublearray'][0]) == 0\n assert_array_equal(self.array['doublearray'][1],\n [0, 1, np.inf, -np.inf, np.nan, 0, -1])\n 
assert_array_equal(self.array.data['doublearray'][1].mask,\n [False, False, False, False, False, False, True])\n\n def test_bit_array2(self):\n assert_array_equal(self.array['bitarray2'][0],\n [True, True, True, True,\n False, False, False, False,\n True, True, True, True,\n False, False, False, False])\n\n def test_bit_array2_mask(self):\n assert not np.any(self.mask['bitarray2'][0])\n assert np.all(self.mask['bitarray2'][1:])\n\n def test_get_coosys_by_id(self):\n coosys = self.votable.get_coosys_by_id('J2000')\n assert coosys.system == 'eq_FK5'\n\n def test_get_field_by_utype(self):\n fields = list(self.votable.get_fields_by_utype(\"myint\"))\n assert fields[0].name == \"int\"\n assert fields[0].values.min == -1000\n\n def test_get_info_by_id(self):\n info = self.votable.get_info_by_id('QUERY_STATUS')\n assert info.value == 'OK'\n\n if self.votable.version != '1.1':\n info = self.votable.get_info_by_id(\"ErrorInfo\")\n assert info.value == \"One might expect to find some INFO here, too...\" # noqa\n\n def test_repr(self):\n assert '3 tables' in repr(self.votable)\n assert repr(list(self.votable.iter_fields_and_params())[0]) == \\\n '<PARAM ID=\"awesome\" arraysize=\"*\" datatype=\"float\" name=\"INPUT\" unit=\"deg\" value=\"[0.0 0.0]\"/>' # noqa\n # Smoke test\n repr(list(self.votable.iter_groups()))\n\n # Resource\n assert repr(self.votable.resources) == '[</>]'\n\n\nclass TestThroughTableData(TestParse):\n def setup_class(self):\n votable = parse(get_pkg_data_filename('data/regression.xml'))\n\n self.xmlout = bio = io.BytesIO()\n votable.to_xml(bio)\n bio.seek(0)\n self.votable = parse(bio)\n self.table = self.votable.get_first_table()\n self.array = self.table.array\n self.mask = self.table.array.mask\n\n def test_bit_mask(self):\n assert_array_equal(self.mask['bit'],\n [False, False, False, False, False])\n\n def test_bitarray_mask(self):\n assert not np.any(self.mask['bitarray'])\n\n def test_bit_array2_mask(self):\n assert not np.any(self.mask['bitarray2'])\n\n def test_schema(self, tmpdir):\n # have to use an actual file because assert_validate_schema only works\n # on filenames, not file-like objects\n fn = str(tmpdir.join(\"test_through_tabledata.xml\"))\n with open(fn, 'wb') as f:\n f.write(self.xmlout.getvalue())\n assert_validate_schema(fn, '1.1')\n\n\nclass TestThroughBinary(TestParse):\n def setup_class(self):\n votable = parse(get_pkg_data_filename('data/regression.xml'))\n votable.get_first_table().format = 'binary'\n\n self.xmlout = bio = io.BytesIO()\n votable.to_xml(bio)\n bio.seek(0)\n self.votable = parse(bio)\n\n self.table = self.votable.get_first_table()\n self.array = self.table.array\n self.mask = self.table.array.mask\n\n # Masked values in bit fields don't roundtrip through the binary\n # representation -- that's not a bug, just a limitation, so\n # override the mask array checks here.\n def test_bit_mask(self):\n assert not np.any(self.mask['bit'])\n\n def test_bitarray_mask(self):\n assert not np.any(self.mask['bitarray'])\n\n def test_bit_array2_mask(self):\n assert not np.any(self.mask['bitarray2'])\n\n\nclass TestThroughBinary2(TestParse):\n def setup_class(self):\n votable = parse(get_pkg_data_filename('data/regression.xml'))\n votable.version = '1.3'\n votable.get_first_table()._config['version_1_3_or_later'] = True\n votable.get_first_table().format = 'binary2'\n\n self.xmlout = bio = io.BytesIO()\n votable.to_xml(bio)\n bio.seek(0)\n self.votable = parse(bio)\n\n self.table = self.votable.get_first_table()\n self.array = self.table.array\n self.mask 
= self.table.array.mask\n\n def test_get_coosys_by_id(self):\n # No COOSYS in VOTable 1.2 or later\n pass\n\n\ndef table_from_scratch():\n from astropy.io.votable.tree import VOTableFile, Resource, Table, Field\n\n # Create a new VOTable file...\n votable = VOTableFile()\n\n # ...with one resource...\n resource = Resource()\n votable.resources.append(resource)\n\n # ... with one table\n table = Table(votable)\n resource.tables.append(table)\n\n # Define some fields\n table.fields.extend([\n Field(votable, ID=\"filename\", datatype=\"char\"),\n Field(votable, ID=\"matrix\", datatype=\"double\", arraysize=\"2x2\")])\n\n # Now, use those field definitions to create the numpy record arrays, with\n # the given number of rows\n table.create_arrays(2)\n\n # Now table.array can be filled with data\n table.array[0] = ('test1.xml', [[1, 0], [0, 1]])\n table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])\n\n # Now write the whole thing to a file.\n # Note, we have to use the top-level votable file object\n out = io.StringIO()\n votable.to_xml(out)\n\n\ndef test_open_files():\n for filename in get_pkg_data_filenames('data', pattern='*.xml'):\n if filename.endswith('custom_datatype.xml'):\n continue\n parse(filename)\n\n\n@raises(VOTableSpecError)\ndef test_too_many_columns():\n parse(get_pkg_data_filename('data/too_many_columns.xml.gz'))\n\n\ndef test_build_from_scratch(tmpdir):\n # Create a new VOTable file...\n votable = tree.VOTableFile()\n\n # ...with one resource...\n resource = tree.Resource()\n votable.resources.append(resource)\n\n # ... with one table\n table = tree.Table(votable)\n resource.tables.append(table)\n\n # Define some fields\n table.fields.extend([\n tree.Field(votable, ID=\"filename\", datatype=\"char\"),\n tree.Field(votable, ID=\"matrix\", datatype=\"double\", arraysize=\"2x2\")])\n\n # Now, use those field definitions to create the numpy record arrays, with\n # the given number of rows\n table.create_arrays(2)\n\n # Now table.array can be filled with data\n table.array[0] = ('test1.xml', [[1, 0], [0, 1]])\n table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])\n\n # Now write the whole thing to a file.\n # Note, we have to use the top-level votable file object\n votable.to_xml(str(tmpdir.join(\"new_votable.xml\")))\n\n votable = parse(str(tmpdir.join(\"new_votable.xml\")))\n\n table = votable.get_first_table()\n assert_array_equal(\n table.array.mask, np.array([(False, [[False, False], [False, False]]),\n (False, [[False, False], [False, False]])],\n dtype=[('filename', '?'),\n ('matrix', '?', (2, 2))]))\n\n\ndef test_validate(test_path_object=False):\n \"\"\"\n test_path_object is needed for test below ``test_validate_path_object``\n so that file could be passed as pathlib.Path object.\n \"\"\"\n output = io.StringIO()\n fpath = get_pkg_data_filename('data/regression.xml')\n if test_path_object:\n fpath = pathlib.Path(fpath)\n\n # We can't test xmllint, because we can't rely on it being on the\n # user's machine.\n with catch_warnings():\n result = validate(fpath,\n output, xmllint=False)\n\n assert result is False\n\n output.seek(0)\n output = output.readlines()\n\n # Uncomment to generate new groundtruth\n # with open('validation.txt', 'wt', encoding='utf-8') as fd:\n # fd.write(u''.join(output))\n\n with open(\n get_pkg_data_filename('data/validation.txt'),\n 'rt', encoding='utf-8') as fd:\n truth = fd.readlines()\n\n truth = truth[1:]\n output = output[1:-1]\n\n sys.stdout.writelines(\n difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))\n\n 
assert truth == output\n\n\[email protected]('subprocess.Popen')\ndef test_validate_xmllint_true(mock_subproc_popen):\n process_mock = mock.Mock()\n attrs = {'communicate.return_value': ('ok', 'ko'),\n 'returncode': 0}\n process_mock.configure_mock(**attrs)\n mock_subproc_popen.return_value = process_mock\n\n assert validate(get_pkg_data_filename('data/empty_table.xml'),\n xmllint=True)\n\n\ndef test_validate_path_object():\n \"\"\"\n Validating when source is passed as path object. (#4412)\n \"\"\"\n test_validate(test_path_object=True)\n\n\ndef test_gzip_filehandles(tmpdir):\n votable = parse(get_pkg_data_filename('data/regression.xml'))\n\n with open(str(tmpdir.join(\"regression.compressed.xml\")), 'wb') as fd:\n votable.to_xml(\n fd,\n compressed=True,\n _astropy_version=\"testing\")\n\n with open(str(tmpdir.join(\"regression.compressed.xml\")), 'rb') as fd:\n votable = parse(fd)\n\n\ndef test_from_scratch_example():\n with catch_warnings(VOWarning) as warning_lines:\n try:\n _run_test_from_scratch_example()\n except ValueError as e:\n warning_lines.append(str(e))\n\n assert len(warning_lines) == 0\n\n\ndef _run_test_from_scratch_example():\n from astropy.io.votable.tree import VOTableFile, Resource, Table, Field\n\n # Create a new VOTable file...\n votable = VOTableFile()\n\n # ...with one resource...\n resource = Resource()\n votable.resources.append(resource)\n\n # ... with one table\n table = Table(votable)\n resource.tables.append(table)\n\n # Define some fields\n table.fields.extend([\n Field(votable, name=\"filename\", datatype=\"char\", arraysize=\"*\"),\n Field(votable, name=\"matrix\", datatype=\"double\", arraysize=\"2x2\")])\n\n # Now, use those field definitions to create the numpy record arrays, with\n # the given number of rows\n table.create_arrays(2)\n\n # Now table.array can be filled with data\n table.array[0] = ('test1.xml', [[1, 0], [0, 1]])\n table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])\n\n assert table.array[0][0] == 'test1.xml'\n\n\ndef test_fileobj():\n # Assert that what we get back is a raw C file pointer\n # so it will be super fast in the C extension.\n from astropy.utils.xml import iterparser\n filename = get_pkg_data_filename('data/regression.xml')\n with iterparser._convert_to_fd_or_read_function(filename) as fd:\n if sys.platform == 'win32':\n fd()\n else:\n assert isinstance(fd, io.FileIO)\n\n\ndef test_nonstandard_units():\n from astropy import units as u\n\n votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'))\n\n assert isinstance(\n votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)\n\n votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'),\n unit_format='generic')\n\n assert not isinstance(\n votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)\n\n\ndef test_resource_structure():\n # Based on issue #1223, as reported by @astro-friedel and @RayPlante\n from astropy.io.votable import tree as vot\n\n vtf = vot.VOTableFile()\n\n r1 = vot.Resource()\n vtf.resources.append(r1)\n t1 = vot.Table(vtf)\n t1.name = \"t1\"\n t2 = vot.Table(vtf)\n t2.name = 't2'\n r1.tables.append(t1)\n r1.tables.append(t2)\n\n r2 = vot.Resource()\n vtf.resources.append(r2)\n t3 = vot.Table(vtf)\n t3.name = \"t3\"\n t4 = vot.Table(vtf)\n t4.name = \"t4\"\n r2.tables.append(t3)\n r2.tables.append(t4)\n\n r3 = vot.Resource()\n vtf.resources.append(r3)\n t5 = vot.Table(vtf)\n t5.name = \"t5\"\n t6 = vot.Table(vtf)\n t6.name = \"t6\"\n r3.tables.append(t5)\n r3.tables.append(t6)\n\n buff = io.BytesIO()\n 
vtf.to_xml(buff)\n\n buff.seek(0)\n vtf2 = parse(buff)\n\n assert len(vtf2.resources) == 3\n\n for r in range(len(vtf2.resources)):\n res = vtf2.resources[r]\n assert len(res.tables) == 2\n assert len(res.resources) == 0\n\n\ndef test_no_resource_check():\n output = io.StringIO()\n\n with catch_warnings():\n # We can't test xmllint, because we can't rely on it being on the\n # user's machine.\n result = validate(get_pkg_data_filename('data/no_resource.xml'),\n output, xmllint=False)\n\n assert result is False\n\n output.seek(0)\n output = output.readlines()\n\n # Uncomment to generate new groundtruth\n # with open('no_resource.txt', 'wt', encoding='utf-8') as fd:\n # fd.write(u''.join(output))\n\n with open(\n get_pkg_data_filename('data/no_resource.txt'),\n 'rt', encoding='utf-8') as fd:\n truth = fd.readlines()\n\n truth = truth[1:]\n output = output[1:-1]\n\n sys.stdout.writelines(\n difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))\n\n assert truth == output\n\n\ndef test_instantiate_vowarning():\n # This used to raise a deprecation exception.\n # See https://github.com/astropy/astroquery/pull/276\n VOWarning(())\n\n\ndef test_custom_datatype():\n votable = parse(get_pkg_data_filename('data/custom_datatype.xml'),\n datatype_mapping={'bar': 'int'})\n\n table = votable.get_first_table()\n assert table.array.dtype['foo'] == np.int32\n" ]
[ [ "matplotlib.backend_bases.KeyEvent", "matplotlib.pyplot.figure" ], [ "numpy.uint32", "numpy.dtype", "numpy.bitwise_and", "numpy.char.array", "numpy.uint16", "numpy.uint64", "numpy.ndarray.__setitem__", "numpy.left_shift", "numpy.add", "numpy.min_scalar_type", "numpy.array" ], [ "numpy.all", "numpy.array", "numpy.testing.assert_array_equal", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
2018-B-GR1-Python/Velasco-Yepez-Andres-David
[ "0c017d6e5f169f31207ddec5ceffc8dd82d327eb" ]
[ "03_spyder/proyecto_spyder.py" ]
[ "\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 28 01:15:31 2018\n\n@author: Andres\n\"\"\"\n\nimport pandas as pd\nurl = 'http://catalogo.datosabiertos.gob.ec/api/action/datastore_search?resource_id=8513f446-1c94-426e-8592-d4cbdd295f33&limit=1000'\n\ndatos = pd.read_json(url, typ='frame')\ndatos =pd.DataFrame.from_dict(datos[\"result\"][\"records\"]).set_index(\"_id\")\n#datos[ datos['Sexo'] != 'MASCULINO' ]\ndatos.loc[289,'Canton']\nclass Homicidio:\n\n def __init__(self,Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):\n self.Canton=Canton\n self.Circuito=Circuito\n self.Distrito=Distrito\n self.Edad = Edad\n self.Estado_civil=Estado_Civil\n self.Fecha_infraccion=Fecha_infraccion\n self.Hora_infraccion=Hora_infraccion\n self.Nacionalidad=Nacionalidad\n self.Provincia = Provincia\n self.Sexo = Sexo\n self.Zona = Zona\n self.tipo = tipo_muert_matriz\n\n def get_list(self):\n return [self.Canton,self.Circuito,self.Distrito,self.Edad,self.Estado_civil,self.Fecha_infraccion,\n self.Hora_infraccion,self.Nacionalidad,self.Provincia,self.Sexo,self.Zona,self.tipo]\n\ndef insertar(Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):\n global datos\n _id = datos.index+1\n homicidio=Homicidio(Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz)\n s = homicidio.get_list()\n serie = pd.Series(s,index=datos.columns)\n datos = datos.append(serie,ignore_index=True) # adding a row\n \n\n\n\ninsertar(\"MIAMI\",\t\"CI\",\"W\",\"211\",\"SOLTERO\",\t\"2019-04-05T00:00:00\",\"2015-12-02T23:00:00\",\t\n \"EEUU\",\"FLORIDA\",\"MASCULINO\",\t\"ZONA 80\",\"Asesinatos\"\n)\n\n\n \n\n" ]
[ [ "pandas.Series", "pandas.read_json", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
leonidk/cupdice
[ "f3386337922337eaaae2f244607f1af73516843f" ]
[ "lavalle_rrts.py" ]
[ "#!/usr/bin/env python\n\n# rrt.py\n# This program generates a simple rapidly\n# exploring random tree (RRT) in a rectangular region.\n#\n# Written by Steve LaValle\n# May 2011\n\nimport sys, random, math, pygame\nfrom pygame.locals import *\nfrom math import sqrt,cos,sin,atan2\nimport heapq\nimport numpy as np\n\n#constants\nXDIM = 500\nYDIM = 500\nWINSIZE = np.array([XDIM, YDIM])\nMAX_STEP_SIZE = 12\nNUMNODES = 5000\nNUM_OBSTACLES = 30\nOBSTACLE_WIDTH = 80\nOBSTACLE_HEIGHT = 80\nRAND_SEARCH_PROB = 0.25\nGOAL_TOL = 1e-3\n\nstart = WINSIZE/2\ngoal1 = np.zeros((1,2))\ngoal2 = WINSIZE.reshape((1,2))\n\n\n\ndef step_from_to(p1,p2):\n if np.linalg.norm(p1-p2) < MAX_STEP_SIZE:\n return p2\n else:\n diff = p2-p1\n return p1 + MAX_STEP_SIZE*diff/np.linalg.norm(diff)\n\ndef main():\n #initialize and prepare screen\n pygame.init()\n screen = pygame.display.set_mode(WINSIZE)\n pygame.display.set_caption('RRT S. LaValle May 2011')\n white = 255, 240, 200\n black = 20, 20, 40\n screen.fill(black)\n\n obstacles = []\n for _ in range(NUM_OBSTACLES):\n rand_rect = np.random.rand(4)*np.array([XDIM,YDIM,OBSTACLE_WIDTH,OBSTACLE_HEIGHT]) + np.ones(4)*MAX_STEP_SIZE\n if (rand_rect[:2] < start).all() and (rand_rect[:2]+rand_rect[2:] > start).all():\n print('skip!')\n continue\n if (rand_rect[:2] < goal1).all() and (rand_rect[:2]+rand_rect[2:] > goal1).all():\n print('skip!')\n continue\n if (rand_rect[:2] < goal2).all() and (rand_rect[:2]+rand_rect[2:] > goal2).all():\n print('skip!')\n continue\n obstacles.append(rand_rect)\n for idx,o in enumerate(obstacles):\n weight = idx/(len(obstacles)-1)\n color = (240-240*weight,128,40+(255-40)*weight)\n screen.fill(color,o)\n\n nodes = np.array([start])[:np.newaxis]\n connections = np.array([0])\n print(nodes.shape,connections.shape)\n for goal in [goal1,goal2]:\n searching = True\n prev_node = None\n for i in range(NUMNODES):\n if searching:\n # get a random configuration\n #valid = False\n #while not valid:\n if prev_node is None:\n rand = np.random.rand(1,2)*WINSIZE if np.random.rand() > RAND_SEARCH_PROB else goal\n else:\n rand = prev_node\n #valid = True\n #for o in obstacles:\n #if (o[:2] < rand[0]).all() and (o[:2]+o[2:] > rand[0]).all():\n #valid = False\n #break\n\n dists = np.linalg.norm(nodes-rand,axis=1)\n #print(dists)\n closest_idx = np.argmin(dists)\n closest = nodes[closest_idx]\n new_node = step_from_to(closest,rand)\n valid_new_node = True\n for o in obstacles:\n if (o[:2] < new_node[0]).all() and (o[:2]+o[2:] > new_node[0]).all():\n valid_new_node = False\n break\n if valid_new_node:\n if (rand == goal).all() and np.linalg.norm(new_node - goal) < dists.min():\n prev_node = new_node\n #print('new')\n else:\n prev_node = None\n #print('cancel')\n if np.linalg.norm(new_node - goal) > GOAL_TOL:\n #print(goal,new_node)\n\n nodes = np.append(nodes,new_node,0)\n connections = np.append(connections,closest_idx)\n #print(np.linalg.norm(new_node - goal),nodes.shape,connections.shape)\n\n pygame.draw.line(screen,white,np.squeeze(closest),np.squeeze(new_node))\n else:\n print(new_node,goal)\n path_node = closest_idx\n while path_node != 0:\n print(path_node,end=',',flush=True)\n path_node = connections[path_node]\n print(0)\n searching = False\n break\n else:\n prev_node = None\n pygame.display.update()\n #print i, \" \", nodes\n\n for e in pygame.event.get():\n if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):\n sys.exit(\"Leaving because you requested it.\")\n \n while True:\n for e in pygame.event.get():\n if e.type == QUIT or (e.type == KEYUP 
and e.key == K_ESCAPE):\n sys.exit(\"Leaving because you requested it.\")\n# if python says run, then we should run\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.squeeze", "numpy.linalg.norm", "numpy.ones", "numpy.append", "numpy.argmin", "numpy.random.rand", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mehrdadzakershahrak/Online-Explanation-Generation
[ "e41ad9b5a390abdaf271562a56105c191e33b74d" ]
[ "rovers/fastdownward/experiments/issue750/relativescatter.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom collections import defaultdict\n\nfrom matplotlib import ticker\n\nfrom downward.reports.scatter import ScatterPlotReport\nfrom downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot\n\n\n# TODO: handle outliers\n\n# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)\nclass RelativeScatterMatplotlib(Matplotlib):\n @classmethod\n def _plot(cls, report, axes, categories, styles):\n # Display grid\n axes.grid(b=True, linestyle='-', color='0.75')\n\n has_points = False\n # Generate the scatter plots\n for category, coords in sorted(categories.items()):\n X, Y = zip(*coords)\n axes.scatter(X, Y, s=42, label=category, **styles[category])\n if X and Y:\n has_points = True\n\n if report.xscale == 'linear' or report.yscale == 'linear':\n plot_size = report.missing_val * 1.01\n else:\n plot_size = report.missing_val * 1.25\n\n # make 5 ticks above and below 1\n yticks = []\n tick_step = report.ylim_top**(1/5.0)\n for i in xrange(-5, 6):\n yticks.append(tick_step**i)\n axes.set_yticks(yticks)\n axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())\n\n axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)\n axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)\n\n for axis in [axes.xaxis, axes.yaxis]:\n MatplotlibPlot.change_axis_formatter(\n axis,\n report.missing_val if report.show_missing else None)\n return has_points\n\n\nclass RelativeScatterPlotReport(ScatterPlotReport):\n \"\"\"\n Generate a scatter plot that shows a relative comparison of two\n algorithms with regard to the given attribute. The attribute value\n of algorithm 1 is shown on the x-axis and the relation to the value\n of algorithm 2 on the y-axis.\n \"\"\"\n\n def __init__(self, show_missing=True, get_category=None, **kwargs):\n ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)\n if self.output_format == 'tex':\n raise \"not supported\"\n else:\n self.writer = RelativeScatterMatplotlib\n\n def _fill_categories(self, runs):\n # We discard the *runs* parameter.\n # Map category names to value tuples\n categories = defaultdict(list)\n self.ylim_bottom = 2\n self.ylim_top = 0.5\n self.xlim_left = float(\"inf\")\n for (domain, problem), runs in self.problem_runs.items():\n if len(runs) != 2:\n continue\n run1, run2 = runs\n assert (run1['algorithm'] == self.algorithms[0] and\n run2['algorithm'] == self.algorithms[1])\n val1 = run1.get(self.attribute)\n val2 = run2.get(self.attribute)\n if val1 is None or val2 is None:\n continue\n category = self.get_category(run1, run2)\n assert val1 > 0, (domain, problem, self.algorithms[0], val1)\n assert val2 > 0, (domain, problem, self.algorithms[1], val2)\n x = val1\n y = val2 / float(val1)\n\n categories[category].append((x, y))\n\n self.ylim_top = max(self.ylim_top, y)\n self.ylim_bottom = min(self.ylim_bottom, y)\n self.xlim_left = min(self.xlim_left, x)\n\n # center around 1\n if self.ylim_bottom < 1:\n self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))\n if self.ylim_top > 1:\n self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))\n return categories\n\n def _set_scales(self, xscale, yscale):\n # ScatterPlot uses log-scaling on the x-axis by default.\n PlotReport._set_scales(\n self, xscale or self.attribute.scale or 'log', 'log')\n" ]
[ [ "matplotlib.ticker.ScalarFormatter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lerongil/qiskit-terra
[ "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c", "a25af2a2378bc3d4f5ec73b948d048d1b707454c" ]
[ "test/python/quantum_info/test_weyl.py", "qiskit/visualization/tools/pi_check.py", "qiskit/quantum_info/operators/channel/kraus.py", "qiskit/pulse/pulse_lib/continuous.py", "test/python/quantum_info/operators/channel/test_transformations.py", "qiskit/pulse/pulse_lib/samplers/decorators.py", "qiskit/quantum_info/operators/channel/choi.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=invalid-name\n\n\"\"\"Tests for Weyl coorindate routines.\"\"\"\n\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.quantum_info.random import random_unitary\nfrom qiskit.quantum_info.synthesis.weyl import weyl_coordinates\nfrom qiskit.quantum_info.synthesis.local_invariance import (two_qubit_local_invariants,\n local_equivalence)\n\n\nclass TestWeyl(QiskitTestCase):\n \"\"\"Test Weyl coordinate routines\"\"\"\n\n def test_weyl_coordinates_simple(self):\n \"\"\"Check Weyl coordinates against known cases.\n \"\"\"\n # Identity [0,0,0]\n U = np.identity(4)\n weyl = weyl_coordinates(U)\n assert_allclose(weyl, [0, 0, 0])\n\n # CNOT [pi/4, 0, 0]\n U = np.array([[1, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 0]], dtype=complex)\n weyl = weyl_coordinates(U)\n assert_allclose(weyl, [np.pi / 4, 0, 0], atol=1e-07)\n\n # SWAP [pi/4, pi/4 ,pi/4]\n U = np.array([[1, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1]], dtype=complex)\n\n weyl = weyl_coordinates(U)\n assert_allclose(weyl, [np.pi / 4, np.pi / 4, np.pi / 4])\n\n # SQRT ISWAP [pi/8, pi/8, 0]\n U = np.array([[1, 0, 0, 0],\n [0, 1 / np.sqrt(2), 1j / np.sqrt(2), 0],\n [0, 1j / np.sqrt(2), 1 / np.sqrt(2), 0],\n [0, 0, 0, 1]], dtype=complex)\n\n weyl = weyl_coordinates(U)\n assert_allclose(weyl, [np.pi / 8, np.pi / 8, 0])\n\n def test_weyl_coordinates_random(self):\n \"\"\"Randomly check Weyl coordinates with local invariants.\n \"\"\"\n for _ in range(10):\n U = random_unitary(4).data\n weyl = weyl_coordinates(U)\n local_equiv = local_equivalence(weyl)\n local = two_qubit_local_invariants(U)\n assert_allclose(local, local_equiv)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Check if number close to values of PI\n\"\"\"\n\nimport numpy as np\n\nfrom qiskit.exceptions import QiskitError\n\nN, D = np.meshgrid(np.arange(1, 9), np.arange(1, 9))\nFRAC_MESH = N / D * np.pi\n\n\ndef pi_check(inpt, eps=1e-6, output='text', ndigits=5):\n \"\"\" Computes if a number is close to an integer\n fraction or multiple of PI and returns the\n corresponding string.\n\n Args:\n inpt (float): Number to check.\n eps (float): EPS to check against.\n output (str): Options are 'text' (default),\n 'latex', and 'mpl'.\n ndigits (int): Number of digits to print\n if returning raw inpt.\n\n Returns:\n str: string representation of output.\n\n Raises:\n QiskitError: if output is not a valid option.\n \"\"\"\n inpt = float(inpt)\n if abs(inpt) < 1e-14:\n return str(0)\n val = inpt / np.pi\n\n if output == 'text':\n pi = 'pi'\n elif output == 'latex':\n pi = '\\\\pi'\n elif output == 'mpl':\n pi = '$\\\\pi$'\n else:\n raise QiskitError('pi_check parameter output should be text, latex, or mpl')\n\n if abs(val) >= 1:\n if abs(val % 1) < eps:\n val = int(round(val))\n if val == 1:\n str_out = '{}'.format(pi)\n elif val == -1:\n str_out = '-{}'.format(pi)\n else:\n str_out = '{}{}'.format(val, pi)\n return str_out\n\n val = np.pi / inpt\n if abs(abs(val) - abs(round(val))) < eps:\n val = int(round(val))\n if val > 0:\n str_out = '{}/{}'.format(pi, val)\n else:\n str_out = '-{}/{}'.format(pi, abs(val))\n return str_out\n\n # Look for all fracs in 8\n abs_val = abs(inpt)\n frac = np.where(np.abs(abs_val - FRAC_MESH) < 1e-8)\n if frac[0].shape[0]:\n numer = int(frac[1][0]) + 1\n denom = int(frac[0][0]) + 1\n if inpt < 0:\n numer *= -1\n\n if numer == 1 and denom == 1:\n str_out = '{}'.format(pi)\n elif numer == -1 and denom == 1:\n str_out = '-{}'.format(pi)\n elif numer == 1:\n str_out = '{}/{}'.format(pi, denom)\n elif numer == -1:\n str_out = '-{}/{}'.format(pi, denom)\n elif denom == 1:\n str_out = '{}/{}'.format(numer, pi)\n else:\n str_out = '{}{}/{}'.format(numer, pi, denom)\n\n return str_out\n # nothing found\n str_out = '%.{}g'.format(ndigits) % inpt\n return str_out\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=len-as-condition\n\n\"\"\"\nKraus representation of a Quantum Channel.\n\n\nThe Kraus representation for a quantum channel E is given by a set of matrices [A_i] such that\n\n E(ρ) = sum_i A_i.ρ.A_i^dagger\n\nA general operator map G can also be written using the generalized Kraus representation which\nis given by two sets of matrices [A_i], [B_i] such that\n\n G(ρ) = sum_i A_i.ρ.B_i^dagger\n\nSee [1] for further details.\n\nReferences:\n [1] C.J. Wood, J.D. Biamonte, D.G. Cory, Quant. Inf. Comp. 
15, 0579-0811 (2015)\n Open access: arXiv:1111.6950 [quant-ph]\n\"\"\"\n\nfrom numbers import Number\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.operators.predicates import is_identity_matrix\nfrom qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel\nfrom qiskit.quantum_info.operators.channel.choi import Choi\nfrom qiskit.quantum_info.operators.channel.superop import SuperOp\nfrom qiskit.quantum_info.operators.channel.transformations import _to_kraus\n\n\nclass Kraus(QuantumChannel):\n \"\"\"Kraus representation of a quantum channel.\"\"\"\n\n def __init__(self, data, input_dims=None, output_dims=None):\n \"\"\"Initialize a quantum channel Kraus operator.\n\n Args:\n data (QuantumCircuit or\n Instruction or\n BaseOperator or\n matrix): data to initialize superoperator.\n input_dims (tuple): the input subsystem dimensions.\n [Default: None]\n output_dims (tuple): the output subsystem dimensions.\n [Default: None]\n\n Raises:\n QiskitError: if input data cannot be initialized as a\n a list of Kraus matrices.\n\n Additional Information\n ----------------------\n If the input or output dimensions are None, they will be\n automatically determined from the input data. If the input data is\n a list of Numpy arrays of shape (2**N, 2**N) qubit systems will be used. If\n the input does not correspond to an N-qubit channel, it will assign a\n single subsystem with dimension specified by the shape of the input.\n \"\"\"\n # If the input is a list or tuple we assume it is a list of Kraus\n # matrices, if it is a numpy array we assume that it is a single Kraus\n # operator\n if isinstance(data, (list, tuple, np.ndarray)):\n # Check if it is a single unitary matrix A for channel:\n # E(rho) = A * rho * A^\\dagger\n if isinstance(data, np.ndarray) or np.array(data).ndim == 2:\n # Convert single Kraus op to general Kraus pair\n kraus = ([np.array(data, dtype=complex)], None)\n shape = kraus[0][0].shape\n\n # Check if single Kraus set [A_i] for channel:\n # E(rho) = sum_i A_i * rho * A_i^dagger\n elif isinstance(data, list) and len(data) > 0:\n # Get dimensions from first Kraus op\n kraus = [np.array(data[0], dtype=complex)]\n shape = kraus[0].shape\n # Iterate over remaining ops and check they are same shape\n for i in data[1:]:\n op = np.array(i, dtype=complex)\n if op.shape != shape:\n raise QiskitError(\n \"Kraus operators are different dimensions.\")\n kraus.append(op)\n # Convert single Kraus set to general Kraus pair\n kraus = (kraus, None)\n\n # Check if generalized Kraus set ([A_i], [B_i]) for channel:\n # E(rho) = sum_i A_i * rho * B_i^dagger\n elif isinstance(data,\n tuple) and len(data) == 2 and len(data[0]) > 0:\n kraus_left = [np.array(data[0][0], dtype=complex)]\n shape = kraus_left[0].shape\n for i in data[0][1:]:\n op = np.array(i, dtype=complex)\n if op.shape != shape:\n raise QiskitError(\n \"Kraus operators are different dimensions.\")\n kraus_left.append(op)\n if data[1] is None:\n kraus = (kraus_left, None)\n else:\n kraus_right = []\n for i in data[1]:\n op = np.array(i, dtype=complex)\n if op.shape != shape:\n raise QiskitError(\n \"Kraus operators are different dimensions.\")\n kraus_right.append(op)\n kraus = (kraus_left, kraus_right)\n else:\n raise QiskitError(\"Invalid input for Kraus channel.\")\n else:\n # Otherwise we initialize by conversion from another Qiskit\n # object into the 
QuantumChannel.\n if isinstance(data, (QuantumCircuit, Instruction)):\n # If the input is a Terra QuantumCircuit or Instruction we\n # convert it to a SuperOp\n data = SuperOp._init_instruction(data)\n else:\n # We use the QuantumChannel init transform to initialize\n # other objects into a QuantumChannel or Operator object.\n data = self._init_transformer(data)\n input_dim, output_dim = data.dim\n # Now that the input is an operator we convert it to a Kraus\n kraus = _to_kraus(data.rep, data._data, input_dim, output_dim)\n if input_dims is None:\n input_dims = data.input_dims()\n if output_dims is None:\n output_dims = data.output_dims()\n\n output_dim, input_dim = kraus[0][0].shape\n # Check and format input and output dimensions\n input_dims = self._automatic_dims(input_dims, input_dim)\n output_dims = self._automatic_dims(output_dims, output_dim)\n # Initialize either single or general Kraus\n if kraus[1] is None or np.allclose(kraus[0], kraus[1]):\n # Standard Kraus map\n super().__init__('Kraus', (kraus[0], None), input_dims,\n output_dims)\n else:\n # General (non-CPTP) Kraus map\n super().__init__('Kraus', kraus, input_dims, output_dims)\n\n @property\n def data(self):\n \"\"\"Return list of Kraus matrices for channel.\"\"\"\n if self._data[1] is None:\n # If only a single Kraus set, don't return the tuple\n # Just the fist set\n return self._data[0]\n else:\n # Otherwise return the tuple of both kraus sets\n return self._data\n\n def is_cptp(self, atol=None, rtol=None):\n \"\"\"Return True if completely-positive trace-preserving.\"\"\"\n if self._data[1] is not None:\n return False\n if atol is None:\n atol = self._atol\n if rtol is None:\n rtol = self._rtol\n accum = 0j\n for op in self._data[0]:\n accum += np.dot(np.transpose(np.conj(op)), op)\n return is_identity_matrix(accum, rtol=rtol, atol=atol)\n\n def conjugate(self):\n \"\"\"Return the conjugate of the QuantumChannel.\"\"\"\n kraus_l, kraus_r = self._data\n kraus_l = [k.conj() for k in kraus_l]\n if kraus_r is not None:\n kraus_r = [k.conj() for k in kraus_r]\n return Kraus((kraus_l, kraus_r), self.input_dims(), self.output_dims())\n\n def transpose(self):\n \"\"\"Return the transpose of the QuantumChannel.\"\"\"\n kraus_l, kraus_r = self._data\n kraus_l = [k.T for k in kraus_l]\n if kraus_r is not None:\n kraus_r = [k.T for k in kraus_r]\n return Kraus((kraus_l, kraus_r),\n input_dims=self.output_dims(),\n output_dims=self.input_dims())\n\n def compose(self, other, qargs=None, front=False):\n \"\"\"Return the composition channel self∘other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n qargs (list): a list of subsystem positions to compose other on.\n front (bool): If False compose in standard order other(self(input))\n otherwise compose in reverse order self(other(input))\n [default: False]\n\n Returns:\n Kraus: The composition channel as a Kraus object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel, or\n has incompatible dimensions.\n \"\"\"\n if qargs is not None:\n return Kraus(\n SuperOp(self).compose(other, qargs=qargs, front=front))\n\n if not isinstance(other, Kraus):\n other = Kraus(other)\n # Check dimensions match up\n if front and self._input_dim != other._output_dim:\n raise QiskitError(\n 'input_dim of self must match output_dim of other')\n if not front and self._output_dim != other._input_dim:\n raise QiskitError(\n 'input_dim of other must match output_dim of self')\n\n if front:\n ka_l, ka_r = self._data\n kb_l, kb_r = other._data\n input_dim = 
other._input_dim\n output_dim = self._output_dim\n else:\n ka_l, ka_r = other._data\n kb_l, kb_r = self._data\n input_dim = self._input_dim\n output_dim = other._output_dim\n\n kab_l = [np.dot(a, b) for a in ka_l for b in kb_l]\n if ka_r is None and kb_r is None:\n kab_r = None\n elif ka_r is None:\n kab_r = [np.dot(a, b) for a in ka_l for b in kb_r]\n elif kb_r is None:\n kab_r = [np.dot(a, b) for a in ka_r for b in kb_l]\n else:\n kab_r = [np.dot(a, b) for a in ka_r for b in kb_r]\n return Kraus((kab_l, kab_r), input_dim, output_dim)\n\n def power(self, n):\n \"\"\"The matrix power of the channel.\n\n Args:\n n (int): compute the matrix power of the superoperator matrix.\n\n Returns:\n Kraus: the matrix power of the SuperOp converted to a Kraus channel.\n\n Raises:\n QiskitError: if the input and output dimensions of the\n QuantumChannel are not equal, or the power is not an integer.\n \"\"\"\n if n > 0:\n return super().power(n)\n return Kraus(SuperOp(self).power(n))\n\n def tensor(self, other):\n \"\"\"Return the tensor product channel self ⊗ other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n\n Returns:\n Kraus: the tensor product channel self ⊗ other as a Kraus\n object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel.\n \"\"\"\n return self._tensor_product(other, reverse=False)\n\n def expand(self, other):\n \"\"\"Return the tensor product channel other ⊗ self.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n\n Returns:\n Kraus: the tensor product channel other ⊗ self as a Kraus\n object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel.\n \"\"\"\n return self._tensor_product(other, reverse=True)\n\n def add(self, other):\n \"\"\"Return the QuantumChannel self + other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n\n Returns:\n Kraus: the linear addition self + other as a Kraus object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel, or\n has incompatible dimensions.\n \"\"\"\n # Since we cannot directly add two channels in the Kraus\n # representation we try and use the other channels method\n # or convert to the Choi representation\n return Kraus(Choi(self).add(other))\n\n def subtract(self, other):\n \"\"\"Return the QuantumChannel self - other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n\n Returns:\n Kraus: the linear subtraction self - other as Kraus object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel, or\n has incompatible dimensions.\n \"\"\"\n # Since we cannot directly subtract two channels in the Kraus\n # representation we try and use the other channels method\n # or convert to the Choi representation\n return Kraus(Choi(self).subtract(other))\n\n def multiply(self, other):\n \"\"\"Return the QuantumChannel self + other.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n Kraus: the scalar multiplication other * self as a Kraus object.\n\n Raises:\n QiskitError: if other is not a valid scalar.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n # If the number is complex we need to convert to general\n # kraus channel so we multiply via Choi representation\n if isinstance(other, complex) or other < 0:\n # Convert to Choi-matrix\n return Kraus(Choi(self).multiply(other))\n # If the number is real we can update the Kraus operators\n # directly\n val = np.sqrt(other)\n kraus_r = None\n kraus_l = [val * k for k in self._data[0]]\n if self._data[1] is not None:\n 
kraus_r = [val * k for k in self._data[1]]\n return Kraus((kraus_l, kraus_r), self._input_dim, self._output_dim)\n\n def _evolve(self, state, qargs=None):\n \"\"\"Evolve a quantum state by the quantum channel.\n\n Args:\n state (DensityMatrix or Statevector): The input state.\n qargs (list): a list of quantum state subsystem positions to apply\n the quantum channel on.\n\n Returns:\n DensityMatrix: the output quantum state as a density matrix.\n\n Raises:\n QiskitError: if the quantum channel dimension does not match the\n specified quantum state subsystem dimensions.\n \"\"\"\n return SuperOp(self)._evolve(state, qargs)\n\n def _tensor_product(self, other, reverse=False):\n \"\"\"Return the tensor product channel.\n\n Args:\n other (QuantumChannel): a quantum channel subclass.\n reverse (bool): If False return self ⊗ other, if True return\n if True return (other ⊗ self) [Default: False\n Returns:\n Kraus: the tensor product channel as a Kraus object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel.\n \"\"\"\n # Convert other to Kraus\n if not isinstance(other, Kraus):\n other = Kraus(other)\n\n # Get tensor matrix\n ka_l, ka_r = self._data\n kb_l, kb_r = other._data\n if reverse:\n input_dims = self.input_dims() + other.input_dims()\n output_dims = self.output_dims() + other.output_dims()\n kab_l = [np.kron(b, a) for a in ka_l for b in kb_l]\n else:\n input_dims = other.input_dims() + self.input_dims()\n output_dims = other.output_dims() + self.output_dims()\n kab_l = [np.kron(a, b) for a in ka_l for b in kb_l]\n if ka_r is None and kb_r is None:\n kab_r = None\n else:\n if ka_r is None:\n ka_r = ka_l\n if kb_r is None:\n kb_r = kb_l\n if reverse:\n kab_r = [np.kron(b, a) for a in ka_r for b in kb_r]\n else:\n kab_r = [np.kron(a, b) for a in ka_r for b in kb_r]\n data = (kab_l, kab_r)\n return Kraus(data, input_dims, output_dims)\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc, invalid-unary-operand-type\n\n\"\"\"Module for builtin continuous pulse functions.\"\"\"\n\nimport functools\nfrom typing import Union, Tuple, Optional\n\nimport numpy as np\n\n\ndef constant(times: np.ndarray, amp: complex) -> np.ndarray:\n \"\"\"Continuous constant pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Complex pulse amplitude.\n \"\"\"\n return np.full(len(times), amp, dtype=np.complex_)\n\n\ndef zero(times: np.ndarray) -> np.ndarray:\n \"\"\"Continuous zero pulse.\n\n Args:\n times: Times to output pulse for.\n \"\"\"\n return constant(times, 0)\n\n\ndef square(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous square wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. 
Wave range is [-amp, amp].\n period: Pulse period, units of dt.\n phase: Pulse phase.\n \"\"\"\n x = times/period+phase/np.pi\n return amp*(2*(2*np.floor(x) - np.floor(2*x)) + 1).astype(np.complex_)\n\n\ndef sawtooth(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous sawtooth wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt.\n phase: Pulse phase.\n \"\"\"\n x = times/period+phase/np.pi\n return amp*2*(x-np.floor(1/2+x)).astype(np.complex_)\n\n\ndef triangle(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous triangle wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt.\n phase: Pulse phase.\n \"\"\"\n return amp*(-2*np.abs(sawtooth(times, 1, period, (phase-np.pi/2)/2)) + 1).astype(np.complex_)\n\n\ndef cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp*np.cos(2*np.pi*freq*times+phase).astype(np.complex_)\n\n\ndef sin(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp*np.sin(2*np.pi*freq*times+phase).astype(np.complex_)\n\n\ndef _fix_gaussian_width(gaussian_samples, amp: float, center: float, sigma: float,\n zeroed_width: Optional[float] = None, rescale_amp: bool = False,\n ret_scale_factor: bool = False) -> np.ndarray:\n r\"\"\"Enforce that the supplied gaussian pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `2\\times center+1`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline to gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse. If unsupplied,\n defaults to $2*(center+1)$ such that the samples are zero at $\\Omega_g(-1)$.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)-\\Omega_g(center\\pm zeroed_width/2)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if zeroed_width is None:\n zeroed_width = 2*(center+1)\n\n zero_offset = gaussian(np.array([-zeroed_width/2]), amp, center, sigma)\n gaussian_samples -= zero_offset\n amp_scale_factor = 1.\n if rescale_amp:\n amp_scale_factor = amp/(amp-zero_offset) if amp-zero_offset != 0 else 1.\n gaussian_samples *= amp_scale_factor\n\n if ret_scale_factor:\n return gaussian_samples, amp_scale_factor\n return gaussian_samples\n\n\ndef gaussian(times: np.ndarray, amp: complex, center: float, sigma: float,\n zeroed_width: Optional[float] = None, rescale_amp: bool = False,\n ret_x: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized gaussian pulse.\n\n Integrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`. 
If `zeroed_width` is set pulse amplitude at center\n will be $amp-\\Omega_g(center\\pm zeroed_width/2)$ unless `rescale_amp` is set,\n in which case all samples will be rescaled such that the center\n amplitude will be `amp`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline to gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)-\\Omega_g(center\\pm zeroed_width/2)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times-center)/sigma\n gauss = amp*np.exp(-x**2/2).astype(np.complex_)\n\n if zeroed_width is not None:\n gauss = _fix_gaussian_width(gauss, amp=amp, center=center, sigma=sigma,\n zeroed_width=zeroed_width, rescale_amp=rescale_amp)\n\n if ret_x:\n return gauss, x\n return gauss\n\n\ndef gaussian_deriv(times: np.ndarray, amp: complex, center: float, sigma: float,\n ret_gaussian: bool = False) -> np.ndarray:\n \"\"\"Continuous unnormalized gaussian derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_gaussian: Return gaussian with which derivative was taken with.\n \"\"\"\n gauss, x = gaussian(times, amp=amp, center=center, sigma=sigma, ret_x=True)\n gauss_deriv = -x/sigma*gauss\n if ret_gaussian:\n return gauss_deriv, gauss\n return gauss_deriv\n\n\ndef sech_fn(x, *args, **kwargs):\n r\"\"\"Hyperbolic secant function\"\"\"\n return 1.0 / np.cosh(x, *args, **kwargs)\n\n\ndef sech(times: np.ndarray, amp: complex, center: float, sigma: float,\n ret_x: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized sech pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times-center)/sigma\n sech_out = amp*sech_fn(x).astype(np.complex_)\n\n if ret_x:\n return sech_out, x\n return sech_out\n\n\ndef sech_deriv(times: np.ndarray, amp: complex, center: float, sigma: float,\n ret_sech: bool = False) -> np.ndarray:\n \"\"\"Continuous unnormalized sech derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_sech: Return sech with which derivative was taken with.\n \"\"\"\n sech_out, x = sech(times, amp=amp, center=center, sigma=sigma, ret_x=True)\n sech_out_deriv = - sech_out * np.tanh(x) / sigma\n if ret_sech:\n return sech_out_deriv, sech_out\n return sech_out_deriv\n\n\ndef gaussian_square(times: np.ndarray, amp: complex, center: float, width: float,\n sigma: float, zeroed_width: Optional[float] = None) -> np.ndarray:\n r\"\"\"Continuous gaussian square pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude.\n center: Center of the square pulse component.\n width: Width of the square pulse component.\n sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.\n 
zeroed_width: Subtract baseline of gaussian square pulse\n to enforce $\\OmegaSquare(center \\pm zeroed_width/2)=0$.\n \"\"\"\n square_start = center-width/2\n square_stop = center+width/2\n if zeroed_width:\n zeroed_width = min(width, zeroed_width)\n gauss_zeroed_width = zeroed_width-width\n else:\n gauss_zeroed_width = None\n\n funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma,\n zeroed_width=gauss_zeroed_width, rescale_amp=True),\n functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma,\n zeroed_width=gauss_zeroed_width, rescale_amp=True),\n functools.partial(constant, amp=amp)]\n condlist = [times <= square_start, times >= square_stop]\n return np.piecewise(times.astype(np.complex_), condlist, funclist)\n\n\ndef drag(times: np.ndarray, amp: complex, center: float, sigma: float, beta: float,\n zeroed_width: Optional[float] = None, rescale_amp: bool = False) -> np.ndarray:\n r\"\"\"Continuous Y-only correction DRAG pulse for standard nonlinear oscillator (SNO) [1].\n\n [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K.\n Analytic control methods for high-fidelity unitary operations\n in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011).\n\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n beta: Y correction amplitude. For the SNO this is $\\beta=-\\frac{\\lambda_1^2}{4\\Delta_2}$.\n Where $\\lambds_1$ is the relative coupling strength between the first excited and second\n excited states and $\\Delta_2$ is the detuning between the respective excited states.\n zeroed_width: Subtract baseline to gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)-\\Omega_g(center\\pm zeroed_width/2)=amp$.\n\n \"\"\"\n gauss_deriv, gauss = gaussian_deriv(times, amp=amp, center=center, sigma=sigma,\n ret_gaussian=True)\n if zeroed_width is not None:\n gauss, scale_factor = _fix_gaussian_width(gauss, amp=amp, center=center, sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n ret_scale_factor=True)\n gauss_deriv *= scale_factor\n\n return gauss + 1j*beta*gauss_deriv\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Tests for quantum channel representation transformations.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom qiskit import QiskitError\nfrom qiskit.quantum_info.states import DensityMatrix\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.channel.choi import Choi\nfrom qiskit.quantum_info.operators.channel.superop import SuperOp\nfrom qiskit.quantum_info.operators.channel.kraus import Kraus\nfrom qiskit.quantum_info.operators.channel.stinespring import Stinespring\nfrom qiskit.quantum_info.operators.channel.ptm import PTM\nfrom qiskit.quantum_info.operators.channel.chi import Chi\nfrom .channel_test_case import ChannelTestCase\n\n\nclass TestTransformations(ChannelTestCase):\n \"\"\"Tests for Operator channel representation.\"\"\"\n\n unitary_mat = [\n ChannelTestCase.UI, ChannelTestCase.UX, ChannelTestCase.UY,\n ChannelTestCase.UZ, ChannelTestCase.UH\n ]\n unitary_choi = [\n ChannelTestCase.choiI, ChannelTestCase.choiX, ChannelTestCase.choiY,\n ChannelTestCase.choiZ, ChannelTestCase.choiH\n ]\n unitary_chi = [\n ChannelTestCase.chiI, ChannelTestCase.chiX, ChannelTestCase.chiY,\n ChannelTestCase.chiZ, ChannelTestCase.chiH\n ]\n unitary_sop = [\n ChannelTestCase.sopI, ChannelTestCase.sopX, ChannelTestCase.sopY,\n ChannelTestCase.sopZ, ChannelTestCase.sopH\n ]\n unitary_ptm = [\n ChannelTestCase.ptmI, ChannelTestCase.ptmX, ChannelTestCase.ptmY,\n ChannelTestCase.ptmZ, ChannelTestCase.ptmH\n ]\n\n def test_operator_to_operator(self):\n \"\"\"Test Operator to Operator transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Operator(mat)\n chan2 = Operator(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_choi(self):\n \"\"\"Test Operator to Choi transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Choi(choi)\n chan2 = Choi(Operator(mat))\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_superop(self):\n \"\"\"Test Operator to SuperOp transformation.\"\"\"\n # Test unitary channels\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(Operator(mat))\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_kraus(self):\n \"\"\"Test Operator to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Kraus(mat)\n chan2 = Kraus(Operator(mat))\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_stinespring(self):\n \"\"\"Test Operator to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Stinespring(mat)\n chan2 = Stinespring(Operator(chan1))\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_chi(self):\n \"\"\"Test Operator to Chi transformation.\"\"\"\n # Test unitary channels\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Chi(chi)\n chan2 = Chi(Operator(mat))\n self.assertEqual(chan1, chan2)\n\n def test_operator_to_ptm(self):\n \"\"\"Test Operator to PTM transformation.\"\"\"\n # Test unitary 
channels\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(Operator(mat))\n self.assertEqual(chan1, chan2)\n\n def test_choi_to_operator(self):\n \"\"\"Test Choi to Operator transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Operator(mat)\n chan2 = Operator(Choi(choi))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n\n def test_choi_to_choi(self):\n \"\"\"Test Choi to Choi transformation.\"\"\"\n # Test unitary channels\n for choi in self.unitary_choi:\n chan1 = Choi(choi)\n chan2 = Choi(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = Choi(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_choi_to_superop(self):\n \"\"\"Test Choi to SuperOp transformation.\"\"\"\n # Test unitary channels\n for choi, sop in zip(self.unitary_choi, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(Choi(choi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(Choi(self.depol_choi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_choi_to_kraus(self):\n \"\"\"Test Choi to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Kraus(mat)\n chan2 = Kraus(Choi(choi))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Kraus(self.depol_kraus(p)))\n output = rho.evolve(Kraus(Choi(self.depol_choi(p))))\n self.assertEqual(output, target)\n\n def test_choi_to_stinespring(self):\n \"\"\"Test Choi to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Kraus(mat)\n chan2 = Kraus(Choi(choi))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Stinespring(self.depol_stine(p)))\n output = rho.evolve(Stinespring(Choi(self.depol_choi(p))))\n self.assertEqual(output, target)\n\n def test_choi_to_chi(self):\n \"\"\"Test Choi to Chi transformation.\"\"\"\n # Test unitary channels\n for choi, chi in zip(self.unitary_choi, self.unitary_chi):\n chan1 = Chi(chi)\n chan2 = Chi(Choi(choi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(Choi(self.depol_choi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_choi_to_ptm(self):\n \"\"\"Test Choi to PTM transformation.\"\"\"\n # Test unitary channels\n for choi, ptm in zip(self.unitary_choi, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(Choi(choi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(Choi(self.depol_choi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_superop_to_operator(self):\n \"\"\"Test SuperOp to Operator transformation.\"\"\"\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = Operator(mat)\n chan2 = Operator(SuperOp(sop))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n self.assertRaises(QiskitError, Operator, 
SuperOp(self.depol_sop(0.5)))\n\n def test_superop_to_choi(self):\n \"\"\"Test SuperOp to Choi transformation.\"\"\"\n # Test unitary channels\n for choi, sop in zip(self.unitary_choi, self.unitary_sop):\n chan1 = Choi(choi)\n chan2 = Choi(SuperOp(sop))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0, 0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = Choi(SuperOp(self.depol_sop(p)))\n self.assertEqual(chan1, chan2)\n\n def test_superop_to_superop(self):\n \"\"\"Test SuperOp to SuperOp transformation.\"\"\"\n # Test unitary channels\n for sop in self.unitary_sop:\n chan1 = SuperOp(sop)\n chan2 = SuperOp(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0, 0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_superop_to_kraus(self):\n \"\"\"Test SuperOp to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = Kraus(mat)\n chan2 = Kraus(SuperOp(sop))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Kraus(self.depol_kraus(p)))\n output = rho.evolve(Kraus(SuperOp(self.depol_sop(p))))\n self.assertEqual(output, target)\n\n def test_superop_to_stinespring(self):\n \"\"\"Test SuperOp to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = Stinespring(mat)\n chan2 = Stinespring(SuperOp(sop))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Stinespring(self.depol_stine(p)))\n output = rho.evolve(Stinespring(SuperOp(self.depol_sop(p))))\n self.assertEqual(output, target)\n\n def test_superop_to_chi(self):\n \"\"\"Test SuperOp to Chi transformation.\"\"\"\n # Test unitary channels\n for sop, ptm in zip(self.unitary_sop, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(SuperOp(sop))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(SuperOp(self.depol_sop(p)))\n self.assertEqual(chan1, chan2)\n\n def test_superop_to_ptm(self):\n \"\"\"Test SuperOp to PTM transformation.\"\"\"\n # Test unitary channels\n for sop, ptm in zip(self.unitary_sop, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(SuperOp(sop))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(SuperOp(self.depol_sop(p)))\n self.assertEqual(chan1, chan2)\n\n def test_kraus_to_operator(self):\n \"\"\"Test Kraus to Operator transformation.\"\"\"\n for mat in self.unitary_mat:\n chan1 = Operator(mat)\n chan2 = Operator(Kraus(mat))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n self.assertRaises(QiskitError, Operator, Kraus(self.depol_kraus(0.5)))\n\n def test_kraus_to_choi(self):\n \"\"\"Test Kraus to Choi transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Choi(choi)\n chan2 = Choi(Kraus(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = 
Choi(Kraus(self.depol_kraus(p)))\n self.assertEqual(chan1, chan2)\n\n def test_kraus_to_superop(self):\n \"\"\"Test Kraus to SuperOp transformation.\"\"\"\n # Test unitary channels\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(Kraus(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(Kraus(self.depol_kraus(p)))\n self.assertEqual(chan1, chan2)\n\n def test_kraus_to_kraus(self):\n \"\"\"Test Kraus to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Kraus(mat)\n chan2 = Kraus(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Kraus(self.depol_kraus(p))\n chan2 = Kraus(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_kraus_to_stinespring(self):\n \"\"\"Test Kraus to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Stinespring(mat)\n chan2 = Stinespring(Kraus(mat))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Stinespring(self.depol_stine(p)))\n output = rho.evolve(Stinespring(Kraus(self.depol_kraus(p))))\n self.assertEqual(output, target)\n\n def test_kraus_to_chi(self):\n \"\"\"Test Kraus to Chi transformation.\"\"\"\n # Test unitary channels\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Chi(chi)\n chan2 = Chi(Kraus(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(Kraus(self.depol_kraus(p)))\n self.assertEqual(chan1, chan2)\n\n def test_kraus_to_ptm(self):\n \"\"\"Test Kraus to PTM transformation.\"\"\"\n # Test unitary channels\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(Kraus(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(Kraus(self.depol_kraus(p)))\n self.assertEqual(chan1, chan2)\n\n def test_stinespring_to_operator(self):\n \"\"\"Test Stinespring to Operator transformation.\"\"\"\n for mat in self.unitary_mat:\n chan1 = Operator(mat)\n chan2 = Operator(Stinespring(mat))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n self.assertRaises(QiskitError, Operator,\n Stinespring(self.depol_stine(0.5)))\n\n def test_stinespring_to_choi(self):\n \"\"\"Test Stinespring to Choi transformation.\"\"\"\n # Test unitary channels\n for mat, choi in zip(self.unitary_mat, self.unitary_choi):\n chan1 = Choi(choi)\n chan2 = Choi(Stinespring(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = Choi(Stinespring(self.depol_stine(p)))\n self.assertEqual(chan1, chan2)\n\n def test_stinespring_to_superop(self):\n \"\"\"Test Stinespring to SuperOp transformation.\"\"\"\n # Test unitary channels\n for mat, sop in zip(self.unitary_mat, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(Kraus(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(Stinespring(self.depol_stine(p)))\n self.assertEqual(chan1, chan2)\n\n def 
test_stinespring_to_kraus(self):\n \"\"\"Test Stinespring to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Kraus(mat)\n chan2 = Kraus(Stinespring(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Kraus(self.depol_kraus(p))\n chan2 = Kraus(Stinespring(self.depol_stine(p)))\n self.assertEqual(chan1, chan2)\n\n def test_stinespring_to_stinespring(self):\n \"\"\"Test Stinespring to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat in self.unitary_mat:\n chan1 = Stinespring(mat)\n chan2 = Stinespring(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Stinespring(self.depol_stine(p))\n chan2 = Stinespring(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_stinespring_to_chi(self):\n \"\"\"Test Stinespring to Chi transformation.\"\"\"\n # Test unitary channels\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Chi(chi)\n chan2 = Chi(Stinespring(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(Stinespring(self.depol_stine(p)))\n self.assertEqual(chan1, chan2)\n\n def test_stinespring_to_ptm(self):\n \"\"\"Test Stinespring to PTM transformation.\"\"\"\n # Test unitary channels\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(Stinespring(mat))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(Stinespring(self.depol_stine(p)))\n self.assertEqual(chan1, chan2)\n\n def test_chi_to_operator(self):\n \"\"\"Test Chi to Operator transformation.\"\"\"\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Operator(mat)\n chan2 = Operator(Chi(chi))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n self.assertRaises(QiskitError, Operator, Chi(self.depol_chi(0.5)))\n\n def test_chi_to_choi(self):\n \"\"\"Test Chi to Choi transformation.\"\"\"\n # Test unitary channels\n for chi, choi in zip(self.unitary_chi, self.unitary_choi):\n chan1 = Choi(choi)\n chan2 = Choi(Chi(chi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = Choi(Chi(self.depol_chi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_chi_to_superop(self):\n \"\"\"Test Chi to SuperOp transformation.\"\"\"\n # Test unitary channels\n for chi, sop in zip(self.unitary_chi, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(Chi(chi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(Chi(self.depol_chi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_chi_to_kraus(self):\n \"\"\"Test Chi to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Kraus(mat)\n chan2 = Kraus(Chi(chi))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Kraus(self.depol_kraus(p)))\n output = rho.evolve(Kraus(Chi(self.depol_chi(p))))\n self.assertEqual(output, target)\n\n def test_chi_to_stinespring(self):\n \"\"\"Test Chi to Stinespring transformation.\"\"\"\n # Test 
unitary channels\n for mat, chi in zip(self.unitary_mat, self.unitary_chi):\n chan1 = Kraus(mat)\n chan2 = Kraus(Chi(chi))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Stinespring(self.depol_stine(p)))\n output = rho.evolve(Stinespring(Chi(self.depol_chi(p))))\n self.assertEqual(output, target)\n\n def test_chi_to_chi(self):\n \"\"\"Test Chi to Chi transformation.\"\"\"\n # Test unitary channels\n for chi in self.unitary_chi:\n chan1 = Chi(chi)\n chan2 = Chi(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(chan1)\n self.assertEqual(chan1, chan2)\n\n def test_chi_to_ptm(self):\n \"\"\"Test Chi to PTM transformation.\"\"\"\n # Test unitary channels\n for chi, ptm in zip(self.unitary_chi, self.unitary_ptm):\n chan1 = PTM(ptm)\n chan2 = PTM(Chi(chi))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(Chi(self.depol_chi(p)))\n self.assertEqual(chan1, chan2)\n\n def test_ptm_to_operator(self):\n \"\"\"Test PTM to Operator transformation.\"\"\"\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = Operator(mat)\n chan2 = Operator(PTM(ptm))\n self.assertTrue(\n matrix_equal(chan2.data, chan1.data, ignore_phase=True))\n self.assertRaises(QiskitError, Operator, PTM(self.depol_ptm(0.5)))\n\n def test_ptm_to_choi(self):\n \"\"\"Test PTM to Choi transformation.\"\"\"\n # Test unitary channels\n for ptm, choi in zip(self.unitary_ptm, self.unitary_choi):\n chan1 = Choi(choi)\n chan2 = Choi(PTM(ptm))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Choi(self.depol_choi(p))\n chan2 = Choi(PTM(self.depol_ptm(p)))\n self.assertEqual(chan1, chan2)\n\n def test_ptm_to_superop(self):\n \"\"\"Test PTM to SuperOp transformation.\"\"\"\n # Test unitary channels\n for ptm, sop in zip(self.unitary_ptm, self.unitary_sop):\n chan1 = SuperOp(sop)\n chan2 = SuperOp(PTM(ptm))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = SuperOp(self.depol_sop(p))\n chan2 = SuperOp(PTM(self.depol_ptm(p)))\n self.assertEqual(chan1, chan2)\n\n def test_ptm_to_kraus(self):\n \"\"\"Test PTM to Kraus transformation.\"\"\"\n # Test unitary channels\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = Kraus(mat)\n chan2 = Kraus(PTM(ptm))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Kraus(self.depol_kraus(p)))\n output = rho.evolve(Kraus(PTM(self.depol_ptm(p))))\n self.assertEqual(output, target)\n\n def test_ptm_to_stinespring(self):\n \"\"\"Test PTM to Stinespring transformation.\"\"\"\n # Test unitary channels\n for mat, ptm in zip(self.unitary_mat, self.unitary_ptm):\n chan1 = Kraus(mat)\n chan2 = Kraus(PTM(ptm))\n self.assertTrue(\n matrix_equal(chan2.data[0], chan1.data[0], ignore_phase=True))\n # Test depolarizing channels\n rho = DensityMatrix(np.diag([1, 0]))\n for p in [0.25, 0.5, 0.75, 1]:\n target = rho.evolve(Stinespring(self.depol_stine(p)))\n output = rho.evolve(Stinespring(PTM(self.depol_ptm(p))))\n self.assertEqual(output, target)\n\n def test_ptm_to_chi(self):\n 
\"\"\"Test PTM to Chi transformation.\"\"\"\n # Test unitary channels\n for chi, ptm in zip(self.unitary_chi, self.unitary_ptm):\n chan1 = Chi(chi)\n chan2 = Chi(PTM(ptm))\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = Chi(self.depol_chi(p))\n chan2 = Chi(PTM(self.depol_ptm(p)))\n self.assertEqual(chan1, chan2)\n\n def test_ptm_to_ptm(self):\n \"\"\"Test PTM to PTM transformation.\"\"\"\n # Test unitary channels\n for ptm in self.unitary_ptm:\n chan1 = PTM(ptm)\n chan2 = PTM(chan1)\n self.assertEqual(chan1, chan2)\n # Test depolarizing channels\n for p in [0.25, 0.5, 0.75, 1]:\n chan1 = PTM(self.depol_ptm(p))\n chan2 = PTM(chan1)\n self.assertEqual(chan1, chan2)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc\n\n\"\"\"Sampler decorator module for sampling of continuous pulses to discrete pulses to be\nexposed to user.\n\nSome atypical boilerplate has been added to solve the problem of decorators not preserving\ntheir wrapped function signatures. Below we explain the problem that samplers solve and how\nwe implement this.\n\nA sampler is a function that takes an continuous pulse function with signature:\n ```python\n def f(times: np.ndarray, *args, **kwargs) -> np.ndarray:\n ...\n ```\nand returns a new function:\n def f(duration: int, *args, **kwargs) -> SamplePulse:\n ...\n\nSamplers are used to build up pulse commands from continuous pulse functions.\n\nIn Python the creation of a dynamic function that wraps another function will cause\nthe underlying signature and documentation of the underlying function to be overwritten.\nIn order to circumvent this issue the Python standard library provides the decorator\n`functools.wraps` which allows the programmer to expose the names and signature of the\nwrapped function as those of the dynamic function.\n\nSamplers are implemented by creating a function with signature\n @sampler\n def left(continuous_pulse: Callable, duration: int, *args, **kwargs)\n ...\n\nThis will create a sampler function for `left`. Since it is a dynamic function it would not\nhave the docstring of `left` available too `help`. This could be fixed by wrapping with\n`functools.wraps` in the `sampler`, but this would then cause the signature to be that of the\nsampler function which is called on the continuous pulse, below:\n `(continuous_pulse: Callable, duration: int, *args, **kwargs)``\nThis is not correct for the sampler as the output sampled functions accept only a function.\nFor the standard sampler we get around this by not using `functools.wraps` and\nexplicitly defining our samplers such as `left`, `right` and `midpoint` and\ncalling `sampler` internally on the function that implements the sampling schemes such as\n`left_sample`, `right_sample` and `midpoint_sample` respectively. 
See `left` for an example of this.\n\n\nIn this way our standard samplers will expose the proper help signature, but a user can\nstill create their own sampler with\n @sampler\n def custom_sampler(time, *args, **kwargs):\n ...\nHowever, in this case it will be missing documentation of the underlying sampling methods.\nWe believe that the definition of custom samplers will be rather infrequent.\n\nHowever, users will frequently apply sampler instances too continuous pulses. Therefore, a different\napproach was required for sampled continuous functions (the output of an continuous pulse function\ndecorated by a sampler instance).\n\nA sampler instance is a decorator that may be used to wrap continuous pulse functions such as\nlinear below:\n```python\n @left\n def linear(times: np.ndarray, m: float, b: float) -> np.ndarray:\n ```Linear test function\n Args:\n times: Input times.\n m: Slope.\n b: Intercept\n Returns:\n np.ndarray\n ```\n return m*times+b\n```\nWhich after decoration may be called with a duration rather than an array of times\n ```python\n duration = 10\n pulse_command = linear(10, 0.1, 0.1)\n ```\nIf one calls help on `linear` they will find\n ```\n linear(duration:int, *args, **kwargs) -> numpy.ndarray\n Discretized continuous pulse function: `linear` using\n sampler: `_left`.\n\n The first argument (time) of the continuous pulse function has been replaced with\n a discretized `duration` of type (int).\n\n Args:\n duration (int)\n *args: Remaining arguments of continuous pulse function.\n See continuous pulse function documentation below.\n **kwargs: Remaining kwargs of continuous pulse function.\n See continuous pulse function documentation below.\n\n Sampled continuous function:\n\n function linear in module test.python.pulse.test_samplers\n linear(x:numpy.ndarray, m:float, b:float) -> numpy.ndarray\n Linear test function\n Args:\n x: Input times.\n m: Slope.\n b: Intercept\n Returns:\n np.ndarray\n ```\nThis is partly because `functools.wraps` has been used on the underlying function.\nThis in itself is not sufficient as the signature of the sampled function has\n`duration`, whereas the signature of the continuous function is `time`.\n\nThis is achieved by removing `__wrapped__` set by `functools.wraps` in order to preserve\nthe correct signature and also applying `_update_annotations` and `_update_docstring`\nto the generated function which corrects the function annotations and adds an informative\ndocstring respectively.\n\nThe user therefore has access to the correct sampled function docstring in its entirety, while\nstill seeing the signature for the continuous pulse function and all of its arguments.\n\"\"\"\n\nimport functools\nfrom typing import Callable\nimport textwrap\nimport pydoc\n\nimport numpy as np\n\nimport qiskit.pulse.commands as commands\n\nfrom . 
import strategies\n\n\ndef _update_annotations(discretized_pulse: Callable) -> Callable:\n \"\"\"Update annotations of discretized continuous pulse function with duration.\n\n Args:\n discretized_pulse: Discretized decorated continuous pulse.\n \"\"\"\n undecorated_annotations = list(discretized_pulse.__annotations__.items())\n decorated_annotations = undecorated_annotations[1:]\n decorated_annotations.insert(0, ('duration', int))\n discretized_pulse.__annotations__ = dict(decorated_annotations)\n return discretized_pulse\n\n\ndef _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:\n \"\"\"Update annotations of discretized continuous pulse function.\n\n Args:\n discretized_pulse: Discretized decorated continuous pulse.\n sampler_inst: Applied sampler.\n \"\"\"\n wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')\n header, body = wrapped_docstring.split('\\n', 1)\n body = textwrap.indent(body, ' ')\n wrapped_docstring = header+body\n updated_ds = \"\"\"\n Discretized continuous pulse function: `{continuous_name}` using\n sampler: `{sampler_name}`.\n\n The first argument (time) of the continuous pulse function has been replaced with\n a discretized `duration` of type (int).\n\n Args:\n duration (int)\n *args: Remaining arguments of continuous pulse function.\n See continuous pulse function documentation below.\n **kwargs: Remaining kwargs of continuous pulse function.\n See continuous pulse function documentation below.\n\n Sampled continuous function:\n\n {continuous_doc}\n \"\"\".format(continuous_name=discretized_pulse.__name__,\n sampler_name=sampler_inst.__name__,\n continuous_doc=wrapped_docstring)\n\n discretized_pulse.__doc__ = updated_ds\n return discretized_pulse\n\n\ndef sampler(sample_function: Callable) -> Callable:\n \"\"\"Sampler decorator base method.\n\n Samplers are used for converting an continuous function to a discretized pulse.\n\n They operate on a function with the signature:\n `def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`\n Where `times` is a numpy array of floats with length n_times and the output array\n is a complex numpy array with length n_times. 
The output of the decorator is an\n instance of `FunctionalPulse` with signature:\n `def g(duration: int, *args, **kwargs) -> SamplePulse`\n\n Note if your continuous pulse function outputs a `complex` scalar rather than a\n `np.ndarray`, you should first vectorize it before applying a sampler.\n\n\n This class implements the sampler boilerplate for the sampler.\n\n Args:\n sample_function: A sampler function to be decorated.\n \"\"\"\n\n def generate_sampler(continuous_pulse: Callable) -> Callable:\n \"\"\"Return a decorated sampler function.\"\"\"\n\n @functools.wraps(continuous_pulse)\n def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:\n \"\"\"Replace the call to the continuous function with a call to the sampler applied\n to the analytic pulse function.\"\"\"\n sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)\n return np.asarray(sampled_pulse, dtype=np.complex_)\n\n # Update type annotations for wrapped continuous function to be discrete\n call_sampler = _update_annotations(call_sampler)\n # Update docstring with that of the sampler and include sampled function documentation.\n call_sampler = _update_docstring(call_sampler, sample_function)\n # Unset wrapped to return base sampler signature\n # but still get rest of benefits of wraps\n # such as __name__, __qualname__\n call_sampler.__dict__.pop('__wrapped__')\n # wrap with functional pulse\n return commands.functional_pulse(call_sampler)\n\n return generate_sampler\n\n\ndef left(continuous_pulse: Callable) -> Callable:\n r\"\"\"Left sampling strategy decorator.\n\n See `pulse.samplers.sampler` for more information.\n\n For `duration`, return:\n $$\\{f(t) \\in \\mathbb{C} | t \\in \\mathbb{Z} \\wedge 0<=t<\\texttt{duration}\\}$$\n\n Args:\n continuous_pulse: To sample.\n \"\"\"\n\n return sampler(strategies.left_sample)(continuous_pulse)\n\n\ndef right(continuous_pulse: Callable) -> Callable:\n r\"\"\"Right sampling strategy decorator.\n\n See `pulse.samplers.sampler` for more information.\n\n For `duration`, return:\n $$\\{f(t) \\in \\mathbb{C} | t \\in \\mathbb{Z} \\wedge 0<t<=\\texttt{duration}\\}$$\n\n Args:\n continuous_pulse: To sample.\n \"\"\"\n\n return sampler(strategies.right_sample)(continuous_pulse)\n\n\ndef midpoint(continuous_pulse: Callable) -> Callable:\n r\"\"\"Midpoint sampling strategy decorator.\n\n See `pulse.samplers.sampler` for more information.\n\n For `duration`, return:\n $$\\{f(t+0.5) \\in \\mathbb{C} | t \\in \\mathbb{Z} \\wedge 0<=t<\\texttt{duration}\\}$$\n\n Args:\n continuous_pulse: To sample.\n \"\"\"\n return sampler(strategies.midpoint_sample)(continuous_pulse)\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nChoi-matrix representation of a Quantum Channel.\n\n\nFor a quantum channel E, the Choi matrix Λ is defined by:\nΛ = sum_{i,j} |i⟩⟨j|⊗E(|i⟩⟨j|)\n\nEvolution of a density matrix with respect to the Choi-matrix is given by:\n\n E(ρ) = Tr_{1}[Λ.(ρ^T⊗I)]\n\nSee [1] for further details.\n\nReferences:\n [1] C.J. Wood, J.D. Biamonte, D.G. Cory, Quant. Inf. Comp. 
15, 0579-0811 (2015)\n Open access: arXiv:1111.6950 [quant-ph]\n\"\"\"\n\nfrom numbers import Number\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel\nfrom qiskit.quantum_info.operators.channel.superop import SuperOp\nfrom qiskit.quantum_info.operators.channel.transformations import _to_choi\nfrom qiskit.quantum_info.operators.channel.transformations import _bipartite_tensor\n\n\nclass Choi(QuantumChannel):\n \"\"\"Choi-matrix representation of a quantum channel\"\"\"\n\n def __init__(self, data, input_dims=None, output_dims=None):\n \"\"\"Initialize a quantum channel Choi matrix operator.\n\n Args:\n data (QuantumCircuit or\n Instruction or\n BaseOperator or\n matrix): data to initialize superoperator.\n input_dims (tuple): the input subsystem dimensions.\n [Default: None]\n output_dims (tuple): the output subsystem dimensions.\n [Default: None]\n\n Raises:\n QiskitError: if input data cannot be initialized as a\n Choi matrix.\n\n Additional Information\n ----------------------\n If the input or output dimensions are None, they will be\n automatically determined from the input data. If the input data is\n a Numpy array of shape (4**N, 4**N) qubit systems will be used. If\n the input operator is not an N-qubit operator, it will assign a\n single subsystem with dimension specified by the shape of the input.\n \"\"\"\n # If the input is a raw list or matrix we assume that it is\n # already a Choi matrix.\n if isinstance(data, (list, np.ndarray)):\n # Initialize from raw numpy or list matrix.\n choi_mat = np.array(data, dtype=complex)\n # Determine input and output dimensions\n dim_l, dim_r = choi_mat.shape\n if dim_l != dim_r:\n raise QiskitError('Invalid Choi-matrix input.')\n if input_dims:\n input_dim = np.product(input_dims)\n if output_dims:\n output_dim = np.product(output_dims)\n if output_dims is None and input_dims is None:\n output_dim = int(np.sqrt(dim_l))\n input_dim = dim_l // output_dim\n elif input_dims is None:\n input_dim = dim_l // output_dim\n elif output_dims is None:\n output_dim = dim_l // input_dim\n # Check dimensions\n if input_dim * output_dim != dim_l:\n raise QiskitError(\"Invalid shape for input Choi-matrix.\")\n else:\n # Otherwise we initialize by conversion from another Qiskit\n # object into the QuantumChannel.\n if isinstance(data, (QuantumCircuit, Instruction)):\n # If the input is a Terra QuantumCircuit or Instruction we\n # convert it to a SuperOp\n data = SuperOp._init_instruction(data)\n else:\n # We use the QuantumChannel init transform to initialize\n # other objects into a QuantumChannel or Operator object.\n data = self._init_transformer(data)\n input_dim, output_dim = data.dim\n # Now that the input is an operator we convert it to a Choi object\n choi_mat = _to_choi(data.rep, data._data, input_dim, output_dim)\n if input_dims is None:\n input_dims = data.input_dims()\n if output_dims is None:\n output_dims = data.output_dims()\n # Check and format input and output dimensions\n input_dims = self._automatic_dims(input_dims, input_dim)\n output_dims = self._automatic_dims(output_dims, output_dim)\n super().__init__('Choi', choi_mat, input_dims, output_dims)\n\n @property\n def _bipartite_shape(self):\n \"\"\"Return the shape for bipartite matrix\"\"\"\n return (self._input_dim, self._output_dim, self._input_dim,\n self._output_dim)\n\n def 
conjugate(self):\n \"\"\"Return the conjugate of the QuantumChannel.\"\"\"\n return Choi(np.conj(self._data), self.input_dims(), self.output_dims())\n\n def transpose(self):\n \"\"\"Return the transpose of the QuantumChannel.\"\"\"\n # Make bipartite matrix\n d_in, d_out = self.dim\n data = np.reshape(self._data, (d_in, d_out, d_in, d_out))\n # Swap input and output indices on bipartite matrix\n data = np.transpose(data, (1, 0, 3, 2))\n # Transpose channel has input and output dimensions swapped\n data = np.reshape(data, (d_in * d_out, d_in * d_out))\n return Choi(\n data, input_dims=self.output_dims(), output_dims=self.input_dims())\n\n def compose(self, other, qargs=None, front=False):\n \"\"\"Return the composition channel self∘other.\n\n Args:\n other (QuantumChannel): a quantum channel.\n qargs (list): a list of subsystem positions to compose other on.\n front (bool): If False compose in standard order other(self(input))\n otherwise compose in reverse order self(other(input))\n [default: False]\n\n Returns:\n Choi: The composition channel as a Choi object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel or\n has incompatible dimensions.\n \"\"\"\n if qargs is not None:\n return Choi(\n SuperOp(self).compose(other, qargs=qargs, front=front))\n\n # Convert to Choi matrix\n if not isinstance(other, Choi):\n other = Choi(other)\n # Check dimensions match up\n if front and self._input_dim != other._output_dim:\n raise QiskitError(\n 'input_dim of self must match output_dim of other')\n if not front and self._output_dim != other._input_dim:\n raise QiskitError(\n 'input_dim of other must match output_dim of self')\n\n if front:\n first = np.reshape(other._data, other._bipartite_shape)\n second = np.reshape(self._data, self._bipartite_shape)\n input_dim = other._input_dim\n input_dims = other.input_dims()\n output_dim = self._output_dim\n output_dims = self.output_dims()\n else:\n first = np.reshape(self._data, self._bipartite_shape)\n second = np.reshape(other._data, other._bipartite_shape)\n input_dim = self._input_dim\n input_dims = self.input_dims()\n output_dim = other._output_dim\n output_dims = other.output_dims()\n\n # Contract Choi matrices for composition\n data = np.reshape(\n np.einsum('iAjB,AkBl->ikjl', first, second),\n (input_dim * output_dim, input_dim * output_dim))\n return Choi(data, input_dims, output_dims)\n\n def power(self, n):\n \"\"\"The matrix power of the channel.\n\n Args:\n n (int): compute the matrix power of the superoperator matrix.\n\n Returns:\n Choi: the matrix power of the SuperOp converted to a Choi channel.\n\n Raises:\n QiskitError: if the input and output dimensions of the\n QuantumChannel are not equal, or the power is not an integer.\n \"\"\"\n if n > 0:\n return super().power(n)\n return Choi(SuperOp(self).power(n))\n\n def tensor(self, other):\n \"\"\"Return the tensor product channel self ⊗ other.\n\n Args:\n other (QuantumChannel): a quantum channel.\n\n Returns:\n Choi: the tensor product channel self ⊗ other as a Choi object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel.\n \"\"\"\n # Convert other to Choi\n if not isinstance(other, Choi):\n other = Choi(other)\n\n input_dims = other.input_dims() + self.input_dims()\n output_dims = other.output_dims() + self.output_dims()\n data = _bipartite_tensor(\n self._data,\n other.data,\n shape1=self._bipartite_shape,\n shape2=other._bipartite_shape)\n return Choi(data, input_dims, output_dims)\n\n def expand(self, other):\n \"\"\"Return the tensor product 
channel other ⊗ self.\n\n Args:\n other (QuantumChannel): a quantum channel.\n\n Returns:\n Choi: the tensor product channel other ⊗ self as a Choi object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel.\n \"\"\"\n # Convert other to Choi\n if not isinstance(other, Choi):\n other = Choi(other)\n\n input_dims = self.input_dims() + other.input_dims()\n output_dims = self.output_dims() + other.output_dims()\n data = _bipartite_tensor(\n other.data,\n self._data,\n shape1=other._bipartite_shape,\n shape2=self._bipartite_shape)\n return Choi(data, input_dims, output_dims)\n\n def add(self, other):\n \"\"\"Return the QuantumChannel self + other.\n\n Args:\n other (QuantumChannel): a quantum channel.\n\n Returns:\n Choi: the linear addition self + other as a Choi object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel or\n has incompatible dimensions.\n \"\"\"\n if not isinstance(other, Choi):\n other = Choi(other)\n if self.dim != other.dim:\n raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n return Choi(self._data + other.data, self._input_dims,\n self._output_dims)\n\n def subtract(self, other):\n \"\"\"Return the QuantumChannel self - other.\n\n Args:\n other (QuantumChannel): a quantum channel.\n\n Returns:\n Choi: the linear subtraction self - other as Choi object.\n\n Raises:\n QiskitError: if other cannot be converted to a channel or\n has incompatible dimensions.\n \"\"\"\n if not isinstance(other, Choi):\n other = Choi(other)\n if self.dim != other.dim:\n raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n return Choi(self._data - other.data, self._input_dims,\n self._output_dims)\n\n def multiply(self, other):\n \"\"\"Return the QuantumChannel self + other.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n Choi: the scalar multiplication other * self as a Choi object.\n\n Raises:\n QiskitError: if other is not a valid scalar.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n return Choi(other * self._data, self._input_dims, self._output_dims)\n\n def _evolve(self, state, qargs=None):\n \"\"\"Evolve a quantum state by the quantum channel.\n\n Args:\n state (DensityMatrix or Statevector): The input state.\n qargs (list): a list of quantum state subsystem positions to apply\n the quantum channel on.\n\n Returns:\n DensityMatrix: the output quantum state as a density matrix.\n\n Raises:\n QiskitError: if the quantum channel dimension does not match the\n specified quantum state subsystem dimensions.\n \"\"\"\n return SuperOp(self)._evolve(state, qargs)\n" ]
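The Choi-matrix definition and the Tr_1 evolution rule quoted in the module docstring above can be verified independently of Qiskit. The following editorial sketch (not part of the dataset record; all names are ad hoc) builds Λ for a single-qubit unitary channel in plain NumPy and checks that Tr_1[Λ(ρ^T⊗I)] reproduces UρU†:

import numpy as np

d = 2
U = np.array([[0, 1], [1, 0]], dtype=complex)    # X gate; the channel is E(rho) = U rho U^dag

# Choi matrix: Lambda = sum_{i,j} |i><j| (x) E(|i><j|)
Lam = np.zeros((d * d, d * d), dtype=complex)
for i in range(d):
    for j in range(d):
        Eij = np.zeros((d, d), dtype=complex)
        Eij[i, j] = 1
        Lam += np.kron(Eij, U @ Eij @ U.conj().T)

# Evolution: E(rho) = Tr_1[Lambda . (rho^T (x) I)]
rho = np.array([[1, 0], [0, 0]], dtype=complex)  # density matrix |0><0|
M = (Lam @ np.kron(rho.T, np.eye(d))).reshape(d, d, d, d)
out = np.einsum('kikj->ij', M)                   # partial trace over the first subsystem
assert np.allclose(out, U @ rho @ U.conj().T)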
[ [ "numpy.array", "numpy.identity", "numpy.sqrt", "numpy.testing.assert_allclose" ], [ "numpy.arange", "numpy.abs" ], [ "numpy.dot", "numpy.conj", "numpy.sqrt", "numpy.allclose", "numpy.kron", "numpy.array" ], [ "numpy.cosh", "numpy.asarray", "numpy.cos", "numpy.sin", "numpy.floor", "numpy.exp", "numpy.array", "numpy.tanh" ], [ "numpy.diag" ], [ "numpy.asarray" ], [ "numpy.product", "numpy.conj", "numpy.einsum", "numpy.sqrt", "numpy.reshape", "numpy.transpose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YodaEmbedding/experiments
[ "567c6a1c18fac2d951fe2af54aaa4917b7d529d2" ]
[ "py/pyanaconda/tf_straightline.py" ]
[ "# Fit a straight line, of the form y=m*x+b\n\nimport tensorflow as tf\n\nxs = [0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00] # Features\nys = [-0.82, -0.94, -0.12, 0.26, 0.39, 0.64, 1.02, 1.00] # Labels\n\n\"\"\"\nwith enough iterations, initial weights dont matter since our cost function is convex.\n\"\"\"\nm_initial = -0.5 # Initial guesses\nb_initial = 1.0\n\n\n\"\"\"\ndefine free variables to be solved. we'll be taking partial derivatives of m and b with respect to j (cost).\n\"\"\"\nm = tf.Variable(m_initial) # Parameters\nb = tf.Variable(b_initial)\n\nerror = 0.0\nfor i in range(len(xs)):\n y_model = m * xs[i] + b # Output of the model aka yhat\n error += (\n ys[i] - y_model\n ) ** 2 # Difference squared - this is the \"cost\" to be minimized\n\n\n\"\"\"\nonce cost function is defined, use gradient descent to find global minimum.\n\"\"\"\n\noperation = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(\n error\n) # Does one step\n\n\nwith tf.Session() as session:\n session.run(tf.initialize_all_variables()) # Initialize session\n\n _EPOCHS = 10000 # number of \"sweeps\" across data\n\n for iteration in range(_EPOCHS):\n session.run(operation)\n\nprint(\"Slope:\", m.eval(), \"Intercept:\", b.eval())\n" ]
[ [ "tensorflow.Session", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
CDInstitute/CompoNET.github.io
[ "f978f31a78628c70b1033ed02a75de8a50aa905d" ]
[ "dataset/generator.py" ]
[ "import bpy, bmesh\nfrom math import radians\nimport numpy as np\nimport os\n\nimport random\nimport sys\n\nsys.path.append(\"D:\\ProgramFiles\\Anaconda\\envs\\py37\\Lib\\site-packages\")\nfrom pyntcloud import PyntCloud\n\nfile_dir = os.path.dirname(__file__)\nsys.path.append(file_dir)\n\nfrom blender_utils import extrude, gancio, get_min_max\nfrom dataset_config import *\nfrom material import Material\nfrom module import *\nfrom point_cloud import PointCloud\nfrom renderer import Renderer\nfrom shp2obj import Collection, deselect_all\nfrom volume import *\n\n\nclass BuildingFactory:\n\t\"\"\"\n\tFactory that produces volumes.\n\t\"\"\"\n\tdef __init__(self):\n\n\t\tself.mapping = {'Patio': (Patio, 4),\n\t\t\t 'L': (LBuilding, 2),\n\t\t\t 'C': (CBuilding, 3),\n\t\t\t 'Single': (ComposedBuilding, 1),\n\t\t\t 'Skyscraper': (Skyscraper, 1),\n\t\t\t 'Closedpatio': (ClosedPatio, 2),\n\t\t\t 'Equalpatio': (PatioEqual, 4)}\n\t\tself.mapping = {x: y for x, y in self.mapping.items() if x in BUILDINGS}\n\n\tdef produce(self, name=None):\n\t\t\"\"\"\n\t\tFunction that produces a volume based on the given scale.\n\t\t:param scale: tuple (width, length, height)\n\t\t:return: generated volume, Volume\n\t\t\"\"\"\n\t\tif name:\n\t\t\tname = name.lower().capitalize()\n\t\t\tassert name in list(self.mapping.keys()), \"{} building typology \" \\\n\t\t\t \"does not exist\".format(name)\n\t\telse:\n\t\t\tname = np.random.choice(list(self.mapping.keys()))\n\t\t_volumes = CollectionFactory().produce(number=self.mapping[name][1]).collection\n\t\treturn self.mapping[name][0](_volumes)\n\n\nclass ComposedBuilding:\n\t\"\"\"\n\tClass that represents a building composed of one or several volumes.\n\t\"\"\"\n\tdef __init__(self, volumes):\n\t\tassert isinstance(volumes, list), \"Expected volumes as list,\" \\\n\t\t \" got {}\".format(type(volumes))\n\t\tself.volumes = volumes\n\n\t# def demolish(self):\n\t# \tfor v in self.volumes:\n\t# \t\ttry:\n\t# \t\t\tdeselect_all()\n\t# \t\t\tv.mesh.select_set(True)\n\t# \t\t\tbpy.ops.object.delete()\n\t# \t\texcept Exception:\n\t# \t\t\tpass\n\n\tdef demolish(self):\n\t\tfor _mesh in bpy.data.collections['Building'].objects:\n\t\t\ttry:\n\t\t\t\tdeselect_all()\n\t\t\t\t_mesh.select_set(True)\n\t\t\t\tbpy.ops.object.delete()\n\t\t\texcept Exception:\n\t\t\t\tpass\n\n\tdef get_bb(self):\n\t\t\"\"\"\n\t\tFunction that gets the bounding box of the Building\n\t\t:return: bounding box, list of float\n\t\t[width_from, height_from, width_to, height_to]\n\t\t\"\"\"\n\t\tx_min, y_min, x_max, y_max = list(get_min_max(self.volumes[0].mesh, 0)) + \\\n\t\t list(get_min_max(self.volumes[0].mesh, 1))\n\t\tfor v in self.volumes[1:]:\n\t\t\t_bb = list(get_min_max(v.mesh, 0)) + \\\n\t\t\t list(get_min_max(v.mesh, 1))\n\t\t\tx_min, y_min = min(_bb[0], x_min), min(_bb[1], y_min)\n\t\t\tx_max, y_max = max(_bb[2], x_max), max(_bb[3], y_max)\n\t\treturn [round(x_min, 3), round(y_min, 3), round(x_max, 3),\n\t\t round(y_max, 3)]\n\n\tdef make(self):\n\t\t\"\"\"\n\t\tFunction that composes the building based on its typology.\n\t\t:return:\n\t\t\"\"\"\n\t\tself._correct_volumes()\n\t\treturn self.volumes\n\n\tdef save(self, filename='test', ext='obj'):\n\t\t\"\"\"\n\t\tFunction that saves the building as a separate file.\n\t\t:param filename: name of the file to write without extension, str,\n\t\tdefault='test'\n\t\t:param ext: file extension, str, default='obj'\n\t\t:return:\n\t\t\"\"\"\n\t\tdeselect_all()\n\t\tfor v in self.volumes:\n\t\t\tv.mesh.select_set(True)\n\t\tif not MODEL_SAVE in 
os.listdir(file_dir):\n\t\t\tos.mkdir(file_dir + '/' + MODEL_SAVE)\n\t\tif ext == 'obj':\n\t\t\tbpy.ops.export_scene.obj(filepath='{}/Models/{}.{}'.format(file_dir,\n\t\t\t filename,\n\t\t\t ext),\n\t\t\t use_selection=False)\n\t\telif ext == 'ply':\n\t\t\tbpy.ops.export_mesh.ply(\n\t\t\t\tfilepath='{}/{}/{}.{}'.format(file_dir, CLOUD_SAVE, filename, ext),\n\t\t\t\tuse_selection=False)\n\t\telse:\n\t\t\treturn NotImplementedError\n\n\tdef _correct_volumes(self):\n\t\tfor v in self.volumes:\n\t\t\tv.create()\n\n\nclass LBuilding(ComposedBuilding):\n\t\"\"\"\n\tClass that represents an L-shaped building.\n\t\"\"\"\n\tdef __init__(self, volumes):\n\t\tComposedBuilding.__init__(self, volumes)\n\n\tdef make(self):\n\t\t# add rotation if len > width (or vice versa)\n\t\tself._correct_volumes()\n\t\tgancio(self.volumes[0], self.volumes[1], 0, 0, 0)\n\t\treturn self.volumes\n\n\tdef _correct_volumes(self):\n\n\t\tif np.random.random() < 0.5: # same height\n\t\t\t_height = max(min(self.volumes[0].height,\n\t\t\t min(self.volumes[0].width * 3, MAX_HEIGHT)),\n\t\t\t MIN_HEIGHT)\n\t\t\tfor v in self.volumes:\n\t\t\t\tv.height = _height\n\n\t\tfor v in self.volumes:\n\t\t\tv.create()\n\t\tself.volumes = sorted(self.volumes, key=lambda x: x.length,\n\t\t reverse=True)\n\n\nclass CBuilding(LBuilding):\n\tdef __init__(self, volumes):\n\t\tLBuilding.__init__(self, volumes)\n\t\tassert len(\n\t\t\tvolumes) == 3, \"C-shaped bulding can be composed of 3 volumes\" \\\n\t\t \"only, got {}\".format(len(volumes))\n\n\tdef make(self):\n\t\tself._correct_volumes()\n\t\tfor v in self.volumes[1:]:\n\t\t\tif v.width < v.length:\n\t\t\t\tv.mesh.rotation_euler[2] = radians(90)\n\n\t\tgancio(self.volumes[0], self.volumes[1], 0, 1, 0)\n\t\tgancio(self.volumes[0], self.volumes[2], 0, 0, 0)\n\t\treturn self.volumes\n\n\nclass Patio(ComposedBuilding):\n\t\"\"\"\n\tClass that represents an L-shaped building.\n\t\"\"\"\n\tdef __init__(self, volumes):\n\t\tComposedBuilding.__init__(self, volumes)\n\t\tassert len(volumes) in [2, 4], \"Patio bulding can be composed of 4 \" \\\n\t\t \"volumes only, got {}\".format(len(volumes))\n\t\tself.width = [3, 12]\n\t\tself.length = [6, 20]\n\n\tdef make(self):\n\n\t\tself._correct_volumes()\n\t\tif np.random.random() < 0.5:\n\t\t\t# circular linkage between buildings\n\t\t\tfor i, _v in enumerate(self.volumes[:-1]):\n\t\t\t\tif i % 2 == 0:\n\t\t\t\t\tself.volumes[i + 1].mesh.rotation_euler[2] = radians(90)\n\t\t\t\tif i == 0:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 0, 1, 1)\n\t\t\t\telif i == 1:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 1, 1, 0)\n\t\t\t\telif i == 2:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 0, 0, 0)\n\t\telse:\n\t\t\t# cap linkage between buildings\n\t\t\tfor i, _v in enumerate(self.volumes[:-1]):\n\t\t\t\tif i % 2 == 0:\n\t\t\t\t\tself.volumes[i + 1].mesh.rotation_euler[2] = radians(90)\n\t\t\t\tif i == 0:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 1, 1, 0)\n\t\t\t\telif i == 1:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 1, 1, 0)\n\t\t\t\telif i == 2:\n\t\t\t\t\tgancio(_v, self.volumes[i + 1], 1, 0, 1)\n\n\t\treturn self.volumes\n\n\tdef _correct_volumes(self):\n\t\tfor v in self.volumes:\n\t\t\tv.width = min(max(v.width, self.width[0]), self.width[1])\n\t\t\tv.length = v.width * (np.random.random() + 1.5)\n\t\t\tv.height = max(min(v.height, min(v.width * 3, MAX_HEIGHT)), MIN_HEIGHT)\n\t\t\tv.create()\n\t\tself.volumes = sorted(self.volumes, key=lambda x: x.length)\n\n\nclass PatioEqual(Patio):\n\t\"\"\"\n\tClass that represents a Patio building with equal height 
volumes.\n\t\"\"\"\n\n\tdef __init__(self, volumes):\n\t\tPatio.__init__(self, volumes)\n\n\tdef _correct_volumes(self):\n\t\t_height = max(min(self.volumes[0].height, min(self.volumes[0].width * 3,\n\t\t MAX_HEIGHT)), MIN_HEIGHT)\n\t\tfor v in self.volumes:\n\t\t\tv.width = min(max(v.width, self.width[0]), self.width[1])\n\t\t\tv.length = v.width * (np.random.random() + 1.5)\n\t\t\tv.height = _height\n\t\t\tv.create()\n\t\tself.volumes = sorted(self.volumes, key=lambda x: x.length)\n\n\nclass ClosedPatio(Patio):\n\t\"\"\"\n\tClass that represents a Patio building with equal height volumes.\n\t\"\"\"\n\n\tdef __init__(self, volumes):\n\t\tPatio.__init__(self, volumes)\n\t\tassert len(self.volumes) == 2, \"Expected 2 volumes for Closed Patio, \" \\\n\t\t \"got {}\".format(len(self.volumes))\n\n\tdef _correct_volumes(self):\n\t\tfor v in self.volumes:\n\t\t\tv.width = min(max(v.width, self.width[0]), self.width[1])\n\t\t\tv.length = v.width * (np.random.random() + 1.5)\n\t\t\tv.height = max(min(v.height, min(v.width * 3, MAX_HEIGHT)),\n\t\t MIN_HEIGHT)\n\t\t\tv.create()\n\n\t\tfor v in self.volumes[:2]:\n\t\t\tv1 = Factory().produce(scale=(v.width, v.length, v.height))\n\t\t\tself.volumes.append(v1)\n\n\nclass TBuilding(ComposedBuilding):\n\t\"\"\"\n\tClass that represents a T-shaped building with random location of the\n\tsecond volume along the side of the first volume.\n\t\"\"\"\n\tdef __init__(self, volumes):\n\t\tComposedBuilding.__init__(self, volumes)\n\t\tassert len(volumes) == 2, \"L-shaped bulding can be composed of 2 volumes\" \\\n\t\t \"only, got {}\".format(len(volumes))\n\n\tdef make(self):\n\t\tself._correct_volumes()\n\t\tx_min, x_max = get_min_max(self.volumes[0].mesh, 0) # width\n\t\ty_min, y_max = get_min_max(self.volumes[0].mesh, 1) # length\n\n\t\tif random.random() < 0.5:\n\t\t\tself.volumes[1].mesh.location[0] = random.choice(np.linspace(int(x_min + (self.volumes[1].length)),\n\t\t\t\t int(x_max - (self.volumes[1].length)), 10))\n\t\t\tself.volumes[1].mesh.location[1] = y_min - self.volumes[1].width\n\n\t\telse:\n\t\t\tself.volumes[1].mesh.location[1] = random.choice(np.linspace(\n\t\t\t\tint(y_min + (self.volumes[1].width)),\n\t\t\t\tint(y_max - (self.volumes[1].width)), 10))\n\t\t\tself.volumes[1].mesh.location[0] = x_min - self.volumes[1].length\n\n\t\treturn self.volumes\n\n\nclass Skyscraper(ComposedBuilding):\n\t\"\"\"\n\tClass that represents a Skyscraper building with height significantly larger\n\tthan width or length of the building.\n\t\"\"\"\n\n\tdef __init__(self, volumes):\n\t\tComposedBuilding.__init__(self, volumes)\n\n\tdef _correct_volumes(self):\n\t\tfor _v in self.volumes:\n\t\t\t_v.height = np.random.randint(100, 200)\n\t\t\t_v.length = max(30, _v.length)\n\t\t\t_v.width = max(30, _v.width)\n\t\t\t_v.create()\n\n\nclass EBuilding(ComposedBuilding):\n\t\"\"\"\n\tClass that represents a E-shaped building with random locations of the\n\tvolumes along the side of the first volume.\n\t\"\"\"\n\tdef __init__(self, volumes):\n\t\tComposedBuilding.__init__(self, volumes)\n\n\tdef make(self):\n\n\t\tself._correct_volumes()\n\t\tx_min, x_max = get_min_max(self.volumes[0].mesh, 0) # width\n\t\ty_min, y_max = get_min_max(self.volumes[0].mesh, 1) # length\n\n\t\tif random.random() < 0.5:\n\t\t\tfor _volume in self.volumes[1:]:\n\t\t\t\t_volume.mesh.location[0] = random.choice(np.linspace(int(x_min + (_volume.length)),\n\t\t\t\t\t int(x_max - (_volume.length)), 10))\n\t\t\t\t_volume.mesh.location[1] = y_min - _volume.width\n\n\t\telse:\n\t\t\tfor _volume in 
self.volumes[1:]:\n\t\t\t\t_volume.mesh.location[1] = random.choice(np.linspace(\n\t\t\t\t\tint(y_min + (_volume.width)),\n\t\t\t\t\tint(y_max - (_volume.width)), 10))\n\t\t\t\t_volume.mesh.location[0] = x_min - _volume.length\n\n\t\treturn self.volumes\n\n\nif __name__ == '__main__':\n\n\tNUM_IMAGES = 1\n\tfor image in range(NUM_IMAGES):\n\t\tf = CollectionFactory()\n\t\tcollection = f.produce(number=np.random.randint(1, 4))\n\t\tbuilding = ComposedBuilding(collection.collection)\n\t\tbuilding.make()\n\n\t\taxis = 1\n\n\t\tfor j, v in enumerate(collection.collection):\n\n\t\t\tmod = GridApplier(Window)\n\t\t\tw = Window()\n\t\t\tw.connect(v, 1)\n\t\t\tstep = (np.random.randint(1, 6), np.random.randint(1, 6))\n\t\t\tif j == 0:\n\t\t\t\tmod.apply(w, step=step, offset=(2.0, 2.0, 2.0, 1.0))\n\t\t\telse:\n\t\t\t\tmod.apply(w, step=step)\n\n\t\t\tw = Window()\n\t\t\tw.connect(v, 0, 0)\n\t\t\tstep = (np.random.randint(1, 6), np.random.randint(1, 6))\n\t\t\tif j == 0:\n\t\t\t\tmod.apply(w, step=step, offset=(2.0, 2.0, 2.0, 1.0))\n\t\t\telse:\n\t\t\t\tmod.apply(w, step=step)\n\n\t\trenderer = Renderer(mode=0)\n\t\trenderer.render(filename='building_{}'.format(image))\n\t\tbuilding.save(image)\n\t\tbuilding.save(image, ext='ply')\n\t\tbuilding.demolish()\n\t\tcloud = PointCloud()\n\t\tcloud.make(image)\n\t\t# cloud = PyntCloud.from_file(\"Models/{}.obj\".format(image))\n\t\t# cloud.to_file(\"{}.ply\".format(image))\n\t\t# cloud.to_file(\"{}.npz\".format(image))\n\n\n" ]
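The bounding-box logic in ComposedBuilding.get_bb above is the one part of the generator that needs no Blender state; an editorial sketch isolating it (hypothetical helper name, plain Python, not code from the repository) shows the merge of per-volume [x_min, y_min, x_max, y_max] boxes so it can be tested outside bpy:

def merge_bb(boxes):
    # Enclosing box of a list of [x_min, y_min, x_max, y_max] boxes, rounded as in get_bb.
    x_min = min(b[0] for b in boxes)
    y_min = min(b[1] for b in boxes)
    x_max = max(b[2] for b in boxes)
    y_max = max(b[3] for b in boxes)
    return [round(x_min, 3), round(y_min, 3), round(x_max, 3), round(y_max, 3)]

print(merge_bb([[0, 0, 4, 2], [3, -1, 6, 5]]))   # -> [0, -1, 6, 5]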
[ [ "numpy.random.random", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
genisplaja/cunet
[ "58a200c84810f20099265e30200327eefddb3eff", "58a200c84810f20099265e30200327eefddb3eff", "58a200c84810f20099265e30200327eefddb3eff" ]
[ "cunet/ftanet/network/ftanet.py", "cunet/ftanet/evaluator.py", "cunet/ftanet/loader.py" ]
[ "import tensorflow as tf\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras import Input, Model\r\nfrom tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, \\\r\n GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, \\\r\n Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\r\n\r\n\r\ndef SF_Module(x_list, n_channel, reduction, limitation):\r\n ## Split\r\n fused = None\r\n for x_s in x_list:\r\n if fused==None:\r\n fused = x_s\r\n else:\r\n fused = Add()([fused, x_s])\r\n \r\n ## Fuse\r\n fused = GlobalAveragePooling2D()(fused)\r\n fused = BatchNormalization()(fused)\r\n fused = Dense(max(n_channel // reduction, limitation), activation='selu')(fused)\r\n\r\n ## Select\r\n masks = []\r\n for i in range(len(x_list)):\r\n masks.append(Dense(n_channel)(fused))\r\n mask_stack = Lambda(K.stack, arguments={'axis': -1})(masks)\r\n mask_stack = Softmax(axis=-2)(mask_stack) # (n_channel, n_kernel)\r\n\r\n selected = None\r\n for i, x_s in enumerate(x_list):\r\n mask = Lambda(lambda z: z[:, :, i])(mask_stack)\r\n mask = Reshape((1, 1, n_channel))(mask)\r\n x_s = Multiply()([x_s, mask])\r\n if selected==None:\r\n selected = x_s\r\n else:\r\n selected = Add()([selected, x_s])\r\n\r\n return selected\r\n\r\n\r\ndef FTA_Module(x, shape, kt, kf):\r\n x = BatchNormalization()(x)\r\n\r\n ## Residual\r\n x_r = Conv2D(shape[2], (1, 1), padding='same', activation='relu')(x)\r\n\r\n ## Time Attention\r\n # Attn Map (1, T, C), FC\r\n a_t = Lambda(K.mean, arguments={'axis': -3})(x)\r\n a_t = Conv1D(shape[2], kt, padding='same', activation='selu')(a_t)\r\n a_t = Conv1D(shape[2], kt, padding='same', activation='selu')(a_t) #2\r\n a_t = Softmax(axis=-2)(a_t)\r\n a_t = Reshape((1, shape[1], shape[2]))(a_t)\r\n # Reweight\r\n x_t = Conv2D(shape[2], (3, 3), padding='same', activation='selu')(x)\r\n x_t = Conv2D(shape[2], (5, 5), padding='same', activation='selu')(x_t)\r\n x_t = Multiply()([x_t, a_t])\r\n\r\n # Frequency Attention\r\n # Attn Map (F, 1, C), Conv1D\r\n a_f = Lambda(K.mean, arguments={'axis': -2})(x)\r\n a_f = Conv1D(shape[2], kf, padding='same', activation='selu')(a_f)\r\n a_f = Conv1D(shape[2], kf, padding='same', activation='selu')(a_f)\r\n a_f = Softmax(axis=-2)(a_f)\r\n a_f = Reshape((shape[0], 1, shape[2]))(a_f)\r\n # Reweight\r\n x_f = Conv2D(shape[2], (3, 3), padding='same', activation='selu')(x)\r\n x_f = Conv2D(shape[2], (5, 5), padding='same', activation='selu')(x_f)\r\n x_f = Multiply()([x_f, a_f])\r\n\r\n return x_r, x_t, x_f\r\n\r\n\r\ndef create_model(input_shape=(320, 430, 3)):\r\n visible = Input(shape=input_shape)\r\n x = BatchNormalization()(visible)\r\n\r\n ## Bottom\r\n # bm = BatchNormalization()(x)\r\n bm = x\r\n bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 80\r\n bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 20\r\n bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 5\r\n bm = Conv2D(1, (5, 1), padding='valid', strides=(5, 1), activation='selu')(bm) # 1\r\n\r\n # 保持高分辨率,关注细节\r\n shape=input_shape\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 32), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 32, 4, 4)\r\n x = MaxPooling2D((2, 2))(x)\r\n\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0]//2, shape[1]//2, 64), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 64, 4, 4)\r\n x = MaxPooling2D((2, 2))(x)\r\n\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0]//4, shape[1]//4, 128), 3, 3)\r\n x = SF_Module([x_r, x_t, 
x_f], 128, 4, 4)\r\n\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0]//4, shape[1]//4, 128), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 128, 4, 4)\r\n\r\n x = UpSampling2D((2, 2))(x)\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0]//2, shape[1]//2, 64), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 64, 4, 4)\r\n\r\n x = UpSampling2D((2, 2))(x)\r\n x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 32), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 32, 4, 4)\r\n \r\n x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 1), 3, 3)\r\n x = SF_Module([x_r, x_t, x_f], 1, 4, 4)\r\n x = Concatenate(axis=1)([bm, x])\r\n \r\n # Softmax\r\n x = Lambda(K.squeeze, arguments={'axis': -1})(x) # (321, 430)\r\n x = Softmax(axis=-2)(x)\r\n\r\n return Model(inputs=visible, outputs=x)", "import re\r\nimport numpy as np\r\nimport pandas as pd\r\nimport numpy\r\nimport glob\r\nfrom tqdm import tqdm\r\nimport random\r\nimport pickle\r\nfrom numpy.core.fromnumeric import std\r\nimport mir_eval\r\nfrom cfp import cfp_process\r\nfrom tensorflow import keras\r\n\r\nimport os\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.metrics import categorical_accuracy\r\nfrom loader import load_data_for_test, load_data\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\nfrom constant import *\r\nfrom loader import *\r\n\r\nfrom network.ftanet import create_model\r\nfrom loader import get_CenFreq\r\n\r\n\r\ndef std_normalize(data): \r\n # normalize as 64 bit, to avoid numpy warnings\r\n data = data.astype(np.float64)\r\n mean = np.mean(data)\r\n std = np.std(data)\r\n data = data.copy() - mean\r\n if std != 0.:\r\n data = data / std\r\n return data.astype(np.float32)\r\n\r\n\r\ndef est(output, CenFreq, time_arr):\r\n # output: (freq_bins, T)\r\n CenFreq[0] = 0\r\n est_time = time_arr\r\n est_freq = np.argmax(output, axis=0)\r\n\r\n for j in range(len(est_freq)):\r\n est_freq[j] = CenFreq[int(est_freq[j])]\r\n\r\n if len(est_freq) != len(est_time):\r\n new_length = min(len(est_freq), len(est_time))\r\n est_freq = est_freq[:new_length]\r\n est_time = est_time[:new_length]\r\n\r\n est_arr = np.concatenate((est_time[:, None], est_freq[:, None]), axis=1)\r\n\r\n return est_arr\r\n\r\n\r\ndef iseg(data):\r\n # data: (batch_size, freq_bins, seg_len)\r\n new_length = data.shape[0] * data.shape[-1] # T = batch_size * seg_len\r\n new_data = np.zeros((data.shape[1], new_length)) # (freq_bins, T)\r\n for i in range(len(data)):\r\n new_data[:, i * data.shape[-1] : (i + 1) * data.shape[-1]] = data[i]\r\n return new_data\r\n\r\n\r\ndef get_est_arr(model, x_list, y_list, batch_size):\r\n for i in range(len(x_list)):\r\n x = x_list[i]\r\n y = y_list[i]\r\n \r\n # predict and concat\r\n num = x.shape[0] // batch_size\r\n if x.shape[0] % batch_size != 0:\r\n num += 1\r\n preds = []\r\n for j in range(num):\r\n # x: (batch_size, freq_bins, seg_len)\r\n if j == num - 1:\r\n X = x[j * batch_size:]\r\n length = x.shape[0] - j * batch_size\r\n else:\r\n X = x[j * batch_size: (j + 1) * batch_size]\r\n length = batch_size\r\n \r\n # for k in range(length): # normalization\r\n # X[k] = std_normalize(X[k])\r\n prediction = model.predict(X, length)\r\n preds.append(prediction)\r\n \r\n # (num*bs, freq_bins, seg_len) to (freq_bins, T)\r\n preds = np.concatenate(preds, axis=0)\r\n preds = iseg(preds)\r\n \r\n # ground-truth\r\n \r\n # trnasform to f0ref\r\n CenFreq = get_CenFreq(StartFreq=31, StopFreq=1250, NumPerOct=60)\r\n # CenFreq = get_CenFreq(StartFreq=20, 
StopFreq=2048, NumPerOct=60)\r\n # CenFreq = get_CenFreq(StartFreq=81, StopFreq=600, NumPerOct=111)\r\n # CenFreq = get_CenFreq(StartFreq=81, StopFreq=600, NumPerOct=190)\r\n est_arr = est(preds, CenFreq, y)\r\n \r\n # VR, VFA, RPA, RCA, OA\r\n return est_arr\r\n\r\n\r\ndef get_pitch_track(filename):\r\n print('Loading model...')\r\n model = create_model(input_shape=IN_SHAPE)\r\n model.load_weights(\r\n filepath='./model/baseline/OA/best_OA'\r\n ).expect_partial()\r\n print('Model loaded!')\r\n \r\n xlist = []\r\n timestamps = []\r\n # feature = np.load(data_folder + 'cfp/' + fname + '.npy')\r\n feature, _, time_arr = cfp_process(filename, sr=8000, hop=80)\r\n print('feature', np.shape(feature))\r\n \r\n data = batchize_test(feature, size=128)\r\n xlist.append(data)\r\n timestamps.append(time_arr)\r\n \r\n estimation = get_est_arr(model, xlist, timestamps, batch_size=16)\r\n\r\n return estimation[:, 0], estimation[:, 1]\r\n\r\n\r\ndef save_pitch_track_to_dataset(filename, est_time, est_freq):\r\n # Write txt annotation to file\r\n with open(filename, 'w') as f:\r\n for i, j in zip(est_time, est_freq):\r\n f.write(\"{}, {}\\n\".format(i, j))\r\n print('Saved with exit to {}'.format(filename))\r\n\r\n\r\ndef select_vocal_track(ypath, lpath):\r\n ycsv = pd.read_csv(ypath, names=[\"time\", \"freq\"])\r\n gt0 = ycsv['time'].values\r\n gt0 = gt0[:, np.newaxis]\r\n \r\n gt1 = ycsv['freq'].values\r\n gt1 = gt1[:, np.newaxis]\r\n \r\n z = np.zeros(gt1.shape)\r\n \r\n f = open(lpath, 'r')\r\n lines = f.readlines()\r\n \r\n for line in lines:\r\n \r\n if 'start_time' in line.split(',')[0]:\r\n continue\r\n st = float(line.split(',')[0])\r\n et = float(line.split(',')[1])\r\n sid = line.split(',')[2]\r\n for i in range(len(gt1)):\r\n if st < gt0[i, 0] < et and 'singer' in sid:\r\n z[i, 0] = gt1[i, 0]\r\n \r\n gt = np.concatenate((gt0, z), axis=1)\r\n return gt\r\n \r\n\r\ndef get_files_to_test(fp, artist, artists_to_track_mapping):\r\n # Get track to train\r\n tracks_to_test = artists_to_track_mapping[artist]\r\n\r\n # Get filenames to train\r\n files_to_test = []\r\n for track in tracks_to_test:\r\n files_to_test.append(fp + 'audio/' + track + '.wav')\r\n \r\n return files_to_test\r\n\r\n\r\nif __name__ == '__main__':\r\n fp_synth = '/home/genis/Saraga-Melody-Synth/'\r\n fp_hindustani = '/home/genis/Hindustani-Synth-Dataset/'\r\n fp_medley = '/mnt/sda1/genis/carnatic_melody_dataset/resources/medley_aux/'\r\n fp_western_synth = '/home/genis/Western-Synth-Dataset_2/'\r\n \r\n #dataset_filelist_nosynth = glob.glob(fp_nosynth + 'audio/*.wav')\r\n #with open(fp_nosynth + 'artists_to_track_mapping.pkl', 'rb') as map_file:\r\n # artists_to_track_mapping_nosynth = pickle.load(map_file)\r\n\r\n dataset_filelist_synth = glob.glob(fp_synth + 'audio/*.wav')\r\n with open(fp_synth + 'artists_to_track_mapping.pkl', 'rb') as map_file:\r\n artists_to_track_mapping = pickle.load(map_file)\r\n \r\n mahati_test = get_files_to_test(fp_synth, 'Mahati', artists_to_track_mapping)\r\n sumithra_test = get_files_to_test(fp_synth, 'Sumithra Vasudev', artists_to_track_mapping)\r\n modhumudi_test = get_files_to_test(fp_synth, 'Modhumudi Sudhakar', artists_to_track_mapping)\r\n chertala_test = get_files_to_test(fp_synth, 'Cherthala Ranganatha Sharma', artists_to_track_mapping)\r\n test_carnatic_list = [mahati_test, sumithra_test, modhumudi_test, chertala_test]\r\n\r\n test_files = []\r\n for i in test_carnatic_list:\r\n test_files = test_files + random.sample(i, 50)\r\n\r\n #carnatic_synth = test_model(test_files)\r\n\r\n '''\r\n 
mahati_test_synth = get_files_to_test(fp_synth, 'Mahati', artists_to_track_mapping_synth)\r\n sumithra_test_synth = get_files_to_test(fp_synth, 'Sumithra Vasudev', artists_to_track_mapping_synth)\r\n modhumudi_test_synth = get_files_to_test(fp_synth, 'Modhumudi Sudhakar', artists_to_track_mapping_synth)\r\n chertala_test_synth = get_files_to_test(fp_synth, 'Cherthala Ranganatha Sharma', artists_to_track_mapping_synth)\r\n test_carnatic_list = [mahati_test_synth, sumithra_test_synth, modhumudi_test_synth, chertala_test_synth]\r\n \r\n test_files = []\r\n for i in test_carnatic_list:\r\n test_files = test_files + random.sample(i, 50)\r\n \r\n medley_tracks = glob.glob(fp_medley + 'audio/*.wav')\r\n \r\n hindustani_testing_files = [\r\n 'Raag_Kedar_43.wav',\r\n 'Raag_Kedar_10.wav',\r\n 'Raag_Kalyan_61.wav',\r\n 'Raag_Kalyan_39.wav',\r\n 'Raag_Kalyan_67.wav',\r\n 'Raag_Kalyan_61.wav',\r\n 'Raag_Kedar_20.wav',\r\n 'Raag_Kedar_30.wav',\r\n 'Raag_Kedar_43.wav',\r\n 'Raag_Jog_47.wav',\r\n 'Raag_Jog_37.wav',\r\n 'Raag_Jog_29.wav',\r\n 'Raag_Jog_18.wav',\r\n 'Raag_Jog_12.wav',\r\n 'Raag_Jog_2.wav',\r\n 'Raag_Saraswati_6.wav',\r\n 'Raag_Saraswati_25.wav',\r\n 'Raag_Bhimpalasi_23.wav',\r\n 'Raag_Bhimpalasi_33.wav',\r\n 'Raag_Bhimpalasi_43.wav',\r\n 'Raag_Bhimpalasi_45.wav',\r\n 'Raag_Bhimpalasi_50.wav',\r\n 'Raag_Shree_66.wav',\r\n 'Raag_Dhani_8.wav',\r\n 'Raag_Dhani_25.wav',\r\n 'Raag_Dhani_33.wav',\r\n 'Raag_Dhani_58.wav',\r\n 'Raag_Dhani_35.wav',\r\n 'Raag_Dhani_57.wav',\r\n 'Raag_Bahar_44.wav',\r\n 'Raag_Bahar_29.wav',\r\n 'Multani_17.wav',\r\n 'Raag_Rageshri_61.wav',\r\n 'Raag_Rageshri_36.wav',\r\n 'Maru_Bihag_4.wav',\r\n 'Raageshree_10.wav',\r\n 'Raageshree_12.wav',\r\n 'Raag_Desh_9.wav',\r\n 'Raag_Bhoopali_83.wav',\r\n 'Bhairavi_Bhajan_6.wav',\r\n 'Raag_Bairagi_22.wav',\r\n 'Raag_Multani_11.wav',\r\n 'Raga_Shree_-_Khayal_38.wav',\r\n 'Todi_16.wav',\r\n 'Todi_10.wav',\r\n 'Todi_17.wav',\r\n 'Todi_3.wav',\r\n 'Sudh_Sarang_21.wav',\r\n 'Sudh_Kalyan_25.wav',\r\n 'Raga_Shree_-_Khayal_96.wav',\r\n 'Raga_Lalit_-_Khayal_76.wav',\r\n 'Raag_Yaman_20.wav',\r\n 'Raag_Sooha_Kanada_28.wav',\r\n 'Raag_Sohani_15.wav',\r\n 'Raag_Shree_81.wav',\r\n 'Raag_Sawani_16.wav',\r\n 'Raag_Ramdasi_Malhar_15.wav',\r\n 'Raag_Puriya_55.wav',\r\n 'Raag_Poorva_96.wav',\r\n 'Raag_Poorva_77.wav',\r\n 'Raag_Paraj_25.wav',\r\n 'Raag_Multani_74.wav',\r\n 'Raag_Megh_41.wav',\r\n 'Raag_Malkauns_71.wav',\r\n 'Raag_Lalita_Gauri_24.wav',\r\n 'Raag_Bhoopali_11.wav',\r\n 'Raag_Bhimpalasi_56.wav',\r\n 'Raag_Bibhas_24.wav',\r\n 'Raag_Bihag_11.wav',\r\n 'Raag_Bihag_27.wav',\r\n 'Raag_Bhatiyar_7.wav',\r\n 'Raag_Ahir_Bhairon_58.wav',\r\n 'Raag_Ahir_Bhairon_3.wav',\r\n 'Nirgun_Bhajan_15.wav',\r\n 'Nat_Bhairon_8.wav',\r\n 'Multani_0.wav',\r\n 'Malkauns_1.wav',\r\n 'Malkauns_3.wav',\r\n 'Malkauns_10.wav',\r\n 'Kalavati_10.wav',\r\n 'Aahir_Bhairon_16.wav',\r\n ]\r\n '''\r\n \r\n hindustani_testing_files = glob.glob(fp_hindustani + 'audio/*.wav')\r\n #hindustani_testing_filenames = [fp_hindustani + 'audio/' + x for x in hindustani_testing_files]\r\n\r\n testing_files = [x for x in hindustani_testing_files if 'Deepki' in x] + \\\r\n [x for x in hindustani_testing_files if 'Raag_Jog' in x] + \\\r\n [x for x in hindustani_testing_files if 'Raag_Dhani' in x] + \\\r\n [x for x in hindustani_testing_files if 'Todi' in x] + \\\r\n [x for x in hindustani_testing_files if 'Malkauns' in x] + \\\r\n [x for x in hindustani_testing_files if 'Piloo' in x]\r\n hindustani_synth = test_model(testing_files)\r\n\r\n '''\r\n #mahati = test_model(mahati_test)\r\n 
#sumithra = test_model(sumithra_test, 'sumithra_nosynth')\r\n #modhumudi = test_model(modhumudi_test, 'modhumudi_nosynth')\r\n #chertala = test_model(chertala_test, 'chertala_nosynth')\r\n #mahati_synth = test_model(mahati_test_synth, 'mahati')\r\n #sumithra_synth = test_model(sumithra_test_synth, 'sumithra')\r\n #modhumudi_synth = test_model(modhumudi_test_synth, 'modhumudi')\r\n #carnatic_synth = test_model(test_files, 'carnatic')\r\n\r\n #hindustani_synth = test_model(hindustani_testing_files, 'hindustani')\r\n #carnatic_synth = test_model(test_files, 'carnatic')\r\n #scores_synth = test_model_on_medley(medley_tracks)\r\n\r\n adc_synth_files = [x for x in files_to_test if 'daisy' in x] + \\\r\n [x for x in files_to_test if 'pop' in x] + \\\r\n [x for x in files_to_test if 'opera' in x]\r\n \r\n mirex05_synth_files = [x for x in files_to_test if 'train' in x]\r\n #medley_synth_files = [x for x in files_to_test if x not in adc_synth_files and 'train' not in x]\r\n adc_scores = test_model(adc_synth_files)\r\n mirex_scores = test_model(mirex05_synth_files)\r\n #medley_scores = test_model(medley_synth_files)\r\n\r\n #scores_western_synth = test_model(files_to_test)\r\n adc_filelist = glob.glob(\r\n '/home/genis/FTANet-melodic/eval_datasets/ADC2004/*.wav'\r\n )\r\n scores_adc = test_model_on_ADC(adc_filelist)\r\n mirex05_filelist = glob.glob(\r\n '/home/genis/FTANet-melodic/eval_datasets/MIREX05/*.wav'\r\n )\r\n scores_mirex05 = test_model_on_MIREX05(mirex05_filelist)\r\n\r\n print('Mahati:', mahati_synth)\r\n print('Sumithra:', sumithra_synth)\r\n print('Modhmudi:', modhumudi_synth)\r\n print('Chertala:', chertala_synth)\r\n print('Complete carnatic:', carnatic_synth)\r\n print('Hindustani:', hindustani_synth)\r\n '''\r\n\r\n", "import os\r\nimport csv\r\n\r\nimport scipy.signal\r\nfrom tqdm import tqdm\r\nimport pickle\r\nimport librosa\r\nimport mir_eval\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import signal\r\nfrom cfp import cfp_process\r\n\r\n\r\ndataset_path = '/mnt/sda1/genis/carnatic_melody_dataset/resources/Saraga-Synth-Dataset/'\r\n\r\n\r\ndef get_CenFreq(StartFreq=80, StopFreq=1000, NumPerOct=48):\r\n Nest = int(np.ceil(np.log2(StopFreq/StartFreq))*NumPerOct)\r\n central_freq = []\r\n for i in range(0, Nest):\r\n CenFreq = StartFreq*pow(2, float(i)/NumPerOct)\r\n if CenFreq < StopFreq:\r\n central_freq.append(CenFreq)\r\n else:\r\n break\r\n return central_freq\r\n\r\n\r\ndef seq2map(seq, CenFreq):\r\n CenFreq[0] = 0\r\n gtmap = np.zeros((len(CenFreq), len(seq)))\r\n for i in range(len(seq)):\r\n for j in range(len(CenFreq)):\r\n if seq[i] < 0.1:\r\n gtmap[0, i] = 1\r\n break\r\n elif CenFreq[j] > seq[i]:\r\n gtmap[j, i] = 1\r\n break\r\n return gtmap\r\n\r\n\r\ndef batchize(data, gt, xlist, ylist, size=430):\r\n if data.shape[-1] != gt.shape[-1]:\r\n new_length = min(data.shape[-1], gt.shape[-1])\r\n print('data:', data.shape, ', gt shape:', gt.shape)\r\n\r\n data = data[:, :, :new_length]\r\n gt = gt[:, :new_length]\r\n num = int(gt.shape[-1] / size)\r\n if gt.shape[-1] % size != 0:\r\n num += 1\r\n for i in range(num):\r\n if (i + 1) * size > gt.shape[-1]:\r\n batch_x = np.zeros((data.shape[0], data.shape[1], size))\r\n batch_y = np.zeros((gt.shape[0], size))\r\n\r\n tmp_x = data[:, :, i * size:]\r\n tmp_y = gt[:, i * size:]\r\n\r\n batch_x[:, :, :tmp_x.shape[-1]] += tmp_x\r\n batch_y[:, :tmp_y.shape[-1]] += tmp_y\r\n xlist.append(batch_x.transpose(1, 2, 0))\r\n ylist.append(batch_y)\r\n break\r\n else:\r\n batch_x = data[:, :, i * size:(i + 1) * size]\r\n 
batch_y = gt[:, i * size:(i + 1) * size]\r\n xlist.append(batch_x.transpose(1, 2, 0))\r\n ylist.append(batch_y)\r\n\r\n return xlist, ylist, num\r\n\r\n\r\ndef batchize_test(data, size=430):\r\n xlist = []\r\n num = int(data.shape[-1] / size)\r\n if data.shape[-1] % size != 0:\r\n num += 1\r\n for i in range(num):\r\n if (i + 1) * size > data.shape[-1]:\r\n batch_x = np.zeros((data.shape[0], data.shape[1], size))\r\n\r\n tmp_x = data[:, :, i * size:]\r\n\r\n batch_x[:, :, :tmp_x.shape[-1]] += tmp_x\r\n xlist.append(batch_x.transpose(1, 2, 0))\r\n break\r\n else:\r\n batch_x = data[:, :, i * size:(i + 1) * size]\r\n xlist.append(batch_x.transpose(1, 2, 0))\r\n\r\n return np.array(xlist)\r\n \r\n\r\ndef load_data(track_list, seg_len=430):\r\n \r\n xlist = []\r\n ylist = []\r\n for wav_file in tqdm(track_list):\r\n\r\n ## Load cfp features (3, 320, T)\r\n #feature = np.load(data_folder + 'cfp/' + fname + '.npy')\r\n feature, CenFreq, time_arr = cfp_process(wav_file, sr=8000, hop=80)\r\n print('feature', np.shape(feature))\r\n\r\n ## Load f0 frequency\r\n #pitch = np.loadtxt(data_folder + 'f0ref/' + fname + '.txt')\r\n '''\r\n if 'medley' in wav_file:\r\n ref_arr = csv2ref(wav_file.replace('.wav', 'REF.csv'))\r\n else:\r\n if 'train' in wav_file:\r\n ref_arr = txt2ref_tabs(wav_file.replace('.wav', 'REF.txt'))\r\n else:\r\n ref_arr = txt2ref_spaces(wav_file.replace('.wav', 'REF.txt'))\r\n '''\r\n #ref_arr = csv2ref(wav_file.replace('_MIX_melsynth.wav', '.csv').replace('audio_mix', 'annotation_melody'))\r\n ref_arr = csv2ref(wav_file.replace('.wav', '.csv').replace('audio', 'annotations/melody'))\r\n _, pitch_res = resample_melody(ref_arr, np.shape(feature)[-1])\r\n print('pitch', np.shape(pitch_res))\r\n\r\n ## Transfer to mapp, ping\r\n #CenFreq = get_CenFreq(StartFreq=31, StopFreq=720, NumPerOct=120)\r\n #CenFreq = get_CenFreq(StartFreq=31, StopFreq=1250, NumPerOct=60) # (321) #参数是特征提取时就固定的\r\n \r\n mapping = seq2map(pitch_res, CenFreq) # (321, T)\r\n \r\n # print('CenFreq', np.shape(CenFreq), 'mapping', np.shape(mapping))\r\n \r\n ## Crop to segments\r\n xlist, ylist, num = batchize(feature, mapping, xlist, ylist, size=seg_len)\r\n\r\n #dataset = (xlist, ylist)\r\n #with open(data_file, 'wb') as f:\r\n # pickle.dump(dataset, f)\r\n # print(\"Saved {} segments to {}\".format(len(xlist), data_file))\r\n \r\n return xlist, ylist, len(ylist)\r\n\r\n\r\ndef load_data_for_test(track_list, seg_len=430):\r\n\r\n xlist = []\r\n ylist = []\r\n for wav_file in tqdm(track_list):\r\n \r\n ## Load cfp features (3, 320, T)\r\n # feature = np.load(data_folder + 'cfp/' + fname + '.npy')\r\n feature, _, time_arr = cfp_process(wav_file, sr=8000, hop=80)\r\n print('feature', np.shape(feature))\r\n\r\n ## Load f0 frequency\r\n # pitch = np.loadtxt(data_folder + 'f0ref/' + fname + '.txt')\r\n '''\r\n if 'medley' in wav_file:\r\n ref_arr = csv2ref(wav_file.replace('.wav', 'REF.csv'))\r\n else:\r\n if 'train' in wav_file:\r\n ref_arr = txt2ref_tabs(wav_file.replace('.wav', 'REF.txt'))\r\n else:\r\n ref_arr = txt2ref_spaces(wav_file.replace('.wav', 'REF.txt'))\r\n '''\r\n #ref_arr = csv2ref(wav_file.replace('_MIX_melsynth.wav', '.csv').replace('audio_mix', 'annotation_melody'))\r\n ref_arr = csv2ref(wav_file.replace('.wav', '.csv').replace('audio', 'annotations/melody'))\r\n times, pitch = resample_melody(ref_arr, np.shape(feature)[-1])\r\n ref_arr_res = np.concatenate((times[:, None], pitch[:, None]), axis=1)\r\n print('pitch', np.shape(ref_arr_res))\r\n\r\n data = batchize_test(feature, size=seg_len)\r\n 
xlist.append(data)\r\n ylist.append(ref_arr_res[:, :])\r\n\r\n #dataset = (xlist, ylist)\r\n #with open(data_file, 'wb') as f:\r\n # pickle.dump(dataset, f)\r\n # print(\"Saved {} segments to {}\".format(len(xlist), data_file))\r\n \r\n return xlist, ylist\r\n\r\n\r\ndef load_single_data_for_test(fname, seg_len=430):\r\n # data_file = 'data/single_' + fname + '_{}test.pkl'.format(seg_len)\r\n # if os.path.exists(data_file):\r\n # with open(data_file, 'rb') as f:\r\n # xlist, ylist = pickle.load(f)\r\n \r\n # else:\r\n data_folder = '/data1/project/MCDNN/data/'\r\n \r\n xlist = []\r\n ylist = []\r\n\r\n ## Get file key\r\n fname = fname.replace('.npy', '').rstrip()\r\n\r\n ## Load cfp features\r\n feature = np.load(data_folder + 'cfp/' + fname + '.npy') # (3, 320, T)\r\n\r\n ## Load f0 frequency\r\n ref_arr = np.loadtxt(data_folder + 'f0ref/' + fname + '.txt') # (T, 2)\r\n\r\n data = batchize_test(feature, seg_len)\r\n xlist.append(data)\r\n ylist.append(ref_arr[:, :])\r\n\r\n # dataset = (xlist, ylist)\r\n # with open(data_file, 'wb') as f:\r\n # pickle.dump(dataset, f)\r\n # print(\"Saved {} segments to {}\".format(len(xlist), data_file))\r\n \r\n return xlist, ylist\r\n\r\n\r\ndef txt_to_pitch(ypath):\r\n pitch = []\r\n with open(ypath, 'r') as fhandle:\r\n reader = csv.reader(fhandle, delimiter=',')\r\n for row in reader:\r\n pitch.append(float(row[1].replace(' ', '')))\r\n \r\n return np.array(pitch)\r\n\r\n\r\ndef txt2ref_spaces(ypath):\r\n times = []\r\n pitch = []\r\n with open(ypath, 'r') as fhandle:\r\n reader = csv.reader(fhandle, delimiter=' ')\r\n for row in reader:\r\n times.append(float(row[0]))\r\n pitch.append(float(row[-1]))\r\n \r\n times = np.array(times)\r\n pitch = np.array(pitch)\r\n ref_arr = np.concatenate((times[:, None], pitch[:, None]), axis=1)\r\n \r\n return ref_arr\r\n\r\n\r\ndef txt2ref_tabs(ypath):\r\n times = []\r\n pitch = []\r\n with open(ypath, 'r') as fhandle:\r\n reader = csv.reader(fhandle, delimiter='\\t')\r\n for row in reader:\r\n times.append(float(row[0]))\r\n pitch.append(float(row[1]))\r\n \r\n times = np.array(times)\r\n pitch = np.array(pitch)\r\n ref_arr = np.concatenate((times[:, None], pitch[:, None]), axis=1)\r\n \r\n return ref_arr\r\n \r\n \r\ndef csv2ref(ypath):\r\n ycsv = pd.read_csv(ypath, names=[\"time\", \"freq\"])\r\n gtt = ycsv['time'].values\r\n gtf = ycsv['freq'].values\r\n ref_arr = np.concatenate((gtt[:, None], gtf[:, None]), axis=1)\r\n return ref_arr\r\n\r\n\r\ndef resample_melody(pitch, new_len):\r\n times = pitch[:, 0]\r\n frequencies = pitch[:, 1]\r\n #frequencies = [0 if p < 100 else p for p in frequencies]\r\n \r\n voicing = []\r\n for freq in frequencies:\r\n voicing.append(1) if freq > 0 else voicing.append(0)\r\n \r\n #times_new = signal.resample(times, new_len)\r\n times_new = np.linspace(times[0], times[-1], num=new_len)\r\n \r\n frequencies_resampled, voicing_resampled = mir_eval.melody.resample_melody_series(\r\n times=times,\r\n frequencies=frequencies,\r\n voicing=np.array(voicing),\r\n times_new=times_new,\r\n kind='linear'\r\n )\r\n \r\n return times_new, frequencies_resampled\r\n" ]
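The loader above quantises an f0 curve onto a log-spaced frequency grid (get_CenFreq) and one-hot encodes it per frame (seq2map). The standalone NumPy sketch below mirrors that quantisation for a toy pitch track; it is an editorial illustration with made-up input values, not code from the repository:

import numpy as np

def cen_freq(start=31.0, stop=1250.0, per_oct=60):
    # Log-spaced centre frequencies, start * 2**(i/per_oct), kept while below stop.
    n = int(np.ceil(np.log2(stop / start)) * per_oct)
    return [start * 2 ** (i / per_oct) for i in range(n) if start * 2 ** (i / per_oct) < stop]

grid = cen_freq()
grid[0] = 0.0                                    # bin 0 doubles as the unvoiced bin, as in seq2map
seq = np.array([0.0, 110.0, 220.0, 440.0])       # toy f0 track in Hz (0 = unvoiced frame)

gtmap = np.zeros((len(grid), len(seq)))
for t, f in enumerate(seq):
    if f < 0.1:
        gtmap[0, t] = 1
    else:
        gtmap[np.searchsorted(grid, f, side='right'), t] = 1   # first centre frequency above f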
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.Input", "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Multiply", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Softmax" ], [ "pandas.read_csv", "numpy.concatenate", "numpy.std", "numpy.argmax", "numpy.mean", "numpy.shape", "numpy.zeros" ], [ "pandas.read_csv", "numpy.log2", "numpy.linspace", "numpy.concatenate", "numpy.shape", "numpy.load", "numpy.array", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
seungkee/2nd-place-solution-to-facebook-image-similarity-matching-track
[ "716667bf416239f448e4ea2730a2cc5146536719", "716667bf416239f448e4ea2730a2cc5146536719" ]
[ "code/descriptor_track/train_eval/make_n_perquery_df_gpu.py", "code/matching_track/train_eval/merge_score2.py" ]
[ "import cv2\nimport os\nimport random\nimport time\nimport torch\nimport torch.backends.cudnn as cudnn\nimport models\nfrom utils.logger import Logger\nimport myexman\nfrom utils import utils\nimport sys\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nimport socket\nfrom torchvision import transforms,datasets\nfrom eval_metrics import get_matching_from_descs, evaluate_metrics\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport h5py\ndef add_learner_params(parser):\n parser.add_argument('--problem', default='sim-clr',\n help='The problem to train',\n choices=models.REGISTERED_MODELS,\n )\n parser.add_argument('--name', default='',\n help='Name for the experiment',\n )\n parser.add_argument('--ckpt', default='',\n help='Optional checkpoint to init the model.'\n )\n parser.add_argument('--n_perquery', default=100, type=int)\n parser.add_argument('--query_features', default='')\n parser.add_argument('--ref_features', default='')\n\n parser.add_argument('--verbose', default=False, type=bool)\n parser.add_argument('--num_classes', default=3, type=int)\n # optimizer params\n parser.add_argument('--lr_schedule', default='warmup-anneal')\n parser.add_argument('--opt', default='lars', help='Optimizer to use', choices=['sgd', 'adam', 'lars'])\n parser.add_argument('--iters', default=-1, type=int, help='The number of optimizer updates')\n parser.add_argument('--warmup', default=0, type=float, help='The number of warmup iterations in proportion to \\'iters\\'')\n parser.add_argument('--lr', default=0.1, type=float, help='Base learning rate')\n parser.add_argument('--wd', '--weight_decay', default=1e-4, type=float, dest='weight_decay')\n\n\n # trainer params\n parser.add_argument('--save_freq', default=1000, type=int, help='Frequency to save the model')\n parser.add_argument('--log_freq', default=100, type=int, help='Logging frequency')\n parser.add_argument('--eval_freq', default=10000000000000000, type=int, help='Evaluation frequency')\n parser.add_argument('-j', '--workers', default=4, type=int, help='The number of data loader workers')\n parser.add_argument('--eval_only', default=False, type=bool, help='Skips the training step if True')\n parser.add_argument('--seed', default=-1, type=int, help='Random seed')\n # transfrom params\n parser.add_argument('--im_size', default=224, type=int)\n parser.add_argument('--allgray', default=0, type=int)\n # parallelizm params:\n parser.add_argument('--dist', default='dp', type=str,\n help='dp: DataParallel, ddp: DistributedDataParallel',\n choices=['dp', 'ddp'],\n )\n parser.add_argument('--dist_address', default='127.0.0.1:1234', type=str,\n help='the address and a port of the main node in the <address>:<port> format'\n )\n parser.add_argument('--node_rank', default=0, type=int,\n help='Rank of the node (script launched): 0 for the main node and 1,... 
for the others',\n )\n parser.add_argument('--world_size', default=1, type=int,\n help='the number of nodes (scripts launched)',\n )\n parser.add_argument('--best_valid_score', default=0, type=float)\n \ndef main():\n parser = myexman.ExParser(file=os.path.basename(__file__))\n add_learner_params(parser)\n\n is_help = False\n if '--help' in sys.argv or '-h' in sys.argv:\n sys.argv.pop(sys.argv.index('--help' if '--help' in sys.argv else '-h'))\n is_help = True\n\n args, _ = parser.parse_known_args(log_params=False)\n\n models.REGISTERED_MODELS[args.problem].add_model_hparams(parser)\n\n if is_help:\n sys.argv.append('--help')\n\n args = parser.parse_args(namespace=args)\n\n if args.data == 'imagenet' and args.aug == False:\n raise Exception('ImageNet models should be eval with aug=True!')\n\n if args.seed != -1:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n\n args.gpu = 0\n ngpus = torch.cuda.device_count()\n args.number_of_processes = 1\n if args.dist == 'ddp':\n # add additional argument to be able to retrieve # of processes from logs\n # and don't change initial arguments to reproduce the experiment\n args.number_of_processes = args.world_size * ngpus\n parser.update_params_file(args)\n\n args.world_size *= ngpus\n mp.spawn(\n main_worker,\n nprocs=ngpus,\n args=(ngpus, args),\n )\n else:\n parser.update_params_file(args)\n main_worker(args.gpu, -1, args)\n\nclass FacebookDataset(torch.utils.data.Dataset):\n def __init__(self, split, transform=None, imsize=None):\n if split == 'query':\n self.dirname = '/facebook/data/images/query/'\n self.samples = list(np.load('/facebook/data/images/query_imlist.npy'))\n #gt_df=pd.read_csv('/facebook/data/public_ground_truth.csv')\n #gt_df=gt_df[~gt_df['reference_id'].isnull()]\n #self.samples = [x+'.jpg' for x in list(gt_df['query_id'])]\n elif split == 'query_total':\n self.dirname = '/facebook/data/images/query/'\n self.samples = list(np.load('/facebook/data/images/query_total_imlist.npy'))\n else:\n self.dirname = '/facebook/data/images/reference_1M_root/reference_1M/'\n self.samples = list(np.load('/facebook/data/images/ref_imlist.npy'))\n self.transform = transform\n self.imsize = imsize\n def __len__(self):\n return len(self.samples)\n def __getitem__(self, index):\n path = os.path.join(self.dirname, self.samples[index])\n with open(path, 'rb') as f:\n img = Image.open(f)\n img = img.convert('RGB')\n if self.imsize is not None:\n img.thumbnail((self.imsize, self.imsize), Image.ANTIALIAS)\n if self.transform is not None:\n img = self.transform(img)\n return img,index\n\nclass mmDataset(torch.utils.data.Dataset):\n def __init__(self, args):\n self.query_features = nn.functional.normalize(torch.tensor(np.load(args.query_features)),dim=1,p=2)\n def __len__(self):\n return len(self.query_features)\n def __getitem__(self, index): \n return self.query_features[index], index\n \[email protected]_grad()\ndef extract_features(data_loader, args, use_cuda=True):\n features =np.zeros((len(data_loader.dataset),args.n_perquery), dtype=int)\n print('features',features.shape)\n ref_features = torch.tensor(np.load(args.ref_features))\n ref_features = nn.functional.normalize(ref_features, dim=1, p=2)\n ref_features = ref_features.t().cuda(non_blocking=True)\n for samples, index in tqdm(data_loader):\n samples = samples.cuda(non_blocking=True)\n index = index.cuda(non_blocking=True)\n #print(samples.shape)\n #print(ref_features.shape)\n feats = torch.argsort(torch.mm(samples,ref_features),dim=-1)#[:,:100]\n #feats = 
model(samples).clone()\n feats = feats.reshape((feats.shape[0],-1))\n # init storage feature matrix\n \"\"\"\n if dist.get_rank() == 0 and features is None:\n features = torch.zeros(len(data_loader.dataset), 100)#feats.shape[-1])\n #if use_cuda:\n # features = features.cuda(non_blocking=True)\n #print(f\"Storing features into tensor of shape {features.shape}\")\n \"\"\"\n # get indexes from all processes\n y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)\n y_l = list(y_all.unbind(0))\n y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)\n y_all_reduce.wait()\n index_all = torch.cat(y_l)\n\n # share features between processes\n feats_all = torch.empty(\n dist.get_world_size(),\n feats.size(0),\n feats.size(1),\n dtype=feats.dtype,\n device=feats.device,\n )\n #print('6',feats_all.shape)\n output_l = list(feats_all.unbind(0))\n output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)\n output_all_reduce.wait()\n \n # update storage feature matrix\n if dist.get_rank() == 0:\n if use_cuda:\n output_l = torch.cat(output_l)\n output_l = output_l[:,-args.n_perquery:]#torch.argsort(output_l, dim=-1)[:,-100:]\n features[index_all.cpu().numpy()]= output_l.cpu().numpy()\n #output_l = torch.tensor(np.argpartition(output_l.cpu().numpy(), -100)[:,-100:])\n #features[index_all.cpu().numpy()]=output_l\n #print('4',torch.cat(output_l).shape)\n #features.index_copy_(0, index_all, output_l)\n # features.index_copy_(0, index_all, torch.cat(output_l))\n else:\n features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())\n return features\n\ndef valid_mm(args):\n dataset_mm = mmDataset(args)\n sampler = torch.utils.data.DistributedSampler(dataset_mm, shuffle=False)\n data_loader_mm = torch.utils.data.DataLoader(\n dataset_mm,\n sampler=sampler,\n batch_size=4,\n num_workers=4,\n pin_memory=True,\n drop_last=False,\n )\n mm_features = extract_features(data_loader_mm,args,True)\n \n if args.rank==0:\n ref_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/ref_imlist.npy'))])\n query_total_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/query_total_imlist.npy'))]) \n \n len_ref = len(ref_id_list)\n len_query = len(query_total_id_list)\n\n new_query=[]\n new_ref=[]\n for i in tqdm(range(len(mm_features))):\n new_query+=[query_total_id_list[i%len_query]]*args.n_perquery\n new_ref += list(ref_id_list[mm_features[i]%len_ref])\n\n df = pd.DataFrame({'query_id':new_query,'reference_id':new_ref})\n #df=df.drop_duplicates(subset=['query_id','reference_id'], keep='last').reset_index(drop=True)\n df.to_csv(args.query_features+'_'+args.ref_features.split('/')[-1]+f'{args.n_perquery}pq.csv',index=False)\n\ndef valid_all(model, args):\n transform = transforms.Compose([\n transforms.Resize((args.im_size, args.im_size)),\n transforms.ToTensor(),\n transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225)),\n ])\n if args.allgray >= 1:\n transform = transforms.Compose([\n transforms.Resize((args.im_size, args.im_size)),\n transforms.RandomGrayscale(p=1.0),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),\n ])\n dataset_ref=FacebookDataset(split=\"ref\",transform=transform)\n dataset_query = FacebookDataset(split=\"query\", transform=transform)\n sampler = torch.utils.data.DistributedSampler(dataset_ref, shuffle=False)\n data_loader_ref = torch.utils.data.DataLoader(\n dataset_ref,\n sampler=sampler,\n batch_size=16,\n num_workers=16,\n 
pin_memory=True,\n drop_last=False,\n )\n sampler2=torch.utils.data.DistributedSampler(dataset_query,shuffle=False)\n data_loader_query = torch.utils.data.DataLoader(\n dataset_query,\n sampler=sampler2,\n batch_size=16,\n num_workers=16,\n pin_memory=True,\n drop_last=False,\n )\n\n dataset_query_total = FacebookDataset(split=\"query_total\", transform=transform)\n sampler3 = torch.utils.data.DistributedSampler(dataset_query_total, shuffle=False)\n data_loader_query_total = torch.utils.data.DataLoader(\n dataset_query_total,\n sampler=sampler3,\n batch_size=16,\n num_workers=16,\n pin_memory=True,\n drop_last=False,\n )\n query_total_features = extract_features(model, data_loader_query_total, True)\n ref_features = extract_features(model, data_loader_ref, True)\n query_features = extract_features(model, data_loader_query, True)\n if args.rank==0:\n np.save(args.ckpt+'_ref_features.npy',ref_features.cpu().numpy())\n np.save(args.ckpt+'_query_features.npy',query_features.cpu().numpy())\n np.save(args.ckpt+'_query_total_features.npy', query_total_features.cpu().numpy())\n \n ref_features = nn.functional.normalize(ref_features, dim=1, p=2).cpu().numpy()\n query_features = nn.functional.normalize(query_features, dim=1, p=2).cpu().numpy()\n query_total_features = nn.functional.normalize(query_total_features, dim=1, p=2).cpu().numpy()\n\n qry_ids = ['Q' + str(x).zfill(5) for x in range(50_000)]\n ref_ids = ['R' + str(x).zfill(6) for x in range(1_000_000)]\n\n out = args.ckpt+\"_fb-isc-submission.h5\"\n with h5py.File(out, \"w\") as f:\n f.create_dataset(\"query\", data=query_total_features)\n f.create_dataset(\"reference\", data=ref_features)\n f.create_dataset('query_ids', data=qry_ids)\n f.create_dataset('reference_ids', data=ref_ids)\n\n query_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/query_imlist.npy'))])\n ref_truth_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/ref_imlist.npy'))])\n gt_df=pd.read_csv('/facebook/data/public_ground_truth.csv')\n submission_df = get_matching_from_descs(query_features, ref_features, query_id_list, ref_truth_id_list, gt_df)\n ap, rp90 = evaluate_metrics(submission_df, gt_df)\n print(ap, rp90)\n\n query_total_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/query_total_imlist.npy'))])\n total_submission_df = get_matching_from_descs(query_total_features, ref_features, query_total_id_list, ref_truth_id_list, gt_df)\n total_submission_df.to_csv(args.ckpt+'_total_submission_df.csv',index=False)\n\n \"\"\"\n if args.best_valid_score < ap :\n args.best_valid_score = ap\n np.save(os.path.join(args.root,f'ref_features.npy'),ref_features)\n np.save(os.path.join(args.root,f'query_features.npy'),query_features)\n with open(os.path.join(args.root,'logs.out'),\"a\") as f:\n f.write(f'ap : {ap}, rp90 : {rp90}\\n')\n print(f'ap : {ap}, rp90 : {rp90}')\n \"\"\"\n \"\"\"\n pts=[]\n pts.append([65,60])\n pts.append([105,60])\n pts.append([105,135])\n pts.append([65,135])\n blank_image = np.zeros((224,224),np.uint8)\n mask = cv2.fillPoly(blank_image, pts=[np.array(pts)],color=1)\n mask = np.expand_dims(mask,-1)\n mask = mask.astype(np.float32)\n mask = mask.transpose(2,0,1).clip(0,1)\n mask = np.expand_dims(mask,0)\n loss_value = criterion(feats,torch.tensor(mask).cuda()).item()\n print(f'valid loss : {loss_value}')\n with open(os.path.join(args.root,'logs.out'),\"a\") as f:\n f.write(f'valid loss : {loss_value}\\n')\n \"\"\"\ndef valid_one_epoch(model, args):\n transform = transforms.Compose([\n 
transforms.Resize((args.im_size,args.im_size)),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),\n ])\n if args.allgray >= 1:\n transform = transforms.Compose([\n transforms.Resize((args.im_size, args.im_size)),\n transforms.RandomGrayscale(p=1.0),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),\n ])\n #dataset_ref = FacebookDataset(split=\"ref\", transform=transform)\n dataset_query = FacebookDataset(split=\"query\", transform=transform)\n \"\"\"\n sampler = torch.utils.data.DistributedSampler(dataset_ref, shuffle=False)\n data_loader_ref = torch.utils.data.DataLoader(\n dataset_ref,\n sampler=sampler,\n batch_size=16,\n num_workers=16,\n pin_memory=True,\n drop_last=False,\n )\n \"\"\"\n \"\"\"\n sampler2 = torch.utils.data.DistributedSampler(dataset_query,shuffle=False)\n data_loader_query = torch.utils.data.DataLoader(\n dataset_query,\n sampler=sampler2,\n batch_size=16,\n num_workers=16,\n pin_memory=True,\n drop_last=False,\n )\n \"\"\"\n data_loader_query = torch.utils.data.DataLoader(\n dataset_query,\n batch_size=1,\n num_workers=1,\n pin_memory=True,\n drop_last=False\n )\n #ref_features = extract_features(model, data_loader_ref, True)\n #query_features = extract_features(model, data_loader_query, True)\n criterion = nn.BCEWithLogitsLoss()\n\n query_features = []\n for samples, index in data_loader_query:\n samples = samples.cuda()\n feats=model(samples).clone()\n \n if args.rank==0:\n #ref_features = nn.functional.normalize(ref_features, dim=1, p=2).cpu().numpy()\n #query_features = nn.functional.normalize(query_features, dim=1, p=2).cpu().numpy()\n #query_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/query_imlist.npy'))])\n #ref_truth_id_list = np.array([x[:-4] for x in list(np.load('/facebook/data/images/ref_truth_imlist.npy'))])\n #gt_df=pd.read_csv('/facebook/data/public_ground_truth.csv')\n #submission_df = get_matching_from_descs(query_features, ref_features, query_id_list, ref_truth_id_list, gt_df)\n #ap, rp90 = evaluate_metrics(submission_df, gt_df)\n \"\"\"\n if args.best_valid_score < ap :\n args.best_valid_score = ap\n np.save(os.path.join(args.root,f'ref_features.npy'),ref_features)\n np.save(os.path.join(args.root,f'query_features.npy'),query_features)\n with open(os.path.join(args.root,'logs.out'),\"a\") as f:\n f.write(f'ap : {ap}, rp90 : {rp90}\\n')\n print(f'ap : {ap}, rp90 : {rp90}')\n \"\"\"\n pts=[]\n pts.append([65,60])\n pts.append([105,60])\n pts.append([105,135])\n pts.append([65,135])\n blank_image = np.zeros((224,224),np.uint8)\n mask = cv2.fillPoly(blank_image, pts=[np.array(pts)],color=1)\n mask = np.expand_dims(mask,-1)\n mask = mask.astype(np.float32)\n mask = mask.transpose(2,0,1).clip(0,1)\n mask = np.expand_dims(mask,0)\n loss_value = criterion(feats,torch.tensor(mask).cuda()).item()\n print(f'valid loss : {loss_value}')\n with open(os.path.join(args.root,'logs.out'),\"a\") as f:\n f.write(f'valid loss : {loss_value}\\n')\n\n \ndef main_worker(gpu, ngpus, args):\n fmt = {\n 'train_time': '.3f',\n 'val_time': '.3f',\n 'lr': '.1e',\n }\n logger = Logger('logs', base=args.root, fmt=fmt)\n\n args.gpu = gpu\n torch.cuda.set_device(gpu)\n args.rank = args.node_rank * ngpus + gpu\n\n device = torch.device('cuda:%d' % args.gpu)\n\n if args.dist == 'ddp':\n dist.init_process_group(\n backend='nccl',\n init_method = 'tcp://%s' % args.dist_address, #'env://',\n world_size=args.world_size,\n rank=args.rank,\n )\n n_gpus_total = 
dist.get_world_size()\n assert args.batch_size % n_gpus_total == 0\n args.batch_size //= n_gpus_total\n if args.rank == 0:\n print(f'===> {n_gpus_total} GPUs total; batch_size={args.batch_size} per GPU')\n\n print(f'===> Proc {dist.get_rank()}/{dist.get_world_size()}@{socket.gethostname()}', flush=True)\n\n # Data loading code\n #model.prepare_data()\n #train_loader, val_loader = model.dataloaders(iters=args.iters)\n # define optimizer\n cur_iter=0\n #optimizer,scheduler = models.ssl.configure_optimizers(args, model, cur_iter - 1)\n\n # optionally resume from a checkpoint\n #if args.ckpt and not args.eval_only:\n # optimizer.load_state_dict(ckpt['opt_state_dict'])\n\n cudnn.benchmark = True\n\n continue_training = args.iters != 0\n data_time, it_time = 0, 0\n\n while continue_training:\n valid_mm(args)\n dist.barrier()\n break\n \"\"\"\n train_logs = []\n model.train()\n\n start_time = time.time()\n for _, (batch,labels) in enumerate(train_loader):\n #print(len(batch))\n #print(batch)\n cur_iter += 1\n #batch = torch.cat([batch[0], batch[1]],dim=0)\n #batch = batch.to(device)#[x.to(device) for x in batch]\n batch = [x.to(device) for x in batch]\n data_time += time.time() - start_time\n logs = {}\n if not args.eval_only:\n # forward pass and compute loss\n logs = model.train_step(batch, cur_iter)\n loss = logs['loss']\n # gradient step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # save logs for the batch\n train_logs.append({k: utils.tonp(v) for k, v in logs.items()})\n\n #if cur_iter % args.save_freq == 0 and args.rank == 0:\n # save_checkpoint(args.root, model, optimizer, cur_iter)\n if cur_iter%args.eval_freq==0 or cur_iter>=args.iters or cur_iter==1:\n model.eval()\n valid_one_epoch(model,args)\n model.train()\n it_time += time.time() - start_time\n\n if (cur_iter % args.log_freq == 0 or cur_iter >= args.iters) and args.rank == 0:\n save_checkpoint(args.root, model, optimizer, cur_iter = cur_iter)\n train_logs = utils.agg_all_metrics(train_logs)\n\n logger.add_logs(cur_iter, train_logs, pref='train_')\n logger.add_scalar(cur_iter, 'lr', optimizer.param_groups[0]['lr'])\n logger.add_scalar(cur_iter, 'data_time', data_time)\n logger.add_scalar(cur_iter, 'it_time', it_time)\n logger.iter_info()\n logger.save()\n data_time, it_time = 0, 0\n train_logs = []\n\n if scheduler is not None:\n scheduler.step()\n\n if cur_iter >= args.iters:\n continue_training = False\n break\n\n start_time = time.time()\n \"\"\"\n # save_checkpoint(args.root, model, optimizer)\n\n if args.dist == 'ddp':\n dist.destroy_process_group()\n\ndef save_checkpoint(path, model, optimizer, cur_iter=None, is_best=False):\n if cur_iter is None:\n fname = os.path.join(path,'checkpoint.pth.tar')\n if is_best :\n fname = os.path.join(path,'checkpoint_best.pth.tar')\n else:\n fname = os.path.join(path, 'checkpoint-%d.pth.tar' % cur_iter)\n ckpt = model.get_ckpt()\n ckpt.update(\n {\n 'opt_state_dict': optimizer.state_dict(),\n 'iter': cur_iter,\n }\n )\n torch.save(ckpt,fname)\n\nif __name__ == '__main__':\n main()\n\n", "import pandas as pd\nimport numpy as 
np\np1='repo/0/matching-1009-from-1008-from-1001-nochange'\np2='repo/1/matching-1009-from-1008-from-1001-nochange'\np3='repo/2/matching-1009-from-1008-from-1001-nochange_000002'\n\ndf1_1=pd.read_csv(p1+'/final_cand_n.csv_halfeval.csv')\ndf2_1=pd.read_csv(p2+'/final_cand_n.csv_halfeval.csv')\ndf3_1=pd.read_csv(p3+'/final_cand_n.csv_halfeval.csv')\n\ndf1_1['score']=(np.array(df1_1['score'])+np.array(df2_1['score'])+np.array(df3_1['score']))/3.0\n\ndf=df1_1.drop_duplicates(subset=['query_id','reference_id','score'],keep='last').reset_index(drop=True)\n\nidx = df.groupby(['query_id'])['score'].transform(max) == df['score']\ndf = df[idx].reset_index(drop=True)\n\ndf.to_csv(f'final.csv',index=False)\n" ]
[ [ "numpy.expand_dims", "torch.multiprocessing.spawn", "torch.cat", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.device", "torch.distributed.get_rank", "torch.save", "pandas.read_csv", "torch.utils.data.DistributedSampler", "torch.distributed.init_process_group", "torch.mm", "torch.distributed.barrier", "torch.tensor", "numpy.load", "numpy.zeros", "torch.distributed.destroy_process_group", "torch.cuda.device_count", "torch.distributed.get_world_size", "numpy.array", "torch.nn.functional.normalize", "torch.cuda.set_device", "torch.manual_seed", "torch.distributed.all_gather" ], [ "numpy.array", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
qinfeng2011/wltp
[ "317ad38fb96599a29d22e40f69b6aeb4d205611d" ]
[ "tests/test_wltp_db.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013-2019 European Commission (JRC);\n# Licensed under the EUPL (the 'Licence');\n# You may not use this work except in compliance with the Licence.\n# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl\n\"\"\"(DEPRECATED) Compares the results of a batch of wltp_db vehicles against phase-1b-alpha Heinz's tool.\n\n* Run as Test-case to generate results for sample-vehicles.\n* Run it as cmd-line to compare with Heinz's results.\n\"\"\"\n\nfrom collections import OrderedDict\nimport glob\nimport logging\nimport math\nimport os\nimport re\nimport unittest\nfrom unittest.case import skipIf\n\nfrom wltp import utils\nfrom wltp.utils import memoize\n\nimport numpy as np\nimport numpy.testing as npt\nimport pandas as pd\n\nfrom wltp.experiment import Experiment\nfrom .goodvehicle import goodVehicle\n\n\noverwrite_old_results = (\n True\n) # NOTE: Set 'False' to UPDATE sample-results or run main() (assuming they are ok).\nforce_rerun = False\n\nmydir = os.path.dirname(__file__)\nsamples_dir = \"wltp_db\"\nvehs_data_inp_fname = \"wltp_db_vehicles.csv\"\nvehs_data_out_fname = \"wltp_db_vehicles_out.csv\"\ngened_fname_regex = r\".*wltp_db_vehicles-(\\d+).csv\"\nheinz_fname_regex = r\".*heinz-(\\d+).csv\"\ngened_fname_glob = \"wltp_db_vehicles-*.csv\"\ntrans_fname_glob = \"trans-wltp_db_vehicles-*.csv\"\ndriver_weight = 70\n\"For calculating unladen_mass.\"\nencoding = \"UTF-8\"\n# desc_columns_to_print = ['mean', 'std', 'min', 'max']\n\n\ndef _init_logging(loglevel=logging.DEBUG):\n logging.basicConfig(level=loglevel)\n logging.getLogger().setLevel(level=loglevel)\n log = logging.getLogger(__name__)\n\n return log\n\n\nlog = _init_logging()\n\n\n@memoize\ndef _read_vehicles_inp():\n df = pd.read_csv(vehs_data_inp_fname, encoding=encoding, index_col=0)\n\n return df.copy()\n\n\ndef _read_vehicles_out():\n try:\n df = pd.read_csv(vehs_data_out_fname, encoding=encoding, index_col=0)\n return df\n except Exception:\n ## File corrupts if run interrupted.\n return None\n\n return df.copy()\n\n\ndef _write_vehicle_data(df):\n df = df.to_csv(vehs_data_out_fname, encoding=encoding)\n\n\n@memoize\ndef _read_wots():\n df = pd.read_csv(\"wot_samples.csv\", encoding=encoding, index_col=None)\n\n return df.copy()\n\n\ndef _select_wot(wots, isDiesel):\n wots_labels = [\"average Euro 6 Petrol\", \"average Euro 6 Diesel\"]\n wots = wots[[\"n_norm\", wots_labels[isDiesel]]]\n wots.columns = [\"n_norm\", \"p_norm\"]\n\n return wots\n\n\ndef _make_gened_fname(transplant_original_gears, veh_num):\n root, ext = os.path.splitext(vehs_data_inp_fname)\n transplant = \"trans-\" if transplant_original_gears else \"\"\n outfname = \"{}{}-{:04}{}\".format(transplant, root, veh_num, ext)\n\n return outfname\n\n\ndef _make_heinz_fname(veh_num):\n return \"heinz-{:04}.csv\".format(veh_num)\n\n\n@memoize\ndef _read_gened_file(inpfname):\n df = pd.read_csv(inpfname, header=0, index_col=0)\n assert not df.empty\n assert df.index.name == \"time\", df.index.name\n\n return df.copy()\n\n\n@memoize\ndef _read_heinz_file(veh_num):\n vehfpath = _make_heinz_fname(veh_num)\n try:\n inpfname = glob.glob(vehfpath)[0]\n except IndexError:\n raise FileNotFoundError(\n \"Skipped veh_id(%s), no file found: %s\" % (veh_num, vehfpath)\n )\n\n df = pd.read_csv(inpfname, encoding=\"UTF-8\", header=0, index_col=0)\n assert not df.empty\n assert df.index.name == \"t\", df.index.name\n\n return df.copy()\n\n\n_sources_latest_date = None\n\n\ndef 
_is_file_up_to_date(result_file, other_dependency_files=()):\n\n result_fnames = [result_file, vehs_data_out_fname]\n if force_rerun or not all(os.path.exists(f) for f in result_fnames):\n return False\n results_date = max([os.path.getmtime(file) for file in result_fnames])\n\n if _sources_latest_date is None:\n source_fnames = [\n __file__,\n \"../../datamodel.py\",\n \"../../experiment.py\",\n vehs_data_inp_fname,\n ]\n _sources_latest_dep_date = max(\n [os.path.getmtime(file) for file in source_fnames]\n )\n\n latest_dep_date = max([os.path.getmtime(file) for file in other_dependency_files])\n latest_dep_date = max(latest_dep_date, _sources_latest_dep_date)\n\n return results_date > latest_dep_date\n\n\ndef _file_pairs(fname_glob):\n \"\"\"\n Generates pairs of files to compare, skipping non-existent and those with mismatching #_of_rows.\n\n Example:\n\n >>> for (veh_num, df_g, df_h) in _file_pairs('wltp_db_vehicles-00*.csv'):\n ... pass\n \"\"\"\n\n all_gened = sorted(glob.glob(fname_glob))\n for g_fname in all_gened:\n m = re.match(gened_fname_regex, g_fname)\n veh_num = int(m.groups()[0])\n\n df_g = _read_gened_file(g_fname)\n df_h = _read_heinz_file(veh_num)\n\n if df_g.shape[0] != df_h.shape[0]:\n log.warning(\n \"Class-mismatched(%s): gened(%s) !+ heinz(%s)!\",\n g_fname,\n df_g.shape,\n df_h.shape,\n )\n continue\n if abs(df_g.v_class.sum() - df_h.v_orig.sum()) > 1:\n log.warning(\n \"Cycle-mismatched(%s): gened(%s) !+ heinz(%s)!\",\n g_fname,\n df_g.v_class.sum(),\n df_h.v_orig.sum(),\n )\n continue\n\n yield (veh_num, df_g, df_h)\n\n\ndef vehicles_applicator(fname_glob, pair_func):\n \"\"\"\n Applies the fun onto a pair of (generated, heinz) files for each tested-vehicle in the glob and\n appends results to list, preffixed by veh_num.\n\n :param pair_func: signature: func(veh_no, gened_df, heinz_df)-->sequence_of_numbers\n :return: a dataframe with the columns returned from the pair_func, row_indexed by veh_num\n \"\"\"\n\n res = []\n for (veh_num, df_g, df_h) in _file_pairs(fname_glob):\n row = pair_func(veh_num, df_g, df_h)\n res.append([int(veh_num)] + list(row))\n assert len(res) > 0\n\n ares = np.array(res)\n df = pd.DataFrame(ares[:, 1:], index=ares[:, 0])\n\n return df\n\n\ndef aggregate_single_columns_means(gened_column, heinz_column):\n \"\"\"\n Runs experiments and aggregates mean-values from one column of each (gened, heinz) file-sets.\n \"\"\"\n vehdata = _run_the_experiments(\n transplant_original_gears=False, compare_results=False\n )\n\n res = vehicles_applicator(\n gened_fname_glob,\n lambda _, df_g, df_h: (df_g[gened_column].mean(), df_h[heinz_column].mean()),\n )\n res.columns = [\"gened\", \"heinz\"]\n vehdata = vehdata.merge(res, how=\"inner\", left_index=True, right_index=True).sort()\n return vehdata\n\n\nclass WltpDbTests(unittest.TestCase):\n \"\"\"Compares a batch of vehicles with results obtained from \"official\" implementation.\"\"\"\n\n # @classmethod\n # def setUpClass(cls):\n\n def setUp(self):\n self.run_comparison = overwrite_old_results\n os.chdir(os.path.join(mydir, samples_dir))\n\n # @skip\n def test0_runExperiment(self, plot_results=False, encoding=\"UTF-8\"):\n _run_the_experiments(\n transplant_original_gears=False,\n compare_results=self.run_comparison,\n encoding=encoding,\n )\n\n # @skip\n def test0_runExperimentTransplant(self, plot_results=False, encoding=\"UTF-8\"):\n _run_the_experiments(\n transplant_original_gears=True,\n compare_results=self.run_comparison,\n encoding=encoding,\n )\n\n def test1_Downscale(self):\n \"\"\"Check 
mean-downscaled-velocity diff with Heinz within some percent.\n\n ### Comparison history ###\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n python heinz diff_prcnt\n count 378.000000 378.000000 0.000000e+00\n mean 45.973545 46.189082 4.688300e-01\n std 1.642335 1.126555 -4.578377e+01\n min 35.866421 36.659117 2.210133e+00\n 25% 46.506718 46.504909 -3.892020e-03\n 50% 46.506718 46.506504 -4.620879e-04\n 75% 46.506718 46.506719 4.116024e-08\n max 46.506718 46.506719 4.116024e-08\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n python heinz diff_prcnt\n count 382.000000 382.000000 0.000000e+00\n mean 44.821337 44.846671 5.652189e-02\n std 5.054214 5.050208 -7.933394e-02\n min 28.091672 28.388418 1.056347e+00\n 25% 46.506718 46.504868 -3.978244e-03\n 50% 46.506718 46.506478 -5.162230e-04\n 75% 46.506718 46.506719 4.116033e-08\n max 46.506718 46.506719 4.116033e-08\n \"\"\"\n\n pcrnt_limit = 0.09\n\n res = vehicles_applicator(\n gened_fname_glob,\n lambda _, df_g, df_h: (df_g[\"v_target\"].mean(), df_h[\"v\"].mean()),\n )\n res.columns = [\"python\", \"heinz\"]\n\n df = res.describe()\n\n df[\"diff_prcnt\"] = 100 * (df.heinz - df.python) / df.min(axis=1)\n print(df)\n\n diff_prcnt = df.loc[\"mean\", \"diff_prcnt\"]\n self.assertLess(np.abs(diff_prcnt), pcrnt_limit)\n\n def _check_gear_diffs(self, fname_glob):\n def read_and_compare_experiment(veh_num, df_my, df_hz):\n ## Count base-calc errors (before dirveability).\n ndiff_gears_orig = np.count_nonzero(df_my[\"gears_orig\"] != df_hz[\"g_max\"])\n\n ## Count all errors.\n #\n my_gears = df_my[\"gears\"]\n gears_hz = df_hz[\"gear\"]\n diff_gears = my_gears != gears_hz\n ndiff_gears = np.count_nonzero(diff_gears)\n\n ## Count Acceleration-only errors.\n #\n accel = np.gradient(df_my[\"v_class\"])\n diff_gears_accel = diff_gears[accel >= 0]\n ndiff_gears_accel = np.count_nonzero(diff_gears_accel)\n\n return (ndiff_gears, ndiff_gears_accel, ndiff_gears_orig)\n\n res = vehicles_applicator(fname_glob, read_and_compare_experiment)\n res.columns = [\"diff_gears\", \"diff_accel\", \"diff_orig\"]\n\n res_totals = res.describe()\n res_totals.loc[\"sum\", :] = res.sum(axis=0)\n res_totals.loc[\"mean%\", :] = (\n 100 * res_totals.loc[\"mean\", :] / 1800\n ) # class3-duration\n\n return res_totals\n\n def test2a_gear_diffs(self):\n \"\"\"Check diff-gears with Heinz stays within some percent.\n\n ### Comparison history ###\n\n Class3b-Vehicles, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n count MEAN STD min max\n gears 23387 75.931818 56.921729 6 279\n accell 19146 62.162338 48.831155 4 238\n senza rules 16133 52.379870 35.858415 11 170\n\n Separated test/unladen masses::\n\n diff_gears diff_accel diff_orig\n count 378.000000 378.000000 378.000000\n mean 104.965608 86.171958 90.235450\n std 100.439783 82.613475 109.283901\n min 6.000000 4.000000 11.000000\n 25% 36.250000 25.250000 23.000000\n 50% 69.000000 57.500000 51.000000\n 75% 142.000000 119.750000 104.750000\n max 524.000000 404.000000 600.000000\n sum 39677.000000 32573.000000 34109.000000\n mean% 5.831423 4.787331 5.013081\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n diff_gears diff_accel diff_orig\n count 382.000000 382.000000 382.000000\n mean 75.994764 63.633508 54.083770\n std 58.290971 51.885162 38.762326\n min 2.000000 2.000000 6.000000\n 25% 29.000000 22.000000 19.000000\n 50% 57.000000 48.500000 45.000000\n 75% 111.000000 97.000000 78.750000\n max 279.000000 243.000000 
173.000000\n sum 29030.000000 24308.000000 20660.000000\n mean% 4.221931 3.535195 3.004654\n \"\"\"\n\n pcrnt_limit = 4.5 # mean%%(!)\n\n res_totals = self._check_gear_diffs(gened_fname_glob)\n print(res_totals)\n\n diff_prcnt = res_totals.loc[\"mean%\", [\"diff_gears\", \"diff_accel\"]]\n np.testing.assert_array_less(np.abs(diff_prcnt.fillna(0)), pcrnt_limit)\n\n def test2b_gear_diffs_transplanted(self):\n \"\"\"Check driveability-only diff-gears with Heinz stays within some percent.\n\n ### Comparison history ###\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n diff_gears diff_accel diff_orig\n count 378.000000 378.000000 378\n mean 15.566138 5.634921 0\n std 16.554295 8.136700 0\n min 0.000000 0.000000 0\n 25% 5.000000 1.000000 0\n 50% 11.000000 3.000000 0\n 75% 19.750000 7.000000 0\n max 123.000000 78.000000 0\n sum 5884.000000 2130.000000 0\n mean% 0.864785 0.313051 0\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n diff_gears diff_accel diff_orig\n count 382.000000 382.000000 382\n mean 12.599476 4.651832 0\n std 15.375930 7.566103 0\n min 0.000000 0.000000 0\n 25% 4.000000 0.000000 0\n 50% 9.000000 2.000000 0\n 75% 15.000000 6.000000 0\n max 123.000000 78.000000 0\n sum 4813.000000 1777.000000 0\n mean% 0.699971 0.258435 0\n \"\"\"\n\n pcrnt_limit = 0.75 # mean%(!)\n\n res_totals = self._check_gear_diffs(trans_fname_glob)\n print(res_totals)\n\n diff_prcnt = res_totals.loc[\"mean%\", [\"diff_gears\", \"diff_accel\"]]\n np.testing.assert_array_less(np.abs(diff_prcnt.fillna(0)), pcrnt_limit)\n\n def _check_n_mean(self, fname_glob):\n res = vehicles_applicator(\n fname_glob, lambda _, df_g, df_h: (df_g[\"rpm\"].mean(), df_h[\"n\"].mean())\n )\n res.columns = [\"python\", \"heinz\"]\n\n res_totals = res.describe()\n res_totals[\"diff_prcnt\"] = (\n 100 * (res_totals.heinz - res_totals.python) / res_totals.min(axis=1)\n )\n\n return res_totals\n\n def test3a_n_mean(self):\n \"\"\"Check mean-rpm diff with Heinz stays within some percent.\n\n ### Comparison history ###\n\n Class3b-Vehicles, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n mean std min max\n python 1766.707825 410.762478 1135.458463 3217.428423\n heinz 1759.851498 397.343498 1185.905053 3171.826208\n diff_prcnt -0.3896 -3.3772 4.4428 -1.4377\n\n Separated test/unladen masses::\n\n python heinz diff_prcnt\n count 378.000000 378.000000 0.000000\n mean 1923.908119 1899.366431 -1.292099\n std 628.998854 593.126296 -6.048047\n min 1135.458463 1185.905053 4.442839\n 25% 1497.544940 1495.699889 -0.123357\n 50% 1740.927971 1752.668517 0.674384\n 75% 2121.459309 2111.876041 -0.453780\n max 4965.206982 4897.154914 -1.389625\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n python heinz diff_prcnt\n count 382.000000 382.000000 0.000000\n mean 1835.393402 1827.572965 -0.427914\n std 476.687485 464.264779 -2.675781\n min 1135.458463 1185.905053 4.442839\n 25% 1486.886555 1482.789006 -0.276341\n 50% 1731.983662 1739.781233 0.450210\n 75% 2024.534101 2018.716963 -0.288160\n max 3741.849187 3750.927263 0.242609\n \n Keeping idle engine revs::\n \n python heinz diff_prcnt\n count 382.000000 382.000000 0.000000\n mean 1852.183403 1827.572965 -1.346619\n std 473.142045 464.264779 -1.912113\n min 1168.757027 1185.905053 1.467202\n 25% 1507.030779 1482.789006 -1.634877\n 50% 1749.246014 1739.781233 -0.544021\n 75% 2043.861777 2018.716963 -1.245584\n max 3747.026551 3750.927263 0.104102\n \"\"\"\n\n pcrnt_limit = 1.5\n\n res_totals = 
self._check_n_mean(gened_fname_glob)\n print(res_totals)\n\n diff_prcnt = res_totals.loc[\"mean\", \"diff_prcnt\"]\n self.assertLess(np.abs(diff_prcnt), pcrnt_limit)\n\n def test3b_n_mean_transplanted(self):\n \"\"\"Check driveability-only mean-rpm diff with Heinz stays within some percent.\n\n ### Comparison history ###\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n python heinz diff_prcnt\n count 378.000000 378.000000 0.000000\n mean 1880.045112 1899.366431 1.027705\n std 572.842493 593.126296 3.540904\n min 1150.940393 1185.905053 3.037921\n 25% 1477.913404 1495.699889 1.203486\n 50% 1739.882957 1752.668517 0.734852\n 75% 2073.715015 2111.876041 1.840225\n max 4647.136063 4897.154914 5.380063\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n python heinz diff_prcnt\n count 382.000000 382.000000 0.000000\n mean 1818.519842 1827.572965 0.497829\n std 469.276397 464.264779 -1.079474\n min 1150.940393 1185.905053 3.037921\n 25% 1467.153958 1482.789006 1.065672\n 50% 1730.051632 1739.781233 0.562388\n 75% 2010.264758 2018.716963 0.420452\n max 3704.999890 3750.927263 1.239605\n \"\"\"\n\n pcrnt_limit = 0.55\n\n res_totals = self._check_n_mean(trans_fname_glob)\n print(res_totals)\n\n diff_prcnt = res_totals.loc[\"mean\", \"diff_prcnt\"]\n self.assertLess(np.abs(diff_prcnt), pcrnt_limit)\n\n def _check_n_mean__pmr(self, fname_glob):\n vehdata = _read_vehicles_inp()\n vehdata[\"pmr\"] = 1000.0 * vehdata[\"rated_power\"] / vehdata[\"kerb_mass\"]\n np.testing.assert_allclose(vehdata.pmr_km, vehdata.pmr)\n\n res = vehicles_applicator(\n fname_glob, lambda _, df_g, df_h: (df_g[\"rpm\"].mean(), df_h[\"n\"].mean())\n )\n\n res.columns = [\"gened_mean_rpm\", \"heinz_mean_rpm\"]\n vehdata = vehdata.merge(\n res, how=\"inner\", left_index=True, right_index=True\n ).sort()\n self.assertEqual(vehdata.shape[0], res.shape[0])\n\n df = vehdata.sort(\"pmr\")[[\"gened_mean_rpm\", \"heinz_mean_rpm\"]]\n dfg = df.groupby(pd.cut(vehdata.pmr, 12))\n pmr_histogram = dfg.mean()\n\n dif = (\n pmr_histogram[\"heinz_mean_rpm\"] - pmr_histogram[\"gened_mean_rpm\"]\n ) / pmr_histogram.min(axis=1)\n pmr_histogram[\"diff_prcnt\"] = 100 * dif\n pmr_histogram[\"count\"] = dfg.count().iloc[:, -1]\n\n return pmr_histogram\n\n @skipIf(\n utils.is_travis(),\n \"GroupBy probably fails in old pandas, and cannot upgrade it.\",\n )\n def test4a_n_mean__PMR(self):\n \"\"\"Check mean-rpm diff with Heinz stays within some percent for all PMRs.\n\n ### Comparison history ###\n\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n gened_mean_rpm heinz_mean_rpm diff_ratio count\n pmr\n (9.973, 24.823] 1566.018469 1568.360963 0.001496 32\n (24.823, 39.496] 1701.176128 1702.739797 0.000919 32\n (39.496, 54.17] 1731.541637 1724.959671 -0.003816 106\n (54.17, 68.843] 1894.477475 1877.786294 -0.008889 61\n (68.843, 83.517] 1828.518522 1818.720627 -0.005387 40\n (83.517, 98.191] 1824.060716 1830.482140 0.003520 3\n (98.191, 112.864] 1794.673461 1792.693611 -0.001104 31\n (112.864, 127.538] 3217.428423 3171.826208 -0.014377 1\n (127.538, 142.211] 1627.952896 1597.571904 -0.019017 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1396.061758 1385.176569 -0.007858 1\n\n Separated test/unladen masses::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr\n (11.504, 26.225] 1579.612698 1585.721306 0.386716 28\n (26.225, 40.771] 1706.865069 1700.689983 -0.363093 41\n (40.771, 55.317] 1866.150857 
1841.779091 -1.323273 119\n (55.317, 69.863] 2122.662626 2085.262950 -1.793523 122\n (69.863, 84.409] 2228.282795 2171.952804 -2.593518 29\n (84.409, 98.955] 1783.316413 1787.378401 0.227777 4\n (98.955, 113.501] 1718.157828 1718.516147 0.020855 31\n (113.501, 128.0475] 2005.415058 1954.763742 -2.591173 2\n (128.0475, 142.594] 1566.601860 1553.383676 -0.850928 1\n (142.594, 157.14] NaN NaN NaN 0\n (157.14, 171.686] NaN NaN NaN 0\n (171.686, 186.232] 1396.061758 1385.176569 -0.785834 1\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr\n (9.973, 24.823] 1560.010258 1563.836656 0.245280 33\n (24.823, 39.496] 1725.209986 1725.004638 -0.011904 34\n (39.496, 54.17] 1737.811065 1730.770088 -0.406812 123\n (54.17, 68.843] 1996.999520 1983.753219 -0.667739 94\n (68.843, 83.517] 2051.088434 2034.594136 -0.810692 59\n (83.517, 98.191] 1964.832555 1958.081066 -0.344801 4\n (98.191, 112.864] 1682.122484 1684.443875 0.138004 31\n (112.864, 127.538] 2718.877009 2687.055802 -1.184241 2\n (127.538, 142.211] 1660.925042 1668.155469 0.435325 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1396.061758 1385.176569 -0.785834 1\n Mean: 0.419219429398\n\n pandas 0.15.1::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr \n (9.973, 24.823] 2037.027221 2038.842442 0.089111 33\n (24.823, 39.496] 2257.302959 2229.999526 -1.224369 34\n (39.496, 54.17] 1912.075914 1885.792807 -1.393743 123\n (54.17, 68.843] 1716.720028 1717.808457 0.063402 94\n (68.843, 83.517] 1677.882399 1683.916224 0.359610 59\n (83.517, 98.191] 1535.881170 1551.609661 1.024070 4\n (98.191, 112.864] 1571.290286 1589.997331 1.190553 31\n (112.864, 127.538] 1409.308426 1425.965019 1.181898 2\n (127.538, 142.211] 1975.481368 1967.808440 -0.389923 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1950.377512 1937.426430 -0.668468 1\n Mean diff_prcnt: 0.632095580562\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n\n Keeping idle engine revs::\n pmr \n (9.973, 24.823] 2058.624153 2038.842442 -0.970242 33\n (24.823, 39.496] 2271.419763 2229.999526 -1.857410 34\n (39.496, 54.17] 1927.898841 1885.792807 -2.232803 123\n (54.17, 68.843] 1733.545963 1717.808457 -0.916139 94\n (68.843, 83.517] 1694.461857 1683.916224 -0.626256 59\n (83.517, 98.191] 1553.854990 1551.609661 -0.144710 4\n (98.191, 112.864] 1590.081566 1589.997331 -0.005298 31\n (112.864, 127.538] 1427.367629 1425.965019 -0.098362 2\n (127.538, 142.211] 1989.461646 1967.808440 -1.100372 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1960.918157 1937.426430 -1.212522 1\n Mean diff_prcnt: 0.76367613389\n \"\"\"\n\n pcrnt_limit = 0.8\n\n pmr_histogram = self._check_n_mean__pmr(gened_fname_glob)\n\n print(pmr_histogram)\n\n diff_prcnt = pmr_histogram[\"diff_prcnt\"].fillna(0).abs().mean()\n print(\"Mean diff_prcnt: %s\" % diff_prcnt)\n self.assertLess(diff_prcnt, pcrnt_limit)\n\n @skipIf(\n utils.is_travis(),\n \"GroupBy probably fails in old pandas, and cannot upgrade it.\",\n )\n def test4b_n_mean__PMR_transplanted(self):\n \"\"\"Check driveability-only mean-rpm diff with Heinz stays within some percent for all PMRs.\n\n ### Comparison history ###\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr\n (9.973, 24.823] 1557.225037 1568.360963 0.715113 32\n (24.823, 39.496] 1686.859826 1696.482640 
0.570457 34\n (39.496, 54.17] 1771.670097 1789.409819 1.001299 120\n (54.17, 68.843] 2133.400050 2165.214662 1.491263 94\n (68.843, 83.517] 2020.903728 2043.741660 1.130085 59\n (83.517, 98.191] 1886.836446 1890.040533 0.169813 4\n (98.191, 112.864] 1788.434592 1792.693611 0.238142 31\n (112.864, 127.538] 2580.884314 2568.011660 -0.501269 2\n (127.538, 142.211] 1581.625191 1597.571904 1.008249 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1367.068837 1385.176569 1.324566 1\n\n Separated test/unladen masses::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr\n (11.504, 26.225] 1572.733597 1585.721306 0.825805 28\n (26.225, 40.771] 1690.081663 1700.689983 0.627681 41\n (40.771, 55.317] 1821.319706 1841.779091 1.123327 119\n (55.317, 69.863] 2060.507029 2085.262950 1.201448 122\n (69.863, 84.409] 2142.964427 2171.952804 1.352723 29\n (84.409, 98.955] 1783.214173 1787.378401 0.233524 4\n (98.955, 113.501] 1713.473617 1718.516147 0.294287 31\n (113.501, 128.0475] 1950.373771 1954.763742 0.225084 2\n (128.0475, 142.594] 1543.937285 1553.383676 0.611838 1\n (142.594, 157.14] NaN NaN NaN 0\n (157.14, 171.686] NaN NaN NaN 0\n (171.686, 186.232] 1367.068837 1385.176569 1.324566 1\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr\n (9.973, 24.823] 1551.901645 1563.836656 0.769057 33\n (24.823, 39.496] 1713.382835 1725.004638 0.678296 34\n (39.496, 54.17] 1722.174466 1730.770088 0.499114 123\n (54.17, 68.843] 1974.768859 1983.753219 0.454958 94\n (68.843, 83.517] 2026.630271 2034.594136 0.392961 59\n (83.517, 98.191] 1954.817179 1958.081066 0.166966 4\n (98.191, 112.864] 1676.678357 1684.443875 0.463149 31\n (112.864, 127.538] 2678.973439 2687.055802 0.301696 2\n (127.538, 142.211] 1658.577318 1668.155469 0.577492 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1367.068837 1385.176569 1.324566 1\n Mean diff_prcnt: 0.469021296461\n \n pandas 0.15.1::\n \n gened_mean_rpm heinz_mean_rpm diff_prcnt count\n pmr \n (9.973, 24.823] 2021.882193 2038.842442 0.838835 33\n (24.823, 39.496] 2204.136804 2229.999526 1.173372 34\n (39.496, 54.17] 1880.733341 1885.792807 0.269016 123\n (54.17, 68.843] 1710.819917 1717.808457 0.408491 94\n (68.843, 83.517] 1677.846860 1683.916224 0.361735 59\n (83.517, 98.191] 1541.587174 1551.609661 0.650141 4\n (98.191, 112.864] 1579.049392 1589.997331 0.693325 31\n (112.864, 127.538] 1411.921405 1425.965019 0.994646 2\n (127.538, 142.211] 1976.193317 1967.808440 -0.426102 1\n (142.211, 156.885] NaN NaN NaN 0\n (156.885, 171.558] NaN NaN NaN 0\n (171.558, 186.232] 1954.662077 1937.426430 -0.889616 1\n Mean diff_prcnt: 0.558773102894\n \"\"\"\n\n pcrnt_limit = 0.6\n\n pmr_histogram = self._check_n_mean__pmr(trans_fname_glob)\n\n print(pmr_histogram)\n\n diff_prcnt = pmr_histogram[\"diff_prcnt\"].fillna(0).abs().mean()\n print(\"Mean diff_prcnt: %s\" % diff_prcnt)\n self.assertLess(diff_prcnt, pcrnt_limit)\n\n def _check_n_mean__gear(self, fname_glob):\n def avg_by_column(group_column, aggregate_column, df):\n sr = df.groupby(group_column)[aggregate_column].describe()\n\n ## Ensure 6-gears for all vehicles\n #\n index = [range(7), [\"mean\", \"std\", \"min\", \"max\"]]\n index = pd.MultiIndex.from_product(index, names=[\"gear\", \"aggregate\"])\n sr = sr.reindex(index)\n\n return sr\n\n vehdata = OrderedDict()\n\n for (veh_num, df_g, df_h) in _file_pairs(fname_glob):\n df = pd.concat(\n (avg_by_column(\"gears\", 
\"rpm\", df_g), avg_by_column(\"gear\", \"n\", df_h)),\n axis=1,\n )\n df.columns = [\"python\", \"heinz\"]\n df[\"diff%\"] = (\n 100 * (df.python - df.heinz) / df.iloc[:, :2].abs().min(axis=1)\n )\n\n vehdata[veh_num] = df\n\n vehdata = pd.Panel(vehdata).to_frame(filter_observations=False)\n\n diff_prcnt_by_gears = vehdata.xs(\"mean\", level=1).mean(axis=1)\n diff_prcnt_by_gears = pd.DataFrame(diff_prcnt_by_gears).unstack()\n diff_prcnt_by_gears.name = \"diff_prcnt_by_gears\"\n\n diff_prcnt_by_gears = diff_prcnt_by_gears[0]\n diff_prcnt_by_gears.columns.name = \"n_mean\"\n\n return diff_prcnt_by_gears\n\n def test5a_n_mean__gear(self):\n \"\"\"Check mean-rpm diff% with Heinz stays within some percent for all gears.\n\n ### Comparison history ###\n\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n n_mean python heinz diff%\n gear\n 0 732.358286 804.656085 -9.925769\n 1 870.080494 1177.547512 -44.450903\n 2 1789.787609 1650.383967 6.520319\n 3 1921.271483 1761.172027 7.804359\n 4 1990.286402 1886.563262 5.401895\n 5 2138.445024 2112.552162 1.892950\n 6 2030.970322 1987.865039 2.228276\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n gear\n 0 735.143823 808.795812 -10.052865\n 1 799.834530 1139.979330 -47.027383\n 2 1598.773915 1582.431975 1.119054\n 3 1793.617644 1691.589756 5.768020\n 4 1883.863510 1796.957457 5.024360\n 5 2095.211754 2052.059948 2.430360\n 6 2033.663975 1990.344346 2.238421\n \"\"\"\n pcrnt_limit = 48\n\n histogram = self._check_n_mean__gear(gened_fname_glob)\n\n print(histogram)\n\n diff_prcnt = histogram[\"diff%\"]\n np.testing.assert_array_less(np.abs(diff_prcnt.fillna(0)), pcrnt_limit)\n\n def test5b_n_mean__gear_transplanted(self):\n \"\"\"Check mean-rpm diff% with Heinz stays within some percent for all gears.\n\n ### Comparison history ###\n\n\n Force class3b, Phase-1b-beta(ver <= 0.0.8, Aug-2014) with Heinz maxt gear-time=2sec::\n\n n_mean python heinz diff%\n gear\n 0 732.357001 804.656085 -9.926855\n 1 966.022039 1177.547512 -24.409425\n 2 1678.578373 1650.383967 1.616768\n 3 1791.644768 1761.172027 1.700642\n 4 1883.504933 1886.563262 0.119165\n 5 2099.218160 2112.552162 -0.320293\n 6 1985.732086 1987.865039 -0.096754\n\n Not forcing class3b, honoring declared v_max & unladen_mass::\n\n n_mean python heinz diff%\n gear\n 0 735.077116 808.795812 -10.065886\n 1 932.586982 1139.979330 -24.285307\n 2 1606.040896 1582.431975 1.379144\n 3 1721.141364 1691.589756 1.686708\n 4 1803.212699 1796.957457 0.370703\n 5 2053.822313 2052.059948 0.142138\n 6 1988.195381 1990.344346 -0.097482\n \"\"\"\n pcrnt_limit = 25\n\n histogram = self._check_n_mean__gear(trans_fname_glob)\n\n print(histogram)\n\n diff_prcnt = histogram[\"diff%\"]\n np.testing.assert_array_less(np.abs(diff_prcnt.fillna(0)), pcrnt_limit)\n\n\n###################\n# RUN EXPERIMENTS #\n###################\n\n\ndef _run_the_experiments(\n transplant_original_gears=False,\n plot_results=False,\n compare_results=False,\n encoding=\"UTF-8\",\n):\n\n ## If file existent, it contains also calculated fields\n # from the previous experiment run.\n #\n out_df = _read_vehicles_out()\n\n inp_df = _read_vehicles_inp()\n ## Reconstruct the columns only presetn in the out_df.\n #\n inp_df[\"pmr\"] = np.NAN\n inp_df[\"wltc_class\"] = \"\"\n inp_df[\"f_downscale\"] = np.NAN\n\n wots = _read_wots()\n\n failed_vehicles = 0\n for (ix, row) in inp_df.iterrows():\n veh_num = ix\n heinz_fname = _make_heinz_fname(veh_num)\n outfname = 
_make_gened_fname(transplant_original_gears, veh_num)\n\n if not out_df is None and _is_file_up_to_date(outfname, [heinz_fname]):\n inp_df.loc[ix] = out_df.loc[ix]\n continue\n\n mdl = goodVehicle()\n veh = mdl[\"vehicle\"]\n\n veh[\"test_mass\"] = row[\"test_mass\"]\n veh[\"unladen_mass\"] = row[\"kerb_mass\"]\n veh[\"f0\"] = row[\"f0_real\"]\n veh[\"f1\"] = row[\"f1_real\"]\n veh[\"f2\"] = row[\"f2_real\"]\n veh[\"p_rated\"] = row[\"rated_power\"]\n veh[\"n_rated\"] = row[\"rated_speed\"]\n veh[\"n_idle\"] = int(row[\"idling_speed\"])\n veh[\"v_max\"] = row[\"v_max\"]\n ngears = int(row[\"no_of_gears\"])\n veh[\"gear_ratios\"] = list(row[\"ndv_1\" : \"ndv_%s\" % ngears]) #'ndv_1'\n veh[\"wot\"] = _select_wot(wots, row[\"IDcat\"] == 2)\n\n if transplant_original_gears:\n log.warning(\">>> Transplanting gears from Heinz's!\")\n df_h = _read_heinz_file(veh_num)\n\n mdl[\"cycle\"] = {\"gears_orig\": df_h[\"g_max\"].values}\n\n try:\n experiment = Experiment(mdl)\n mdl = experiment.run()\n except Exception as ex:\n log.warning(\"VEHICLE_FAILED(%s): %s\", veh_num, str(ex))\n failed_vehicles += 1\n continue\n else:\n params = mdl[\"params\"]\n veh = mdl[\"vehicle\"]\n\n inp_df.loc[ix, \"pmr\"] = veh[\"pmr\"]\n inp_df.loc[ix, \"wltc_class\"] = veh[\"wltc_class\"]\n inp_df.loc[ix, \"f_downscale\"] = params[\"f_downscale\"]\n\n # ankostis_mdb: 't', \"v in km/h\",\"v_orig\",\"a in m/s²\",\"gear\",\"g_min\",\"g_max\",\"gear_modification\",\"error_description\"\n # heinz: 't', 'km_h', 'stg', 'gear'\n cycle_df = pd.DataFrame(mdl[\"cycle\"])\n\n _compare_exp_results(cycle_df, outfname, compare_results)\n\n cycle_df.to_csv(outfname, index_label=\"time\")\n fail_limit_prcnt = 0.1\n assert failed_vehicles < fail_limit_prcnt * inp_df.shape[0], (\n \"TOO MANY(>%f) vehicles have Failed(%i out of %i)!\"\n % (fail_limit_prcnt, failed_vehicles, inp_df.shape[0])\n )\n\n if not transplant_original_gears:\n _write_vehicle_data(inp_df)\n\n return inp_df\n\n\n# vehfpath = os.path.join(samples_dir, 'heinz_Petrol_veh{:05}.dri'.format(veh_num))\n# inpfname = glob.glob(vehfpath)[0]\n# out_df = pd.read_csv(inpfname, encoding='latin-1')\n\n\n###################\n# COMPARE RESULTS #\n###################\n\n\ndef _compare_exp_results(tabular, outfname, run_comparison):\n if run_comparison:\n try:\n data_prev = _read_gened_file(outfname)\n ## Compare changed-tabular\n #\n npt.assert_array_equal(tabular[\"gears\"], data_prev[\"gears\"], outfname)\n # Unreached code in case of assertion.\n # cmp = tabular['gears'] != data_prev['gears']\n # if (cmp.any()):\n # self._plotResults(data_prev)\n # print('>> COMPARING(%s): %s'%(fname, cmp.nonzero()))\n # else:\n # print('>> COMPARING(%s): OK'%fname)\n except FileNotFoundError:\n print(\">> COMPARING(%s): No old-tabular found, 1st time to run\" % outfname)\n run_comparison = False\n\n\n## TODO: Move into wltp/plots\n#\ndef _plotResults(\n veh_fname,\n df_g,\n df_h,\n res,\n ax,\n plot_diffs_gears_only=True,\n plot_original_gears=False,\n):\n if plot_original_gears:\n my_gear_col = \"gears_orig\"\n hz_gear_col = \"g_max\"\n else:\n my_gear_col = \"gears\"\n hz_gear_col = \"gear\"\n\n ax.grid(True)\n\n ax2 = ax.twinx()\n\n tlen = len(df_g.index)\n # ax.set_xticks(np.arange(0.0, tlen, 50.0)) NO! 
looses auto when zooming.\n\n clutch = df_g[\"clutch\"]\n clutch = clutch.nonzero()[0]\n ax.vlines(clutch, 0, 0.2)\n\n ## Add pickers on driveability lines showing the specific msg.\n #\n driveability = df_g[\"driveability\"]\n driveability_true = driveability.apply(lambda s: isinstance(s, str))\n lines = ax2.vlines(driveability_true.nonzero()[0], 2, 4, \"c\", picker=5)\n lines.set_urls(driveability[driveability_true])\n\n v_max = df_g[\"v_class\"].max()\n ax.hlines(1 / v_max, 0, tlen, color=\"0.75\")\n\n ax.plot(df_g[\"v_class\"] / v_max)\n ax.plot(df_g[\"v_target\"] / v_max, \"-.\")\n\n # ax.plot(df_g['rpm'] / df_g['rpm'].max())\n # p_req = df_g['p_required'] / df_g['p_required'].max()\n # p_req[p_req < 0] = 0\n # ax.plot(p_req)\n\n ## Plot gear diffs.\n #\n my_gears = df_g[my_gear_col]\n hz_v_real = df_h[\"v\"]\n hz_v_target = df_h[\"v_downscale\"]\n hz_gears = df_h[hz_gear_col]\n\n orig_gears = df_g[\"gears_orig\"]\n if plot_diffs_gears_only:\n diff_gears = my_gears != hz_gears\n difft = diff_gears.nonzero()[0]\n difft = set().union(\n difft,\n difft + 1,\n difft + 2,\n difft + 3,\n difft + 4,\n difft + 5,\n difft + 6,\n difft - 1,\n difft - 2,\n difft - 3,\n difft - 4,\n difft - 5,\n difft - 6,\n )\n difft = list(difft)\n my_gears = my_gears[difft]\n hz_gears = hz_gears[difft]\n ax2.plot(difft, my_gears.tolist(), \"o\", color=\"red\")\n ax2.plot(difft, orig_gears[difft].tolist(), \"v\", color=\"green\")\n ax2.plot(difft, hz_gears.tolist(), \"*\", color=\"blue\")\n else:\n ax2.plot(my_gears.tolist(), \"o\", color=\"red\")\n ax2.plot(hz_gears.tolist(), \"*\", color=\"blue\")\n\n ax.plot(df_g[\"v_real\"] / v_max)\n\n ## Add pickers on driveability lines showing the specific msg.\n #\n hz_driveability = df_h[\"gear_modification\"]\n hz_driveability_true = ~hz_driveability.apply(np.isreal)\n lines = ax2.vlines(hz_driveability_true.nonzero()[0], 0, 2, \"m\", picker=5)\n lines.set_urls(hz_driveability[hz_driveability_true])\n\n ax.plot(hz_v_target / v_max, \"--\")\n ax.plot(hz_v_real / v_max, \":\")\n\n ax.text(\n 0.7,\n 0,\n \"Diffs: %.4f\" % res,\n transform=ax.transAxes,\n bbox={\"facecolor\": \"red\", \"alpha\": 0.5, \"pad\": 10},\n )\n\n\ndef plot_diffs_with_heinz(diff_results, res, transplant_original_gears=False):\n from matplotlib import pyplot as plt\n\n def fig_onpick(event):\n pickline = event.artist\n urls = pickline.get_urls()\n rule = urls.iloc[event.ind]\n print(rule)\n text_infos.set_text(\"Rule: %s\" % rule)\n\n fig.canvas.draw()\n\n fig = plt.figure()\n text_infos = fig.text(\n 0.5,\n 0.5,\n \"\",\n transform=fig.transFigure,\n bbox={\"facecolor\": \"grey\", \"alpha\": 0.4, \"pad\": 10},\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n color=\"blue\",\n )\n fig.canvas.mpl_connect(\"pick_event\", fig_onpick)\n orig = \"Driveability\" if transplant_original_gears else \"Pre-Driveability\"\n fig.suptitle(\n \"%s: ±DIFFs: count(%i), min(%i), MEAN(%.2f±%.2f), max(%i).\"\n % (orig, res[0].sum(), res[0].min(), res[0].mean(), res[0].std(), res[0].max())\n )\n\n ## NOTE: Limit subplots to facilitate research.\n #\n # i_to_plot = paths\n # i_to_plot = paths[0:9]\n # i_to_plot = paths[5:6] + paths[7:9] + paths[14:16] + paths[23:24]\n i_to_plot = range[5:8] + range[17:18] + range[22:24]\n\n ## Decide subplot-grid dimensions.\n #\n npaths_to_plot = len(i_to_plot)\n w = math.ceil(math.sqrt(npaths_to_plot))\n h = w - 1 if ((w - 1) * w >= npaths_to_plot) else w\n\n nplotted = 0\n\n for (i, diff) in enumerate(diff_results):\n (\n inpfname,\n df_my,\n df_hz,\n 
ndiff_gears,\n ndiff_gears_accel,\n ndiff_gears_orig,\n ) = diff\n if i in i_to_plot:\n nplotted += 1\n ax = fig.add_subplot(w, h, nplotted)\n veh_name = os.path.basename(inpfname)\n ax.set_title(\"%i: %s\" % (i, veh_name), fontdict={\"fontsize\": 8})\n _plotResults(\n veh_name,\n df_my,\n df_hz,\n ndiff_gears,\n ax,\n plot_original_gears=not transplant_original_gears,\n )\n\n fig.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "pandas.read_csv", "numpy.abs", "numpy.gradient", "pandas.Panel", "pandas.DataFrame", "numpy.testing.assert_array_equal", "pandas.cut", "numpy.count_nonzero", "numpy.testing.assert_allclose", "pandas.MultiIndex.from_product", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Eitan177/StructuralMapping
[ "c20ce43de2698902d606b718c9a9fdf2296b0a52" ]
[ "code/Alignment/FREAD/prosci/loops/fread.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\nimport sys\nimport os\nimport time\nimport itertools\n\nfrom heapq import heappush, heappop\n\n# Compatibility with Python < 2.6\n#\ntry:\n from heapq import heappushpop\nexcept ImportError:\n def heappushpop(heap, item):\n heappush(heap, item)\n return heappop(heap)\n\nimport numpy\n\nfrom prosci.util.protein import Pdb, ResidueList, Protein\nfrom prosci.util.pdb3d import dist\nfrom prosci.util.geohash import GeometricHash\n\nfrom prosci.loops import ccd\nfrom prosci.loops.loopmodel import ANCHOR_LENGTH, get_max_loop_length, describe_anchors, iterate_database, get_loop_structure, add_oxygens, is_loop_length_in_database, get_min_loop_length_in_database, relabel_loop, is_clash, calculate_rmsd, is_consecutive, make_contact_gh, find_contacts, get_contact_class, find_contacts_simple, get_native_contacts\nfrom prosci.loops.esst import load_esst\n\n\n# Define consecutive residues very loosely, for purposes of checking the framework\ndef is_framework_consecutive(a, b):\n # Set max distance for consecutive C and N atoms to 2.8 Angstroms\n # ... which is very loose\n #\n return is_consecutive(a, b, maxlen_sq=7.84)\n\n\n\ndef _heapadd(heap, item, maxitems):\n if len(heap) < maxitems:\n heappush(heap, item)\n else:\n heappushpop(heap, item)\n\n\n\nclass FreadError(RuntimeError):\n pass\n\nclass BadInputError(FreadError):\n pass\n\nclass UnsupportedLoopLengthError(BadInputError):\n pass\n\nclass LoopStretchError(BadInputError):\n pass\n\nclass NonConsecutiveAnchorError(BadInputError):\n pass\n\n\nclass Fread(object):\n def __init__(self, db=None, subst_table_path=None, score_cutoff=25, open_rmsd_cutoff=1.0, closed_rmsd_cutoff=0.3, vdw_factor=0.7, close=True, verbose=False, errstream=sys.stderr, meld=True, max_melding_rmsd=1.0, nostruc=False, mutate=False, ccd_target_rmsd=0.15, ccd_iterations=5000, max_decoys=100, first_decoys=0, extension_size=sys.maxint, extension_minimum=0, calculate_contacts=False, contact_distance=6.0, contact_identity=0.8):\n self.score_cutoff = score_cutoff\n self.open_rmsd_cutoff = open_rmsd_cutoff\n self.closed_rmsd_cutoff = closed_rmsd_cutoff\n self.vdw_factor = vdw_factor\n self.close = close\n self.verbose = verbose\n self.errstream = errstream\n self.meld = meld\n self.max_melding_rmsd = max_melding_rmsd\n self.min_database_loop_length = 1\n self.no_structure_output = nostruc\n self.mutate = mutate\n self.ccd_target_rmsd = ccd_target_rmsd\n self.ccd_iterations = ccd_iterations\n self.max_decoys = max_decoys\n self.first_decoys = first_decoys\n self.extension_size = extension_size\n self.extension_minimum = extension_minimum\n self.min_b_factor = 0.0\n\n self.calculate_contacts = calculate_contacts\n self.contact_distance = contact_distance\n self.contact_identity = contact_identity\n \n self.warnings = []\n \n if db:\n self.set_db(db)\n \n self.set_subst_tables(subst_table_path)\n \n self.results = []\n self.counter = itertools.count(1)\n self.idecoy = 0\n \n def set_options(self, opts):\n for o in opts:\n if o in self.__dict__ and not o.startswith('_'):\n self.__dict__[o] = opts[o]\n \n def set_silent(self):\n #self.verbose = False\n self.errstream = open(os.devnull, 'w')\n \n def note(self, message, force=False):\n if force or self.verbose:\n self.errstream.write(str(message)+\"\\n\")\n \n def warn(self, exception, force=False):\n if isinstance(exception, str):\n exception = FreadError(exception)\n self.note(\"WARNING: \"+str(exception), force)\n self.warnings.append(exception)\n return exception\n \n def die(self, 
exception, force=True):\n raise self.warn(exception, force)\n \n \n def set_subst_tables(self, subst_table_path):\n self.subst_tables = load_esst(subst_table_path)\n \n def set_db(self, db):\n self.min_database_loop_length = get_min_loop_length_in_database(db)\n self.db = db\n \n def set_structure(self, structure):\n self.clear_results()\n p = Pdb(structure)\n self.ligand_atoms = p.ligands\n self.residues = ResidueList(p)\n add_oxygens(self.residues)\n \n \n def get_loop_index(self, residue_number, inscode=\"\", chain=None):\n if inscode:\n inscode = inscode.strip()\n if chain:\n chain = chain.strip()\n \n if isinstance(residue_number, str) and residue_number[-1].isalpha():\n ires = int(residue_number[:-1])\n inscode = residue_number[-1]\n else:\n ires = int(residue_number)\n \n start_of_loop=None\n for i,r in enumerate(self.residues):\n if r.ires == ires and r.inscode == inscode and (not chain or chain == r.chain):\n start_of_loop = i+1\n break\n \n return start_of_loop\n \n \n def get_structure_sequence(self, startindex=0, endindex=None):\n return self.residues[startindex:endindex].get_seq()\n \n \n def set_loop(self, start_of_loop, loop_sequence, resnum=False, chain=None, structure_known=None):\n self.clear_results()\n if chain is not None:\n chain = chain.strip()\n \n # We've been given a residue number instead of a start index\n if resnum:\n if isinstance(start_of_loop, str):\n if start_of_loop[-1].isalpha():\n ires = int(start_of_loop[:-1])\n inscode = start_of_loop[-1]\n else:\n ires = int(start_of_loop)\n inscode = \"\"\n else:\n ires = int(start_of_loop)\n inscode = \"\"\n \n found=False\n for i,r in enumerate(self.residues):\n if r.ires == ires and r.inscode == inscode and (not chain or chain == r.chain):\n start_of_loop = i+1\n found = True\n self.note(\"Located residue left of loop to be modelled: %d%s. Loop starting index: %d. 
Sequence surrounding start of loop: %s|%s\" % (r.ires, r.inscode, start_of_loop, self.residues[max(0,start_of_loop-8):start_of_loop].get_seq(), self.residues[start_of_loop:start_of_loop+8].get_seq()))\n break\n \n if not found:\n msg = \"Residue before start of loop (residue number %d%s\" % (ires, inscode)\n if chain:\n msg += \", chain '%s'\" % chain\n msg += \") not found in query structure\"\n self.die(BadInputError(msg))\n \n if start_of_loop < ANCHOR_LENGTH or start_of_loop > len(self.residues) - ANCHOR_LENGTH:\n self.die(BadInputError(\"Cannot model loop closer than %d residues to the terminus (need a complete anchor on both sides).\" % (ANCHOR_LENGTH)))\n \n # Assuming no coordinates are present in file, start and end of the loop are the same\n end_of_loop = start_of_loop\n \n if structure_known is not None:\n # We were told if loop coordinates are present in input file.\n # Skip auto-detection.\n #\n self.loop_structure_is_known = structure_known\n if structure_known:\n self.note(\"User tells me loop structure is present.\")\n else:\n self.note(\"User tells me loop structure is not present.\")\n # Auto-detect if loop coordinates are present in input file.\n else:\n # Are the coordinates are in the file?\n self.loop_structure_is_known = is_framework_consecutive(self.residues[start_of_loop-1],\n self.residues[start_of_loop])\n self.note(\"Is loop structure present: \"+str(self.loop_structure_is_known))\n \n # If we have the native loop structure, adjust end_of_loop.\n # Also do some additional sanity checks\n #\n if self.loop_structure_is_known:\n end_of_loop += len(loop_sequence)\n strucseq = \"\"\n for i,r in enumerate(self.residues[start_of_loop:end_of_loop]):\n strucseq += r.get_seq()\n if (not self.mutate) and (loop_sequence != strucseq):\n self.die(BadInputError(\"Residues differ between sequence and structure input: %s, %s\" % (loop_sequence, strucseq)))\n self.note(\"Loop sequence of given ATOM co-ordinates: %s\" % (strucseq))\n del strucseq\n \n if end_of_loop != start_of_loop:\n if len(loop_sequence) != end_of_loop - start_of_loop:\n self.die(BadInputError(\"Loop co-ordinates present in input, but number of residues (%d) does not match length of input loop sequence (%d).\" % (end_of_loop - start_of_loop, len(loop_sequence))))\n if end_of_loop > len(self.residues) - ANCHOR_LENGTH:\n self.die(BadInputError(\"Cannot model loop closer than %d residues to the terminus (need a complete anchor on both sides).\" % (ANCHOR_LENGTH)))\n \n \n # Set ourselves up for loop extension\n #\n self.seq = self.loop_sequence = loop_sequence\n self.s = self.start_of_loop = start_of_loop\n self.e = self.end_of_loop = end_of_loop\n \n self.verify_anchors()\n \n while len(self.seq) < self.min_database_loop_length:\n try:\n self.verify_stretch()\n except LoopStretchError:\n pass\n self.extend_loop()\n \n while True:\n try:\n self.verify_stretch()\n break\n except LoopStretchError as e:\n try:\n self.extend_loop()\n except BadInputError:\n self.die(LoopStretchError(str(e)+\" Cannot extend gap further.\"))\n \n def extend_loop(self):\n \"\"\"Extend loop by one residue on each side. 
Raises NonConsecutiveAnchorError() or UnsupportedLoopLengthError() upon failure.\n \"\"\"\n start_of_loop = self.start_of_loop\n end_of_loop = self.end_of_loop\n s = self.s - 1\n e = self.e + 1\n \n length = (start_of_loop - s) + (e - end_of_loop) + len(self.loop_sequence)\n\n if s<ANCHOR_LENGTH or not is_framework_consecutive(self.residues[s-ANCHOR_LENGTH], self.residues[s-ANCHOR_LENGTH+1]) or e>len(self.residues)-ANCHOR_LENGTH or not is_framework_consecutive(self.residues[e+ANCHOR_LENGTH-2], self.residues[e+ANCHOR_LENGTH-1]):\n self.die(NonConsecutiveAnchorError(\"Cannot extend loop to length %d, due to gaps in the query structure or proximity to the termini\"%(length)))\n \n \n if not is_loop_length_in_database(self.db, length):\n self.die(UnsupportedLoopLengthError(\"Cannot extend loop to length %d, due to database limitations\"%(length)))\n \n seq = \"\"\n for i in xrange(s, start_of_loop):\n seq += self.residues[i].get_seq()\n seq += self.loop_sequence\n for i in xrange(end_of_loop, e):\n seq += self.residues[i].get_seq()\n \n assert len(seq) == length, str([s, start_of_loop, end_of_loop, e, length, len(seq), seq, dbdir, pdb_file, start_of_loop, loop_sequence])\n \n self.s = s\n self.e = e\n self.seq = seq\n \n self.note(\"Extending loop to length %d\" % (length))\n \n \n def verify_anchors(self):\n # Ensure anchors are consecutive stretches of amino acids\n for x in xrange(self.s-ANCHOR_LENGTH+1, self.s):\n if not is_framework_consecutive(self.residues[x-1], self.residues[x]):\n self.die(NonConsecutiveAnchorError(\"Anchor residues not consecutive in framework structure: residue index %s, %s\"%(self.s, self.residues.code)))\n for x in xrange(self.e, self.e+ANCHOR_LENGTH-1):\n if not is_framework_consecutive(self.residues[x], self.residues[x+1]):\n self.die(NonConsecutiveAnchorError(\"Anchor residues not consecutive in framework structure: residue index %s, %s\"%(self.s, self.residues.code)))\n \n def verify_stretch(self):\n # Ensure anchors are not too far apart for loop to stretch gap\n anchor_distance = dist(self.residues[self.s-1].C, self.residues[self.e].N)\n if anchor_distance > get_max_loop_length(len(self.seq)) * 1.05:\n self.die(LoopStretchError(\"Loop span (%.2f Angstrom) too large to be closed using %d residues. Trying to extend gap.\" % (anchor_distance, len(self.seq))))\n \n def verify_length(self):\n # Ensure loop is not too long for the database \n if not is_loop_length_in_database(self.db, len(self.seq)):\n self.die(UnsupportedLoopLengthError(\"Cannot model loop of length %d, due to database limitations\"%(len(self.seq))))\n \n def verify(self):\n self.verify_anchors()\n self.verify_stretch()\n self.verify_length()\n \n \n def model(self, top=None, stop_after=None, f_rank_decoy=None, f_stop_search=None, f_filter=None):\n \"\"\"Model loop using the FREAD algorithm. This is the method handling most of the work. 
Raises UnsupportedLoopLengthError if loop length is not supported.\n \"\"\"\n if top is None:\n top = self.max_decoys\n \n if stop_after is None:\n stop_after = self.first_decoys\n \n if top <= 0:\n top = sys.maxint\n \n if stop_after > 0 and self.idecoy >= stop_after:\n return self.results\n \n if not f_rank_decoy:\n f_rank_decoy = FREAD_RANKING\n if not f_stop_search:\n f_stop_search = lambda x: False\n if not f_filter:\n f_filter = lambda x: True\n \n while len(self.seq) < self.min_database_loop_length:\n self.extend_loop()\n \n self.verify()\n \n meld_anchors = self.meld # and (self.open_rmsd_cutoff <= self.max_melding_rmsd)\n close_loop = self.close\n \n verbose = self.verbose\n residues = self.residues\n start_of_loop = self.s\n end_of_loop = self.e\n loop_sequence = self.seq\n \n start = start_of_loop - ANCHOR_LENGTH\n end = end_of_loop + ANCHOR_LENGTH\n loop_length = len(loop_sequence)\n total_length = loop_length + 2*ANCHOR_LENGTH\n \n self.note(\"Loop region: N(%4d, %4d) C(%4d,%4d) Loop(%3d,'%s')\" % (start, start_of_loop, end_of_loop, end, loop_length, loop_sequence))\n \n ############################################################################\n \n # Get query anchors and prepare for clash checking #\n \n # Get anchor coordinates\n #\n anchor_N = residues[start:start_of_loop]\n anchor_C = residues[end_of_loop:end]\n \n \n # Describe anchors in query structure\n #\n anchor_description, query_transform = describe_anchors(anchor_N, anchor_C, loop_length)\n \n # Build a GeometricHash of the query structure (without the loop region), for\n # clash checking\n #\n coords = []\n gh_atoms = []\n for r in residues[:start]+residues[end:]:\n for a in r:\n #if a.atom in (\"N\", \"CA\", \"C\", \"O\", \"CB\"):\n coords.append(a.xyz)\n gh_atoms.append(a)\n gh = GeometricHash(numpy.array(coords))\n del coords\n \n # Inter-residue contacts\n if self.calculate_contacts:\n p = Protein((residues[:start]+residues[end:]).split_chains())\n p.ligands = Protein(ResidueList(self.ligand_atoms).split_chains())\n contact_gh, contact_gh_atoms = make_contact_gh(p)\n \n \n ############################################################################\n \n # Search the database #\n \n results = [] # Heap Queue of the top-ranking decoys\n for decoy in self.results:\n \n _heapadd(results, (f_rank_decoy(decoy), decoy.idecoy, decoy), top)\n \n \n for decoy in iterate_database(self.db, loop_length, self.subst_tables, anchor_description, loop_sequence, self.open_rmsd_cutoff, self.score_cutoff):\n \n if len(results) >= top:\n if decoy.internal_rmsd > results[0][-1].anchor_rmsd_open:\n continue\n \n # Retrieve loop structure from database\n decoy_residues = get_loop_structure(self.db, decoy.struc, decoy.start, total_length)\n \n assert len(decoy_residues) == total_length\n \n \n # Superimpose anchors and check anchor RMSD before starting\n anchor_rmsd_open = ccd.superimpose(decoy_residues[:ANCHOR_LENGTH]+decoy_residues[-ANCHOR_LENGTH:], anchor_N+anchor_C, decoy_residues)\n \n if anchor_rmsd_open > self.open_rmsd_cutoff:\n self.note(\"%s_%d_%d : Anchor RMSD too large: %.3f\"%(decoy.struc, decoy.start, loop_length, anchor_rmsd_open))\n continue\n \n if len(results) >= top:\n if anchor_rmsd_open > results[0][-1].anchor_rmsd_open:\n continue\n \n \n # Save start residue number of loop, in database structure\n decoy.startres = decoy_residues[ANCHOR_LENGTH].ires\n decoy.startinscode = decoy_residues[ANCHOR_LENGTH].inscode\n \n # Relabel residues and discard non-matching atoms\n 
relabel_loop(decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH], loop_sequence, prevatom=anchor_N[-1].CA, nextatom=anchor_C[0].CA)\n \n \n if self.loop_structure_is_known:\n loop_rmsd_open = calculate_rmsd(decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH], residues[start_of_loop:end_of_loop])\n \n \n \n # Are we allowed to meld this decoy?\n #\n meld_this_decoy = meld_anchors and (anchor_rmsd_open <= self.max_melding_rmsd) #and not self.no_structure_output\n close_this_decoy = close_loop and anchor_rmsd_open > self.ccd_target_rmsd and (not meld_anchors or anchor_rmsd_open > self.max_melding_rmsd)\n \n #if not self.no_structure_output or close_this_decoy:\n decoy.nanchor = anchor_N.deep_copy()\n decoy.canchor = anchor_C.deep_copy()\n \n if meld_this_decoy:\n meld(decoy.nanchor, decoy_residues[:ANCHOR_LENGTH])\n meld(decoy.canchor, decoy_residues[-ANCHOR_LENGTH:], invertWeights=True)\n \n if not close_this_decoy:\n anchor_rmsd_closed = anchor_rmsd_open\n iterations = 0\n else:\n # Loop closure\n anchor_rmsd_closed, iterations = ccd.close_loop(decoy.nanchor, decoy_residues, decoy.canchor, target_rmsd=self.ccd_target_rmsd, iterations=self.ccd_iterations)\n if anchor_rmsd_closed > self.closed_rmsd_cutoff:\n self.note(\"Failed to close loop: %s_%d_%d\"%(decoy.struc, decoy.start, loop_length))\n continue\n \n # Cut off the decoy loop's anchors\n decoy_residues = decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH]\n \n \n #if not self.no_structure_output:\n # Restore main chain oxygens, which got lost during the melding/closing procedure\n decoy_residues[-1].O = None # This oxygen is wrong anyway, so delete it\n add_oxygens(decoy.nanchor+decoy_residues[:1], force=True)\n add_oxygens(decoy_residues[-1:]+decoy.canchor+residues[end:end+1], force=True)\n \n \n # Clash check\n is_clashing = is_clash(gh, gh_atoms, decoy_residues, self.vdw_factor)\n if is_clashing:\n self.note(\"Clash detected in decoy: %s_%d_%d\"%(decoy.struc, decoy.start, loop_length))\n continue\n \n \n if self.loop_structure_is_known:\n decoy.loop_rmsd_open = loop_rmsd_open\n if not iterations:\n decoy.loop_rmsd_closed = loop_rmsd_open\n else:\n decoy.loop_rmsd_closed = calculate_rmsd(decoy_residues, residues[start_of_loop:end_of_loop])\n \n decoy.length = loop_length\n decoy.anchor_rmsd_open = anchor_rmsd_open\n decoy.iterations = iterations\n \n #if not self.no_structure_output:\n decoy.loop = decoy_residues\n \n \n if self.calculate_contacts:\n contacts = \"\"\n for i, r in enumerate(decoy_residues):\n contact_atoms = find_contacts(contact_gh, r, maxdist=self.contact_distance)\n contact_atoms.extend(find_contacts_simple(decoy.nanchor+decoy_residues+decoy.canchor, i, maxdist=self.contact_distance))\n contacts += get_contact_class(r.chain, contact_atoms)\n \n decoy.native_contacts = get_native_contacts(self.db, decoy.struc, decoy.start, total_length)\n \n id = 0\n for x,y in zip(decoy.native_contacts, contacts):\n if x == y:\n id += 1\n decoy.contact_identity = float(id) / len(contacts)\n decoy.contacts = contacts\n \n if (self.contact_identity > 0) and (decoy.contact_identity < self.contact_identity):\n continue\n \n \n # Get per-residue substitution scores\n #\n tables = self.subst_tables.tables\n ascii2index = self.subst_tables.ascii2index\n seqmap = tuple([ascii2index[ord(s)] for s in loop_sequence])\n dihed = decoy.dihedrals\n score_list = []\n for i,x in enumerate(decoy.seq):\n score_list.append(tables[int(dihed[i])][seqmap[i]][ascii2index[ord(x)]])\n assert sum(score_list) == decoy.score\n decoy.residue_scores = score_list\n \n # Save 
per-residue scores in the occupancy column and\n # the total score in the B factor column\n #\n #\n for i,r in enumerate(decoy.loop):\n for a in r:\n a.b = self.min_b_factor + max(0, 30 - decoy.score)\n # Average score over a 3 residue window...\n # because that's how MEDELLER does it\n sc = score_list[max(0,i-1):i+2]\n a.occup = float(sum(sc))/len(sc)\n \n \n \n \n if not f_filter(decoy):\n self.note(\"User-defined filter is excluding decoy: %s_%d_%d\"%(decoy.struc, decoy.start, loop_length))\n continue\n \n self.idecoy = decoy.idecoy = next(self.counter)\n # Save result\n _heapadd(results, (f_rank_decoy(decoy), decoy.idecoy, decoy), top)\n \n if stop_after > 0 and self.idecoy >= stop_after:\n break\n \n if f_stop_search(decoy):\n break\n \n self.note(\"%d decoys found\"%(len(results)))\n \n self.results = []\n while results:\n r = heappop(results)\n self.results.append(r[-1])\n self.results.reverse()\n return self.results\n \n \n def clear_results(self):\n self.results = []\n self.warnings = []\n self.counter = itertools.count(1)\n self.idecoy = 0\n \n \n def write_summary(self, outstream=sys.stdout, write_decoy_sequence=False):\n \"\"\"Write summary information to specified output stream.\n \"\"\"\n for decoy in self.results:\n outstream.write(\"%s_%d%s_%d\\t%d\\t%.3f\\t%.3f\\t%d\\t%s\" % (decoy.struc, decoy.startres, decoy.startinscode, decoy.length, decoy.score, decoy.internal_rmsd, decoy.anchor_rmsd_open, decoy.iterations,decoy.seq))\n if write_decoy_sequence:\n outstream.write(\"\\t%s\" % (decoy.seq))\n if self.calculate_contacts:\n outstream.write(\"\\t%s\" % (decoy.contacts))\n outstream.write(\"\\t%s\" % (decoy.native_contacts))\n outstream.write(\"\\t%.3f\" % (decoy.contact_identity))\n if self.loop_structure_is_known:\n outstream.write(\"\\t%.3f\\t%.3f\" % (decoy.loop_rmsd_open, decoy.loop_rmsd_closed))\n outstream.write(\"\\n\")\n \n \n def write_decoy_structures(self, out_dir, top=0, suffix=\".loop.atm\", idfilter=[]):\n \"\"\"Write decoy structure files (PDB format) to specified directory.\n \"\"\"\n if self.no_structure_output:\n self.die(\"Cannot write decoy structures as structure output is disabled\")\n\n if out_dir:\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n \n for i, decoy in enumerate(self.results):\n if top > 0 and i >= top:\n break\n \n decoyname = \"%s_%d%s_%d\"%(decoy.struc, decoy.startres, decoy.startinscode, decoy.length)\n \n if idfilter and (decoyname not in idfilter):\n continue\n \n decoyfile = os.path.join(out_dir, decoyname+suffix)\n f = open(decoyfile, \"w\")\n try:\n for r in decoy.nanchor:\n f.write(str(r))\n for r in decoy.loop:\n f.write(str(r))\n for r in decoy.canchor:\n f.write(str(r))\n finally:\n f.close()\n \n \n def assemble_model(self, decoy):\n # Start and end of loop region in model\n start = self.s - ANCHOR_LENGTH\n end = self.e + ANCHOR_LENGTH\n \n if isinstance(decoy, int):\n decoy = self.results[decoy]\n if self.no_structure_output:\n self.die(\"Cannot write decoy structures as structure output is disabled\")\n \n model = ResidueList([])\n for i in xrange(start):\n model.append(self.residues[i].copy())\n model.extend(decoy.nanchor)\n model.extend(decoy.loop)\n model.extend(decoy.canchor)\n for i in xrange(end, len(self.residues)):\n model.append(self.residues[i].copy())\n \n # Replace main chain oxygen before the loop, to ensure the peptide bond there is planar\n model[start-1].O = None\n add_oxygens(model, start=start-1, end=start, force=True)\n \n return model\n \n \n def write_model_structures(self, out_dir, top=0, 
suffix=\".model.atm\", idfilter=[]):\n \"\"\"Write model structure files (PDB format), including decoy residues, to specified directory.\n \"\"\"\n \n if self.no_structure_output:\n self.die(\"Cannot write model structures as structure output is disabled\")\n \n if out_dir:\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n else:\n out_dir = \"\"\n \n for i, decoy in enumerate(self.results):\n if top > 0 and i >= top:\n break\n \n decoyname = \"%s_%d%s_%d\"%(decoy.struc, decoy.startres, decoy.startinscode, decoy.length)\n \n if idfilter and (decoyname not in idfilter):\n continue\n \n decoyfile = os.path.join(out_dir, decoyname+suffix)\n \n model = self.assemble_model(decoy)\n \n f = open(decoyfile, \"w\")\n try:\n f.write(str(model))\n finally:\n f.close()\n\n\n def open_errstream(self, filename):\n if filename == \"-\":\n self.errstream = sys.stderr\n else:\n self.errstream = open(filename, \"w\")\n\n\n def close_errstream(self):\n if self.errstream != sys.stderr:\n self.errstream.close()\n self.set_silent()\n \n \n def automodel_loop(self, start_of_loop, loop_sequence,\n loopsuffix = \"\",\n dbdir = '.',\n strucdir = 'decoys',\n summary_file_name = 'summary',\n write_decoys = True,\n write_models = True,\n write_decoy_sequence = False,\n resnum = False,\n chain = None,\n structure_known = None,\n f_rank_decoy = None,\n f_stop_search = None,\n f_filter = None,\n idfilter = [],\n **kwargs):\n \"\"\"Search the given database and return a list of decoys for the given loop.\n \"\"\"\n \n # Set options defined in the Fread constructor\n self.set_options(kwargs)\n \n if self.extension_size < 0:\n max_extension = sys.maxint\n else:\n max_extension = self.extension_size\n \n if strucdir is not None:\n strucdir = strucdir.strip()\n if (not strucdir) or (not write_decoys and not write_models):\n strucdir = None\n self.no_structure_output = True\n \n for db_group in dbdir.split(\":\"): # try these databases until we find something\n databases = db_group.split(\"|\") # combine results from all these databases\n self.set_db(databases[0])\n \n try:\n\t \n self.set_loop(start_of_loop, loop_sequence.upper(), resnum=resnum, chain=chain, structure_known=structure_known)\n except FreadError:\n break\n \n # Extend loop if extension_minimum is set\n #\n for i in xrange(self.extension_minimum):\n try:\n self.extend_loop()\n except FreadError:\n break\n \n # Try to model, and extend loop if nothing was found\n anyresults = False\n for db in databases:\n\t \n self.set_db(db)\n try:\n\t \n anyresults |= bool(self.model(f_rank_decoy=f_rank_decoy, f_stop_search=f_stop_search, f_filter=f_filter))\n except FreadError:\n break\n \n for i in xrange(max_extension):\n if anyresults:\n break\n try:\n self.extend_loop()\n except FreadError:\n break\n for db in databases:\n self.set_db(db)\n\t \n anyresults |= bool(self.model(f_rank_decoy=f_rank_decoy, f_stop_search=f_stop_search, f_filter=f_filter))\n \n if anyresults:\n break\n \n # Write results to STDOUT or a file, if one was specified\n if summary_file_name:\n if summary_file_name == \"-\":\n self.write_summary(sys.stdout, write_decoy_sequence=write_decoy_sequence)\n else:\n root, ext = os.path.splitext(summary_file_name)\n outstream = open(root + loopsuffix + ext, \"w\")\n self.write_summary(outstream, write_decoy_sequence=write_decoy_sequence)\n outstream.close()\n \n if not self.no_structure_output:\n if write_decoys:\n self.write_decoy_structures(strucdir + loopsuffix, suffix=\".loop.pdb\", idfilter = idfilter)\n if write_models:\n 
self.write_model_structures(strucdir + loopsuffix, suffix=\".model.pdb\", idfilter = idfilter)\n \n return self.results\n\n\ndef meld(fragScaffold, fragPrediction, invertWeights=False):\n \"\"\"Averages the coordinates of the two Pdb arguments. Move the first object's co-ordinates onto the averaged position.\n \n By default, the first object is assumed to be part of the N-terminal fragment, the second is part of the C-terminal fragment. This can be reversed by setting invertWeights=True.\"\"\"\n \n resS = fragScaffold\n resP = fragPrediction\n L = len(resS)\n \n assert len(resS) == len(resP)\n \n def averageCoord(P, S):\n # P = coordinate of prediction\n # S = coordinate of scaffold\n # D = distance (in residues) from (loop+anchor)-fragment end\n # L = anchor length\n return 1.0/(L+1) * (D*P + (L+1-D)*S)\n \n for i, (rS, rP) in enumerate(zip(resS, resP)):\n if invertWeights:\n D = len(resS)-i\n else:\n D = i+1\n \n newN = averageCoord(rP.N.xyz, rS.N.xyz)\n newCA = averageCoord(rP.CA.xyz, rS.CA.xyz)\n newC = averageCoord(rP.C.xyz, rS.C.xyz)\n \n T_from, T_to, rotmat = ccd.get_rotmat([rS.N.xyz, rS.CA.xyz, rS.C.xyz], [newN, newCA, newC])\n \n rS.O = None # Remove main chain oxygen - can regenerate this later\n \n for a in rS:\n a.xyz = numpy.dot(a.xyz - T_from, rotmat) + T_to\n \n\ndef FREAD_RANKING(decoy):\n \"\"\"Returns a comparable object (a tuple of scores) used for ranking a given decoy.\n \n Bigger values will be ranked higher.\"\"\"\n return (-decoy.anchor_rmsd_open, decoy.score, -decoy.internal_rmsd, -decoy.iterations)\n\n\ndef FREAD_RANKING_BY_ESSS(decoy):\n \"\"\"Returns a comparable object (a tuple of scores) used for ranking a given decoy.\n \n Bigger values will be ranked higher.\"\"\"\n return (decoy.score, -decoy.anchor_rmsd_open, -decoy.internal_rmsd, -decoy.iterations)\n" ]
[ [ "numpy.dot", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xudong-sun/mxnet
[ "fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf", "fe42d30d5885dd576cb871fd70594c53efce9b42" ]
[ "example/reinforcement-learning/a3c/a3c.py", "tests/python/unittest/test_random.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nimport mxnet as mx\nimport numpy as np\nimport rl_data\nimport sym\nimport argparse\nimport logging\nimport os\nimport gym\nfrom datetime import datetime\nimport time\nimport sys\ntry:\n from importlib import reload\nexcept ImportError:\n pass\n\nparser = argparse.ArgumentParser(description='Traing A3C with OpenAI Gym')\nparser.add_argument('--test', action='store_true', help='run testing', default=False)\nparser.add_argument('--log-file', type=str, help='the name of log file')\nparser.add_argument('--log-dir', type=str, default=\"./log\", help='directory of the log file')\nparser.add_argument('--model-prefix', type=str, help='the prefix of the model to load')\nparser.add_argument('--save-model-prefix', type=str, help='the prefix of the model to save')\nparser.add_argument('--load-epoch', type=int, help=\"load the model on an epoch using the model-prefix\")\n\nparser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')\nparser.add_argument('--gpus', type=str, help='the gpus will be used, e.g \"0,1,2,3\"')\n\nparser.add_argument('--num-epochs', type=int, default=120, help='the number of training epochs')\nparser.add_argument('--num-examples', type=int, default=1000000, help='the number of training examples')\nparser.add_argument('--batch-size', type=int, default=32)\nparser.add_argument('--input-length', type=int, default=4)\n\nparser.add_argument('--lr', type=float, default=0.0001)\nparser.add_argument('--wd', type=float, default=0)\nparser.add_argument('--t-max', type=int, default=4)\nparser.add_argument('--gamma', type=float, default=0.99)\nparser.add_argument('--beta', type=float, default=0.08)\n\nargs = parser.parse_args()\n\ndef log_config(log_dir=None, log_file=None, prefix=None, rank=0):\n reload(logging)\n head = '%(asctime)-15s Node[' + str(rank) + '] %(message)s'\n if log_dir:\n logging.basicConfig(level=logging.DEBUG, format=head)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n if not log_file:\n log_file = (prefix if prefix else '') + datetime.now().strftime('_%Y_%m_%d-%H_%M.log')\n log_file = log_file.replace('/', '-')\n else:\n log_file = log_file\n log_file_full_name = os.path.join(log_dir, log_file)\n handler = logging.FileHandler(log_file_full_name, mode='w')\n formatter = logging.Formatter(head)\n handler.setFormatter(formatter)\n logging.getLogger().addHandler(handler)\n logging.info('start with arguments %s', args)\n else:\n logging.basicConfig(level=logging.DEBUG, format=head)\n logging.info('start with arguments %s', args)\n\ndef train():\n # kvstore\n kv = mx.kvstore.create(args.kv_store)\n\n model_prefix = args.model_prefix\n if model_prefix is not None:\n model_prefix += \"-%d\" % (kv.rank)\n 
save_model_prefix = args.save_model_prefix\n if save_model_prefix is None:\n save_model_prefix = model_prefix\n\n log_config(args.log_dir, args.log_file, save_model_prefix, kv.rank)\n\n devs = mx.cpu() if args.gpus is None else [\n mx.gpu(int(i)) for i in args.gpus.split(',')]\n\n epoch_size = args.num_examples / args.batch_size\n\n if args.kv_store == 'dist_sync':\n epoch_size /= kv.num_workers\n\n # disable kvstore for single device\n if 'local' in kv.type and (\n args.gpus is None or len(args.gpus.split(',')) is 1):\n kv = None\n\n # module\n dataiter = rl_data.GymDataIter('Breakout-v0', args.batch_size, args.input_length, web_viz=True)\n net = sym.get_symbol_atari(dataiter.act_dim)\n module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)\n module.bind(data_shapes=dataiter.provide_data,\n label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],\n grad_req='add')\n\n # load model\n\n if args.load_epoch is not None:\n assert model_prefix is not None\n _, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.load_epoch)\n else:\n arg_params = aux_params = None\n\n # save model\n checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)\n\n init = mx.init.Mixed(['fc_value_weight|fc_policy_weight', '.*'],\n [mx.init.Uniform(0.001), mx.init.Xavier(rnd_type='gaussian', factor_type=\"in\", magnitude=2)])\n module.init_params(initializer=init,\n arg_params=arg_params, aux_params=aux_params)\n\n # optimizer\n module.init_optimizer(kvstore=kv, optimizer='adam',\n optimizer_params={'learning_rate': args.lr, 'wd': args.wd, 'epsilon': 1e-3})\n\n # logging\n np.set_printoptions(precision=3, suppress=True)\n\n T = 0\n dataiter.reset()\n score = np.zeros((args.batch_size, 1))\n final_score = np.zeros((args.batch_size, 1))\n for epoch in range(args.num_epochs):\n if save_model_prefix:\n module.save_params('%s-%04d.params'%(save_model_prefix, epoch))\n\n\n for _ in range(int(epoch_size/args.t_max)):\n tic = time.time()\n # clear gradients\n for exe in module._exec_group.grad_arrays:\n for g in exe:\n g[:] = 0\n\n S, A, V, r, D = [], [], [], [], []\n for t in range(args.t_max + 1):\n data = dataiter.data()\n module.forward(mx.io.DataBatch(data=data, label=None), is_train=False)\n act, _, val = module.get_outputs()\n V.append(val.asnumpy())\n if t < args.t_max:\n act = act.asnumpy()\n act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]\n reward, done = dataiter.act(act)\n S.append(data)\n A.append(act)\n r.append(reward.reshape((-1, 1)))\n D.append(done.reshape((-1, 1)))\n\n err = 0\n R = V[args.t_max]\n for i in reversed(range(args.t_max)):\n R = r[i] + args.gamma * (1 - D[i]) * R\n adv = np.tile(R - V[i], (1, dataiter.act_dim))\n\n batch = mx.io.DataBatch(data=S[i], label=[mx.nd.array(A[i]), mx.nd.array(R)])\n module.forward(batch, is_train=True)\n\n pi = module.get_outputs()[1]\n h = -args.beta*(mx.nd.log(pi+1e-7)*pi)\n out_acts = np.amax(pi.asnumpy(), 1)\n out_acts=np.reshape(out_acts,(-1,1))\n out_acts_tile=np.tile(-np.log(out_acts + 1e-7),(1, dataiter.act_dim))\n module.backward([mx.nd.array(out_acts_tile*adv), h])\n\n print('pi', pi[0].asnumpy())\n print('h', h[0].asnumpy())\n err += (adv**2).mean()\n score += r[i]\n final_score *= (1-D[i])\n final_score += score * D[i]\n score *= 1-D[i]\n T += D[i].sum()\n\n module.update()\n logging.info('fps: %f err: %f score: %f final: %f T: 
%f'%(args.batch_size/(time.time()-tic), err/args.t_max, score.mean(), final_score.mean(), T))\n print(score.squeeze())\n print(final_score.squeeze())\n\ndef test():\n log_config()\n\n devs = mx.cpu() if args.gpus is None else [\n mx.gpu(int(i)) for i in args.gpus.split(',')]\n\n # module\n dataiter = robo_data.RobosimsDataIter('scenes', args.batch_size, args.input_length, web_viz=True)\n print(dataiter.provide_data)\n net = sym.get_symbol_thor(dataiter.act_dim)\n module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)\n module.bind(data_shapes=dataiter.provide_data,\n label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],\n for_training=False)\n\n # load model\n assert args.load_epoch is not None\n assert args.model_prefix is not None\n module.load_params('%s-%04d.params'%(args.model_prefix, args.load_epoch))\n\n N = args.num_epochs * args.num_examples / args.batch_size\n\n R = 0\n T = 1e-20\n score = np.zeros((args.batch_size,))\n for t in range(N):\n dataiter.clear_history()\n data = dataiter.next()\n module.forward(data, is_train=False)\n act = module.get_outputs()[0].asnumpy()\n act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]\n dataiter.act(act)\n time.sleep(0.05)\n _, reward, _, done = dataiter.history[0]\n T += done.sum()\n score += reward\n R += (done*score).sum()\n score *= (1-done)\n\n if t % 100 == 0:\n logging.info('n %d score: %f T: %f'%(t, R/T, T))\n\n\nif __name__ == '__main__':\n if args.test:\n test()\n else:\n train()\n\n\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport math\nimport itertools\nimport mxnet as mx\nfrom mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf\nimport numpy as np\nimport random as rnd\nfrom common import setup_module, with_seed, random_seed\nimport scipy.stats as ss\n\ndef same(a, b):\n return np.sum(a != b) == 0\n\ndef check_with_device(device, dtype):\n # The thresholds chosen for the tests are too loose. 
We will rely on the other tests to test the samples from the\n # generators.\n tol = 0.1\n symbols = [\n {\n 'name': 'normal',\n 'symbol': mx.sym.random.normal,\n 'ndop': mx.nd.random.normal,\n 'params': { 'loc': 10.0, 'scale': 0.5 },\n 'inputs': [ ('loc',[ [ 0.0, 2.5 ], [ -9.75, -7.0 ] ]) , ('scale',[ [ 1.0, 3.7 ], [ 4.2, 1.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)\n ]\n },\n {\n 'name': 'uniform',\n 'symbol': mx.sym.random.uniform,\n 'ndop': mx.nd.random.uniform,\n 'params': { 'low': -1.5, 'high': 3.0 },\n 'inputs': [ ('low', [ [ 0.0, 2.5 ], [ -9.75, -1.0 ] ]) , ('high', [ [ 1.0, 3.7 ], [ 4.2, 10.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - (params['low'] + params['high']) / 2.0, tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(1.0 / 12.0) * (params['high'] - params['low']), tol)\n ]\n },\n {\n 'name': 'gamma',\n 'symbol': mx.sym.random.gamma,\n 'ndop': mx.nd.random.gamma,\n 'params': { 'alpha': 9.0, 'beta': 0.5 },\n 'inputs': [ ('alpha', [ [ 0.0, 2.5 ], [ 9.75, 11.0 ] ]) , ('beta', [ [ 1.0, 0.7 ], [ 0.5, 0.3 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)\n ]\n },\n {\n 'name': 'exponential',\n 'symbol': mx.sym.random.exponential,\n 'ndop': mx.nd.random.exponential,\n 'params': { 'scale': 1.0/4.0 },\n 'inputs': [ ('scale', [ [ 1.0/1.0, 1.0/8.5 ], [ 1.0/2.7 , 1.0/0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['scale'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)\n ]\n },\n {\n 'name': 'poisson',\n 'symbol': mx.sym.random.poisson,\n 'ndop': mx.nd.random.poisson,\n 'params': { 'lam': 4.0 },\n 'inputs': [ ('lam', [ [ 25.0, 8.5 ], [ 2.7 , 0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)\n ]\n },\n {\n 'name': 'neg-binomial',\n 'symbol': mx.sym.random.negative_binomial,\n 'ndop': mx.nd.random.negative_binomial,\n 'params': { 'k': 3, 'p': 0.4 },\n 'inputs': [ ('k', [ [ 3, 4 ], [ 5 , 6 ] ]) , ('p', [ [ 0.4 , 0.77 ], [ 0.5, 0.84 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) / params['p'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)\n ]\n },\n {\n 'name': 'gen-neg-binomial',\n 'symbol': mx.sym.random.generalized_negative_binomial,\n 'ndop': mx.nd.random.generalized_negative_binomial,\n 'params': { 'mu': 2.0, 'alpha': 0.3 },\n 'inputs': [ ('mu', [ [ 2.0, 2.5 ], [ 1.3, 1.9 ] ]) , ('alpha', [ [ 1.0, 0.1 ], [ 0.2, 0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)\n ]\n }\n\n ]\n\n # Create enough samples such that we get a meaningful distribution.\n shape = (500, 500)\n for symbdic in symbols:\n name = symbdic['name']\n ndop = symbdic['ndop']\n\n # check directly\n params = symbdic['params'].copy()\n params.update(shape=shape, dtype=dtype, 
ctx=device)\n mx.random.seed(128)\n ret1 = ndop(**params).asnumpy()\n mx.random.seed(128)\n ret2 = ndop(**params).asnumpy()\n assert same(ret1, ret2), \\\n \"ndarray test: `%s` should give the same result with the same seed\" % name\n\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(ret1, params)) < tol, \"ndarray test: %s check for `%s` did not pass\" % (check_name, name)\n\n # check multi-distribution sampling\n params = {'shape': shape, 'dtype': dtype, 'ctx': device}\n params.update({k : mx.nd.array(v, ctx=device, dtype=dtype) for k, v in symbdic['inputs']})\n mx.random.seed(128)\n ret1 = ndop(**params).asnumpy()\n mx.random.seed(128)\n ret2 = ndop(**params).asnumpy()\n assert same(ret1, ret2), \\\n \"ndarray test: `%s` should give the same result with the same seed\" % name\n for i in range(2):\n for j in range(2):\n stats = {k : v[i][j] for k, v in symbdic['inputs']}\n for check_name, check_func, tol in symbdic['checks']:\n err = np.abs(check_func(ret2[i,j], stats))\n assert err < tol, \"%f vs %f: symbolic test: %s check for `%s` did not pass\" % (err, tol, check_name, name)\n\n # check symbolic\n symbol = symbdic['symbol']\n X = mx.sym.Variable(\"X\")\n params = symbdic['params'].copy()\n params.update(shape=shape, dtype=dtype)\n Y = symbol(**params) + X\n x = mx.nd.zeros(shape, dtype=dtype, ctx=device)\n xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=device)\n yexec = Y.bind(device, {'X' : x}, {'X': xgrad})\n mx.random.seed(128)\n yexec.forward(is_train=True)\n yexec.backward(yexec.outputs[0])\n un1 = (yexec.outputs[0] - x).copyto(device)\n assert same(xgrad.asnumpy(), un1.asnumpy())\n mx.random.seed(128)\n yexec.forward()\n un2 = (yexec.outputs[0] - x).copyto(device)\n assert same(un1.asnumpy(), un2.asnumpy()), \\\n \"symbolic test: `%s` should give the same result with the same seed\" % name\n\n ret1 = un1.asnumpy()\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(ret1, params)) < tol, \"symbolic test: %s check for `%s` did not pass\" % (check_name, name)\n\n # check multi-distribution sampling\n symbol = symbdic['symbol']\n params = { 'shape' : shape, 'dtype' : dtype }\n single_param = len(symbdic['inputs']) == 1;\n v1 = mx.sym.Variable('v1')\n v2 = mx.sym.Variable('v2')\n Y = symbol(v1,**params) if single_param else symbol(v1,v2,**params)\n bindings = { 'v1' : mx.nd.array(symbdic['inputs'][0][1]) }\n if not single_param :\n bindings.update({ 'v2' : mx.nd.array(symbdic['inputs'][1][1]) })\n yexec = Y.bind(ctx=device, args=bindings)\n yexec.forward()\n un1 = yexec.outputs[0].copyto(device).asnumpy()\n params = {}\n for i, r in enumerate(symbdic['inputs'][0][1]):\n for j, p1 in enumerate(r):\n params.update({ symbdic['inputs'][0][0] : p1 })\n if not single_param:\n params.update({ symbdic['inputs'][1][0] : symbdic['inputs'][1][1][i][j] })\n samples = un1[i,j]\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(samples, params)) < tol, \"symbolic test: %s check for `%s` did not pass\" % (check_name, name)\n\n@with_seed()\ndef test_random():\n check_with_device(mx.context.current_context(), 'float16')\n check_with_device(mx.context.current_context(), 'float32')\n check_with_device(mx.context.current_context(), 'float64')\n\n\n# Set seed variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`\ndef set_seed_variously(init_seed, num_init_seeds, final_seed):\n end_seed = init_seed + num_init_seeds\n for seed in range(init_seed, end_seed):\n 
mx.random.seed(seed)\n mx.random.seed(final_seed)\n return end_seed\n\n# Tests that seed setting of std (non-parallel) rng is synchronous w.r.t. rng use before and after.\n@with_seed()\ndef test_random_seed_setting():\n ctx = mx.context.current_context()\n seed_to_test = 1234\n num_temp_seeds = 25\n probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]\n num_samples = 100000\n for dtype in ['float16', 'float32', 'float64']:\n seed = set_seed_variously(1, num_temp_seeds, seed_to_test)\n samples1 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),\n shape=num_samples)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n samples2 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),\n shape=num_samples)\n samples1np = samples1.asnumpy()\n set_seed_variously(seed, num_temp_seeds, seed_to_test+1)\n samples2np = samples2.asnumpy()\n assert same(samples1np, samples2np), \\\n \"seed-setting test: `multinomial` should give the same result with the same seed\"\n\n\n# Tests that seed setting of parallel rng is synchronous w.r.t. rng use before and after.\n@with_seed()\ndef test_parallel_random_seed_setting():\n ctx = mx.context.current_context()\n seed_to_test = 1234\n for dtype in ['float16', 'float32', 'float64']:\n # Avoid excessive test cpu runtimes\n num_temp_seeds = 25 if ctx.device_type == 'gpu' else 1\n # To flush out a possible race condition, run multiple times\n for _ in range(20):\n # Create enough samples such that we get a meaningful distribution.\n shape = (200, 200)\n params = { 'low': -1.5, 'high': 3.0 }\n params.update(shape=shape, dtype=dtype, ctx=ctx)\n\n # check directly\n seed = set_seed_variously(1, num_temp_seeds, seed_to_test)\n ret1 = mx.nd.random.uniform(**params)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n ret2 = mx.nd.random.uniform(**params)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n assert same(ret1.asnumpy(), ret2.asnumpy()), \\\n \"ndarray seed-setting test: `uniform` should give the same result with the same seed\"\n\n # check symbolic\n X = mx.sym.Variable(\"X\")\n Y = mx.sym.random.uniform(**params) + X\n x = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)\n xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)\n yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n yexec.forward(is_train=True)\n yexec.backward(yexec.outputs[0])\n un1 = (yexec.outputs[0] - x).copyto(ctx)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n yexec.forward()\n set_seed_variously(seed, num_temp_seeds, seed_to_test)\n un2 = (yexec.outputs[0] - x).copyto(ctx)\n assert same(un1.asnumpy(), un2.asnumpy()), \\\n \"symbolic seed-setting test: `uniform` should give the same result with the same seed\"\n\n# Set seed for the context variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`\ndef set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed):\n end_seed = init_seed + num_init_seeds\n for seed in range(init_seed, end_seed):\n mx.random.seed(seed, ctx=ctx)\n mx.random.seed(final_seed, ctx=ctx)\n return end_seed\n\n# Tests that seed setting of std (non-parallel) rng for specific context is synchronous w.r.t. 
rng use before and after.\n@with_seed()\ndef test_random_seed_setting_for_context():\n seed_to_test = 1234\n num_temp_seeds = 25\n probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]\n num_samples = 100000\n dev_type = mx.context.current_context().device_type\n for dtype in ['float16', 'float32', 'float64']:\n samples_imp = []\n samples_sym = []\n # Collect random number samples from the generators of all devices, each seeded with the same number.\n for dev_id in range(0, 16 if dev_type == 'gpu' else 1):\n # Currently python API does not provide a method to get the number of gpu devices.\n # Waiting for PR #10354, which provides the method, to be merged.\n # As a temporal workaround, try first and catch the exception caused by the absence of the device with `dev_id`.\n try:\n with mx.Context(dev_type, dev_id):\n ctx = mx.context.current_context()\n seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)\n\n # Check imperative. `multinomial` uses non-parallel rng.\n rnds = mx.nd.random.multinomial(data=mx.nd.array(probs, dtype=dtype), shape=num_samples)\n samples_imp.append(rnds.asnumpy())\n\n # Check symbolic. `multinomial` uses non-parallel rng.\n P = mx.sym.Variable(\"P\")\n X = mx.sym.random.multinomial(data=P, shape=num_samples, get_prob=False)\n exe = X.bind(ctx, {\"P\": mx.nd.array(probs, dtype=dtype)})\n set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)\n exe.forward()\n samples_sym.append(exe.outputs[0].asnumpy())\n except mx.MXNetError as e:\n if str(e).find(\"invalid device ordinal\") != -1:\n break\n else:\n raise e\n # The samples should be identical across different gpu devices.\n for i in range(1, len(samples_imp)):\n assert same(samples_imp[i - 1], samples_imp[i])\n for i in range(1, len(samples_sym)):\n assert same(samples_sym[i - 1], samples_sym[i])\n\n# Tests that seed setting of parallel rng for specific context is synchronous w.r.t. rng use before and after.\n@with_seed()\ndef test_parallel_random_seed_setting_for_context():\n seed_to_test = 1234\n dev_type = mx.context.current_context().device_type\n for dtype in ['float16', 'float32', 'float64']:\n samples_imp = []\n samples_sym = []\n # Collect random number samples from the generators of all devices, each seeded with the same number.\n for dev_id in range(0, 16 if dev_type == 'gpu' else 1):\n # Currently python API does not provide a method to get the number of gpu devices.\n # Waiting for PR #10354, which provides the method, to be merged.\n # As a temporal workaround, try first and catch the exception caused by the absence of the device with `dev_id`.\n try:\n with mx.Context(dev_type, dev_id):\n ctx = mx.context.current_context()\n # Avoid excessive test cpu runtimes.\n num_temp_seeds = 25 if dev_type == 'gpu' else 1\n # To flush out a possible race condition, run multiple times.\n for _ in range(20):\n # Create enough samples such that we get a meaningful distribution.\n shape = (200, 200)\n params = { 'low': -1.5, 'high': 3.0 }\n params.update(shape=shape, dtype=dtype)\n\n # Check imperative. `uniform` uses parallel rng.\n seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)\n rnds = mx.nd.random.uniform(**params)\n samples_imp.append(rnds.asnumpy())\n\n # Check symbolic. 
`uniform` uses parallel rng.\n X = mx.sym.Variable(\"X\")\n Y = mx.sym.random.uniform(**params) + X\n x = mx.nd.zeros(shape, dtype=dtype)\n xgrad = mx.nd.zeros(shape, dtype=dtype)\n yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})\n set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)\n yexec.forward(is_train=True)\n yexec.backward(yexec.outputs[0])\n samples_sym.append(yexec.outputs[0].asnumpy())\n except mx.MXNetError as e:\n if str(e).find(\"invalid device ordinal\") != -1:\n break\n else:\n raise e\n # The samples should be identical across different gpu devices.\n for i in range(1, len(samples_imp)):\n assert same(samples_imp[i - 1], samples_imp[i])\n for i in range(1, len(samples_sym)):\n assert same(samples_sym[i - 1], samples_sym[i])\n\n@with_seed()\ndef test_sample_multinomial():\n for x in [mx.nd.array([[0,1,2,3,4],[4,3,2,1,0]])/10.0, mx.nd.array([0,1,2,3,4])/10.0]:\n dx = mx.nd.ones_like(x)\n mx.contrib.autograd.mark_variables([x], [dx])\n # Adding rtol and increasing samples needed to pass with seed 2951820647\n samples = 5000\n with mx.autograd.record():\n y, prob = mx.nd.random.multinomial(x, shape=samples, get_prob=True)\n r = prob * 5\n r.backward()\n\n y = y.asnumpy()\n x = x.asnumpy()\n dx = dx.asnumpy()\n if len(x.shape) is 1:\n x = x.reshape((1, x.shape[0]))\n dx = dx.reshape(1, dx.shape[0])\n y = y.reshape((1, y.shape[0]))\n prob = prob.reshape((1, prob.shape[0]))\n for i in range(x.shape[0]):\n freq = np.bincount(y[i,:], minlength=5)/np.float32(samples)*x[i,:].sum()\n mx.test_utils.assert_almost_equal(freq, x[i], rtol=0.20)\n rprob = x[i][y[i]]/x[i].sum()\n mx.test_utils.assert_almost_equal(np.log(rprob), prob.asnumpy()[i], atol=1e-5)\n\n real_dx = np.zeros((5,))\n for j in range(samples):\n real_dx[y[i][j]] += 5.0 / rprob[j]\n mx.test_utils.assert_almost_equal(real_dx, dx[i, :], rtol=1e-4, atol=1e-5)\n\n# Test the generators with the chi-square testing\n@with_seed()\ndef test_normal_generator():\n ctx = mx.context.current_context()\n samples = 1000000\n # Default success rate is 0.25, so 2 successes of 8 trials will pass.\n trials = 8\n num_buckets = 5\n for dtype in ['float16', 'float32', 'float64']:\n for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:\n print(\"ctx=%s, dtype=%s, Mu=%g, Sigma=%g:\" % (ctx, dtype, mu, sigma))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, mu, sigma), num_buckets)\n # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly\n buckets = np.array(buckets, dtype=dtype).tolist()\n probs = [(ss.norm.cdf(buckets[i][1], mu, sigma) -\n ss.norm.cdf(buckets[i][0], mu, sigma)) for i in range(num_buckets)]\n generator_mx = lambda x: mx.nd.random.normal(mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs,\n nsamples=samples, nrepeat=trials)\n generator_mx_same_seed =\\\n lambda x: np.concatenate(\n [mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs,\n nsamples=samples, nrepeat=trials)\n\n@with_seed()\ndef test_uniform_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for low, high in [(-1.0, 1.0), (1.0, 3.0)]:\n print(\"ctx=%s, dtype=%s, Low=%g, High=%g:\" % (ctx, dtype, low, high))\n scale = high - low\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)\n # Quantize bucket boundaries to 
reflect the actual dtype and adjust probs accordingly\n buckets = np.array(buckets, dtype=dtype).tolist()\n probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(5)]\n generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_gamma_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for kappa, theta in [(0.5, 1.0), (1.0, 5.0)]:\n print(\"ctx=%s, dtype=%s, Shape=%g, Scale=%g:\" % (ctx, dtype, kappa, theta))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gamma.ppf(x, a=kappa, loc=0, scale=theta), 5)\n generator_mx = lambda x: mx.nd.random.gamma(kappa, theta, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.gamma(kappa, theta, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_exponential_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for scale in [0.1, 1.0]:\n print(\"ctx=%s, dtype=%s, Scale=%g:\" % (ctx, dtype, scale))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, loc=0, scale=scale), 5)\n generator_mx = lambda x: mx.nd.random.exponential(scale, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.exponential(scale, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_poisson_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for lam in [1, 10]:\n print(\"ctx=%s, dtype=%s, Lambda=%d:\" % (ctx, dtype, lam))\n buckets = [(-1.0, lam - 0.5), (lam - 0.5, 2 * lam + 0.5), (2 * lam + 0.5, np.inf)]\n probs = [ss.poisson.cdf(bucket[1], lam) - ss.poisson.cdf(bucket[0], lam) for bucket in buckets]\n generator_mx = lambda x: mx.nd.random.poisson(lam, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.poisson(lam, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_negative_binomial_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n success_num = 2\n success_prob = 0.2\n print(\"ctx=%s, dtype=%s, Success Num=%d:, Success Prob=%g\" % (ctx, dtype, success_num, success_prob))\n buckets = [(-1.0, 2.5), (2.5, 5.5), (5.5, 8.5), (8.5, np.inf)]\n probs = [ss.nbinom.cdf(bucket[1], success_num, success_prob) -\n ss.nbinom.cdf(bucket[0], success_num, success_prob) for bucket in buckets]\n generator_mx = lambda x: mx.nd.random.negative_binomial(success_num, success_prob,\n shape=x, ctx=ctx, 
dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.negative_binomial(success_num, success_prob, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n # Also test the Gamm-Poisson Mixture\n print('Gamm-Poisson Mixture Test:')\n alpha = 1.0 / success_num\n mu = (1.0 - success_prob) / success_prob / alpha\n generator_mx = lambda x: mx.nd.random.generalized_negative_binomial(mu, alpha,\n shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.generalized_negative_binomial(mu, alpha, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_multinomial_generator():\n # This test fails with dtype float16 if the probabilities themselves cannot be\n # well-represented in float16. When the float16 random picks are assigned to buckets,\n # only certain bucket-probabilities are possible. Here we map the desired probabilites\n # (e.g. 0.1) to nearby float16 probabilities (e.g. 0.10009766) that are achievable.\n def quantize_probs(probs, dtype):\n if dtype == 'float16':\n # float16 has a 10-bit fraction plus an implicit leading 1, so all probabilities\n # of the form N/2^11 (where N is an integer) are representable.\n num_quanta = 2048.0\n quantized_probs = np.rint(np.array(probs) * num_quanta) / num_quanta\n # Ensure probabilities add to 1\n quantized_probs[0] += 1.0 - quantized_probs.sum()\n else:\n # no need to quantize probs with this data precision\n quantized_probs = np.array(probs)\n return quantized_probs\n\n ctx = mx.context.current_context()\n probs = [0.1, 0.2, 0.3, 0.05, 0.15, 0.2]\n samples = 1000000\n trials = 5\n buckets = list(range(6))\n for dtype in ['float16', 'float32', 'float64']:\n print(\"ctx=%s, dtype=%s\" %(ctx, dtype))\n quantized_probs = quantize_probs(probs, dtype)\n generator_mx = lambda x: mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),\n shape=x).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=quantized_probs,\n nsamples=samples, nrepeat=trials)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),\n shape=x // 10).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=quantized_probs,\n nsamples=samples, nrepeat=trials)\n\n\n@with_seed()\ndef test_with_random_seed():\n ctx = mx.context.current_context()\n size = 100\n shape = (size,)\n\n def check_same(x, y, name):\n assert same(x, y), \\\n \"%s rng should give the same result with the same seed\" % name\n\n def check_diff(x, y, name):\n assert not same(x, y), \\\n \"%s rng should give different results with different seeds\" % name\n\n # generate python, numpy and mxnet datasets with the given seed\n def gen_data(seed=None):\n with random_seed(seed):\n python_data = [rnd.random() for _ in range(size)]\n np_data = np.random.rand(size)\n mx_data = mx.nd.random_uniform(shape=shape, ctx=ctx).asnumpy()\n return (seed, python_data, np_data, mx_data)\n\n # check data, expecting them to be the same or different based on the seeds\n def check_data(a, b):\n seed_a 
= a[0]\n seed_b = b[0]\n if seed_a == seed_b and seed_a is not None:\n check_same(a[1], b[1], 'python')\n check_same(a[2], b[2], 'numpy')\n check_same(a[3], b[3], 'mxnet')\n else:\n check_diff(a[1], b[1], 'python')\n check_diff(a[2], b[2], 'numpy')\n check_diff(a[3], b[3], 'mxnet')\n\n # 5 tests that include a duplicated seed 1 and randomizing seed None\n seeds = [1, 2, 1, None, None]\n data = [gen_data(seed) for seed in seeds]\n\n # Add more complicated test case scenarios\n with random_seed(1):\n seeds.append(None)\n data.append(gen_data(None))\n with random_seed(2):\n seeds.append(None)\n data.append(gen_data(None))\n with random_seed():\n seeds.append(1)\n data.append(gen_data(1))\n with random_seed():\n seeds.append(2)\n data.append(gen_data(2))\n with random_seed(1):\n seeds.append(2)\n data.append(gen_data(2))\n\n num_seeds = len(seeds)\n for i in range(0, num_seeds-1):\n for j in range(i+1, num_seeds):\n check_data(data[i],data[j])\n\n@with_seed()\ndef test_zipfian_generator():\n # dummy true classes\n num_true = 5\n num_sampled = 1000\n range_max = 20\n\n def compute_expected_prob():\n # P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)\n classes = mx.nd.arange(0, range_max)\n expected_counts = ((classes + 2).log() - (classes + 1).log()) / np.log(range_max + 1)\n return expected_counts\n\n exp_cnt = compute_expected_prob() * num_sampled\n\n # test ndarray\n true_classes = mx.nd.random.uniform(0, range_max, shape=(num_true,)).astype('int32')\n sampled_classes, exp_cnt_true, exp_cnt_sampled = mx.nd.contrib.rand_zipfian(true_classes, num_sampled, range_max)\n mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n\n # test symbol\n true_classes_var = mx.sym.var('true_classes')\n outputs = mx.sym.contrib.rand_zipfian(true_classes_var, num_sampled, range_max)\n outputs = mx.sym.Group(outputs)\n executor = outputs.bind(mx.context.current_context(), {'true_classes' : true_classes})\n executor.forward()\n sampled_classes, exp_cnt_true, exp_cnt_sampled = executor.outputs\n mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n\n# Issue #10277 (https://github.com/apache/incubator-mxnet/issues/10277) discusses this test.\n@with_seed()\ndef test_shuffle():\n def check_first_axis_shuffle(arr):\n stride = int(arr.size / arr.shape[0])\n column0 = arr.reshape((arr.size,))[::stride].sort()\n seq = mx.nd.arange(0, arr.size - stride + 1, stride, ctx=arr.context)\n assert (column0 == seq).prod() == 1\n for i in range(arr.shape[0]):\n subarr = arr[i].reshape((arr[i].size,))\n start = subarr[0].asscalar()\n seq = mx.nd.arange(start, start + stride, ctx=arr.context)\n assert (subarr == seq).prod() == 1\n\n # This tests that the shuffling is along the first axis with `repeat1` number of shufflings\n # and the outcomes are uniformly distributed with `repeat2` number of shufflings.\n # Note that the enough number of samples (`repeat2`) to verify the uniformity of the distribution\n # of the outcomes grows factorially with the length of the first axis of the array `data`.\n # So we have to settle down with small arrays in practice.\n # `data` must be a consecutive sequence of integers starting from 0 if it is flattened.\n def 
testSmall(data, repeat1, repeat2):\n # Check that the shuffling is along the first axis.\n # The order of the elements in each subarray must not change.\n # This takes long time so `repeat1` need to be small.\n for i in range(repeat1):\n ret = mx.nd.random.shuffle(data)\n check_first_axis_shuffle(ret)\n # Count the number of each different outcome.\n # The sequence composed of the first elements of the subarrays is enough to discriminate\n # the outcomes as long as the order of the elements in each subarray does not change.\n count = {}\n stride = int(data.size / data.shape[0])\n for i in range(repeat2):\n ret = mx.nd.random.shuffle(data)\n h = str(ret.reshape((ret.size,))[::stride])\n c = count.get(h, 0)\n count[h] = c + 1\n # Check the total number of possible outcomes.\n # If `repeat2` is not large enough, this could fail with high probability.\n assert len(count) == math.factorial(data.shape[0])\n # The outcomes must be uniformly distributed.\n # If `repeat2` is not large enough, this could fail with high probability.\n for p in itertools.permutations(range(0, data.size - stride + 1, stride)):\n err = abs(1. * count[str(mx.nd.array(p))] / repeat2 - 1. / math.factorial(data.shape[0]))\n assert err < 0.01, \"The absolute error {} is larger than the tolerance.\".format(err)\n # Check symbol interface\n a = mx.sym.Variable('a')\n b = mx.sym.random.shuffle(a)\n c = mx.sym.random.shuffle(data=b, name='c')\n d = mx.sym.sort(c, axis=0)\n assert (d.eval(a=data, ctx=mx.current_context())[0] == data).prod() == 1\n\n # This test is weaker than `testSmall` and to test larger arrays.\n # `repeat` should be much smaller than the factorial of `len(x.shape[0])`.\n # `data` must be a consecutive sequence of integers starting from 0 if it is flattened.\n def testLarge(data, repeat):\n # Check that the shuffling is along the first axis\n # and count the number of different outcomes.\n stride = int(data.size / data.shape[0])\n count = {}\n for i in range(repeat):\n ret = mx.nd.random.shuffle(data)\n check_first_axis_shuffle(ret)\n h = str(ret.reshape((ret.size,))[::stride])\n c = count.get(h, 0)\n count[h] = c + 1\n # The probability of duplicated outcomes is very low for large arrays.\n assert len(count) == repeat\n\n # Test small arrays with different shapes\n testSmall(mx.nd.arange(0, 3), 100, 40000)\n testSmall(mx.nd.arange(0, 9).reshape((3, 3)), 100, 40000)\n testSmall(mx.nd.arange(0, 18).reshape((3, 2, 3)), 100, 40000)\n # Test larger arrays\n testLarge(mx.nd.arange(0, 100000).reshape((10, 10000)), 10)\n testLarge(mx.nd.arange(0, 100000).reshape((10000, 10)), 10)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.log", "numpy.random.choice", "numpy.reshape", "numpy.set_printoptions", "numpy.tile", "numpy.zeros" ], [ "scipy.stats.norm.ppf", "numpy.log", "scipy.stats.norm.cdf", "numpy.sqrt", "scipy.stats.uniform.ppf", "scipy.stats.nbinom.cdf", "scipy.stats.gamma.ppf", "scipy.stats.expon.ppf", "numpy.random.rand", "numpy.bincount", "numpy.float32", "numpy.array", "numpy.zeros", "numpy.sum", "scipy.stats.poisson.cdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guoshuhong/Classification-networks
[ "0ca20964e64cf9001e2a770b2b44d88dcca04775" ]
[ "net/layers.py" ]
[ "from tensorflow import keras\nimport tensorflow as tf\n\n\nclass BatchNormalization(keras.layers.BatchNormalization):\n \"\"\"\n Identical to keras.layers.BatchNormalization, but adds the option to freeze parameters.\n \"\"\"\n def __init__(self, freeze, *args, **kwargs):\n self.freeze = freeze\n super(BatchNormalization, self).__init__(*args, **kwargs)\n\n # set to non-trainable if freeze is true\n self.trainable = not self.freeze\n\n def call(self, inputs, training=None, **kwargs):\n # return super.call, but set training\n if not training:\n return super(BatchNormalization, self).call(inputs, training=False)\n else:\n return super(BatchNormalization, self).call(inputs, training=(not self.freeze))\n\n def get_config(self):\n config = super(BatchNormalization, self).get_config()\n config.update({'freeze': self.freeze})\n return config\n\n\nclass wBiFPNAdd(keras.layers.Layer):\n def __init__(self, epsilon=1e-4, **kwargs):\n super(wBiFPNAdd, self).__init__(**kwargs)\n self.epsilon = epsilon\n\n def build(self, input_shape):\n num_in = len(input_shape)\n self.w = self.add_weight(name=self.name,\n shape=(num_in,),\n initializer=keras.initializers.constant(1 / num_in),\n trainable=True,\n dtype=tf.float32)\n\n def call(self, inputs, **kwargs):\n w = keras.activations.relu(self.w)\n x = tf.reduce_sum([w[i] * inputs[i] for i in range(len(inputs))], axis=0)\n x = x / (tf.reduce_sum(w) + self.epsilon)\n return x\n\n def compute_output_shape(self, input_shape):\n return input_shape[0]\n\n def get_config(self):\n config = super(wBiFPNAdd, self).get_config()\n config.update({\n 'epsilon': self.epsilon\n })\n return config\n" ]
[ [ "tensorflow.keras.activations.relu", "tensorflow.reduce_sum", "tensorflow.keras.initializers.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aist9/autoencoder
[ "b3b1ccbfff61883db06a6ca4506ca890eff08568" ]
[ "vae_torch/sample.py" ]
[ "# VAEのサンプルコード, MNIST使用\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n \r\n # コマンドライン引数を読み込み\r\n # 引数が'-1'なら学習しない\r\n args = sys.argv\r\n train_mode = ['train', 'retrain', 'load']\r\n mode = 0 if len(args) < 2 else int(args[1])\r\n train_mode = train_mode[mode]\r\n \r\n # torchのMNISTの呼び出し方がよくわからんかったのでchainerで代用\r\n # MNISTデータの読み込み\r\n import chainer\r\n train, test = chainer.datasets.get_mnist()\r\n # データとラベルに分割\r\n train_data, train_label = train._datasets\r\n test_data, test_label = test._datasets\r\n\r\n # 標準化\r\n # tr_std = train_data.std()\r\n # tr_avg = train_data.mean()\r\n # train_data = (train_data - tr_avg) / tr_std\r\n # test_data = (test_data - tr_avg) / tr_std\r\n\r\n # 学習条件\r\n # エポックとミニバッチサイズ\r\n epoch = 100\r\n batchsize = 128\r\n # 隠れ層のユニット数\r\n hidden = [128,2]\r\n\r\n act_func = 'tanh' # 活性化関数\r\n out_func = 'sigmoid' # 出力層の活性化関数 (デコーダの出力層, 無印版は必ずsigmoid)\r\n use_BN = True # Batch Normalization を使うか否か\r\n\r\n fd = './model/torch/'\r\n\r\n # modelのセットアップ\r\n from variational_autoencoder import VAE\r\n vae = VAE( int(train_data.shape[1]) ,hidden, act_func=act_func, out_func=out_func ,use_BN=use_BN, folder=fd, device='cuda', is_gauss_dist=True)\r\n\r\n # ed版はこちら. 無印版とは名前が違うだけ.\r\n # from variational_autoencoder_ed import VAE_ED\r\n # vae = VAE_ED( int(train_data.shape[1]) ,hidden, act_func=act_func, out_func=out_func ,use_BN=True, folder=fd, device='cuda', is_gauss_dist=True)\r\n\r\n # VAEの学習\r\n if train_mode == 'train':\r\n # vae.train(train_data, epoch, batchsize,C=1.0, k=1, valid=None)\r\n vae.train(train_data, epoch, batchsize,C=1.0, k=1, valid=test_data, is_plot_weight=True)\r\n if train_mode == 'retrain':\r\n # もしかしたらoptimizerも保存・ロードしたほうがいいかもしれない\r\n vae.load_model()\r\n vae.train(train_data, epoch, batchsize,C=1.0, k=1, valid=None)\r\n else:\r\n vae.load_model()\r\n\r\n # 評価モードに切り替え. batch normalizationの無効化などに必須\r\n vae.model_to_eval()\r\n\r\n\r\n\r\n\r\n # テストデータを絞る, なおこのコードではテストデータを見ていない\r\n # test_data = test_data[:20]\r\n # test_label = test_label[:20]\r\n\r\n # 再構成\r\n feat_train, reconst_train, err_train = vae.reconst(train_data)\r\n feat_test, reconst_test, err_test = vae.reconst(test_data)\r\n\r\n # 再構成データとオリジナルを1次元で比較\r\n plt.plot(reconst_train[0])\r\n plt.plot(train_data[0])\r\n plt.show()\r\n \r\n # 潜在空間の可視化\r\n col = ['r','g','b','c','m','y','orange','black','gray','violet']\r\n for i in range(3000):\r\n plt.scatter(x=feat_train[i,0],y=feat_train[i,1],marker='.',color=col[train_label[i]])\r\n plt.show()\r\n\r\n\r\n # 自作の潜在空間から出力を画像を確認. VAEで調べるとよく見る数字がシームレスに変化するやつ.\r\n split_num = 20 # 分割数. 出力画像のサイズは split_num*29 × split_num*29. (MNISTの縦横28+分割線1).\r\n rn = np.linspace(-3,3,split_num)\r\n x,y = np.meshgrid(rn, rn)\r\n dt = np.hstack( [x.reshape(-1,1), y.reshape(-1,1)] )\r\n # 変換するメソッド\r\n imgs = vae.featuremap_to_image(dt)\r\n plot_image = np.ones( (split_num*29, split_num*29) )\r\n for i in range(split_num):\r\n for j in range(split_num):\r\n plot_image[i*28+i:-~i*28+i,j*28+j:-~j*28+j] = imgs[i*split_num+j].reshape(28,28)\r\n \r\n plt.imshow(plot_image,cmap='gray', vmax=1, vmin=0)\r\n plt.show()\r\n \r\n\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.linspace", "numpy.meshgrid", "matplotlib.pyplot.scatter", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mk43/machine-learning
[ "1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139" ]
[ "algorithm/neural-network/XOR.py" ]
[ "# coding: utf-8\n\nimport numpy as np\n\nx1 = np.asarray([0, 0, 1, 1])\nx2 = np.asarray([0, 1, 0, 1])\nX = np.row_stack((np.ones(shape=(1, 4)), x1, x2))\nprint(\"X:\\n%s\" % X)\ny = np.asarray([0, 1, 1, 0])\nW1 = np.asarray([[-1, 2, -2],\n [-1, -2, 2]])\nW2 = np.asarray([-1, 2, 2])\n\n\ndef sigmoid(input):\n return 1 / (1 + np.power(np.e, -10 * (input)))\n\n\nnp.set_printoptions(precision=6, suppress=True)\nz1 = np.matmul(W1, X)\nprint(\"W1*X = z1:\\n%s\" % z1)\na1 = np.row_stack((np.ones(shape=(1, 4)), sigmoid(z1)))\nprint(\"sigmoid(z1) = a1:\\n%s\" % a1)\nz2 = np.matmul(W2, a1)\nprint(\"W2*a1 = z2:\\n%s\" % z2)\na2 = sigmoid(z2)\nprint(\"------------------------\")\nprint(\"prediction: %s\" % a2)\nprint(\"target: %s\" % y)\nprint(\"------------------------\")\n\n# output:\n# X:\n# [[1. 1. 1. 1.]\n# [0. 0. 1. 1.]\n# [0. 1. 0. 1.]]\n# W1*X = z1:\n# [[-1. -3. 1. -1.]\n# [-1. 1. -3. -1.]]\n# sigmoid(z1) = a1:\n# [[1. 1. 1. 1. ]\n# [0.000045 0. 0.999955 0.000045]\n# [0.000045 0.999955 0. 0.000045]]\n# W2*a1 = z2:\n# [-0.999818 0.999909 0.999909 -0.999818]\n# ------------------------\n# prediction: [0.000045 0.999955 0.999955 0.000045]\n# target: [0 1 1 0]\n# ------------------------\n" ]
[ [ "numpy.power", "numpy.asarray", "numpy.set_printoptions", "numpy.matmul", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smartcommunitylab/sco.mobilitycovid
[ "a4af6b3b2d14208d638894a94b2c673397bf77fd" ]
[ "scripts/stop_user_clusters-v6.py" ]
[ "from __future__ import print_function\nfrom sklearn.cluster import DBSCAN\n\nimport argparse\nimport hashlib\nimport os\nimport time\n\nfrom datetime import date, datetime, timedelta\nfrom functools import reduce\nfrom math import degrees\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport concurrent.futures\n\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, BlobBlock\nfrom azure.core.exceptions import ResourceNotFoundError\n\nfrom pyspark import SparkContext, SparkConf, SQLContext\nfrom pyspark.sql import DataFrame, SparkSession\nimport pyspark.sql.functions as F\nfrom pyspark.sql.functions import col, udf\nfrom pyspark.sql.types import *\nfrom pyspark.sql.window import Window\nfrom pyspark import StorageLevel\nfrom pyspark.sql.functions import lag, pandas_udf, PandasUDFType\n\n# import logging\n\nVERSION = 6\n\n#\n# Driver settings\n#\nSHUFFLE_PARTITIONS = 32\nOUT_PARTITIONS = 2\nCORES = \"4\"\nRAM = \"12g\"\nAPP_NAME = \"StopUserClusters\"\n\n# always set overwrite\nWRITE_MODE = \"overwrite\"\nSKIP_EXISTING = False\nTHREADS = 32\n\n\n# templates\nTABLE_PATH = \"wasbs://{}@{}.blob.core.windows.net/{}/\"\nCONN_STRING = \"BlobEndpoint=https://{}.blob.core.windows.net/;SharedAccessSignature={}\"\n\n# need leading slash\nLOCAL_PATH = \"./table/\"\n\n\n#\n# Stop locations parameters\n#\nEVENTS_ROAM_DIST = 70 # meters\nSTOPS_ROAM_DIST = 65\nEARTH_RADIUS = 6372.795 * 1000\nMIN_STAY = 5\n\nUS_STATES = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS',\n 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY']\n\n\ndef read_multiple_df(spark, paths, format=\"parquet\"):\n dfs = None\n dfs_array = []\n for path in paths:\n dfs_load = spark.read.format(format).load(path)\n dfs_array.append(dfs_load)\n dfs = reduce(DataFrame.unionAll, dfs_array)\n return dfs\n\n\n#\n# Stop location lib\n#\n\n\ndef add_distance_column(dfs, order_column='timestamp'):\n # Radians lat/lon\n dfs = dfs.withColumn('latitude2', F.radians('latitude')).withColumn(\n 'longitude2', F.radians('longitude'))\n\n # Groups GPS locations into chucks. A chunk is formed by groups of points that are distant no more than roam_dist\n w = Window.partitionBy(['userID']).orderBy(order_column)\n dfs = dfs.withColumn('next_lat', F.lead('latitude2', 1).over(w))\n dfs = dfs.withColumn('next_lon', F.lead('longitude2', 1).over(w))\n\n # Haversine distance\n dfs = dfs.withColumn('distance_next', EARTH_RADIUS * 2 * F.asin(F.sqrt(\n F.pow(F.sin((col('next_lat') - col('latitude2')) / 2.0), 2) + F.cos('latitude2') * F.cos('next_lat') * F.pow(\n F.sin((col('next_lon') - col('longitude2')) / 2.0), 2))))\n dfs = dfs.withColumn('distance_prev', F.lag('distance_next', default=0).over(w)).drop(\n 'latitude2').drop('longitude2').drop('next_lon').drop('next_lat').drop('distance_next')\n return dfs\n\n\ndef get_destinations(dfs, roam_dist=110, earth_radius=6372.795 * 1000):\n \"\"\"\n Applies DBSCAN to extract the unique stop locations from a pyspark DataFrame\n\n :param x: DataFrame with ['id_client', 'latitude', 'longitude', \"from\", \"to\"]. 
Coordinates are in degrees.\n :param roam_dist: The stop location size in meters.\n :param earth_radius: The radius of the earth.\n :param group_results: If True, it groups by the cluster's location and id_client.\n :return: (pyspark DataFrame) If group_results=True: ['id_client', 'clatitude', 'clongitude', 'time_spent', 'frequency']\n (pyspark DataFrame) If group_results=False: ['id_client', 'latitude', 'longitude', 'clatitude', 'clongitude', 'from', 'to']\n \"\"\"\n\n @pandas_udf(\"userId string, state string, latitude double, longitude double, begin timestamp, end timestamp, clusterId integer\", PandasUDFType.GROUPED_MAP)\n def get_destinations(df):\n \"\"\"\n Applies DBSCAN to stop locations\n\n :param x: 2D numpy array with latitude and longitude.\n :param from_to_array: 2D numpy array with from and to timestamps.\n :param roam_dist: The stop location size in meters.\n :param earth_radius: The radius of the earth.\n :return: (pandas DataFrame) ['latitude', 'longitude', 'clatitude', 'clongitude', 'from', 'to', 'time_spent']\n \"\"\"\n db = DBSCAN(eps=roam_dist/earth_radius, min_samples=1,\n algorithm='ball_tree', metric='haversine')\n df[\"clusterId\"] = db.fit_predict(df[['latitude', 'longitude']])\n\n return df\n\n dfs = dfs.withColumn('latitude', F.radians('latitude'))\n dfs = dfs.withColumn('longitude', F.radians('longitude'))\n\n stops_dfs = dfs.groupby('userId', 'state').apply(get_destinations)\n\n stops_dfs = stops_dfs.withColumn('latitude', F.degrees('latitude'))\n stops_dfs = stops_dfs.withColumn('longitude', F.degrees('longitude'))\n\n w = Window().partitionBy('userId', 'clusterId')\n\n stops_dfs = stops_dfs.withColumn(\n 'clusterLatitude', F.mean('latitude').over(w))\n stops_dfs = stops_dfs.withColumn(\n 'clusterLongitude', F.mean('longitude').over(w))\n\n stops_dfs = stops_dfs.drop('latitude').drop('longitude')\n\n return stops_dfs\n\n#\n# Spark\n#\n\n\ndef getSparkConfig(cores, ram, partitions, azure_accounts, azure_oauth):\n # Setting enviroment variables and various drivers\n # \"org.apache.hadoop:hadoop-azure:2.10.0\" driver Azure\n # \"io.delta:delta-core_2.12:0.7.0\" driver Delta-lake\n # \"spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension\" configuration Delta\n # \"spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog\" configuration Delta\n # \"spark.delta.logStore.class=org.apache.spark.sql.delta.storage.AzureLogStore\" configuration Delta\n\n # Set spark environments\n os.environ['PYSPARK_PYTHON'] = '/usr/bin/python3'\n os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/bin/python3'\n # os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"\"\"--packages \"org.apache.hadoop:hadoop-azure:3.2.1\" pyspark-shell\"\"\"\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"\"\"--packages \"org.apache.hadoop:hadoop-azure:2.10.0\" --jars \"/mnt/batch/tasks/shared/sco-mobilitycovid-udf_2.11-1.0.jar\",\"/mnt/batch/tasks/shared/geo-0.7.7.jar\" pyspark-shell\"\"\"\n conf = (\n SparkConf()\n\n # SQL\n .set(\"spark.sql.shuffle.partitions\", partitions)\n .set(\"spark.sql.csv.filterPushdown.enabled\", \"false\")\n\n # Driver + memory\n .set(\"spark.driver.cores\", cores)\n .set(\"spark.shuffle.file.buffer\", \"1m\")\n # .set(\"spark.memory.offHeap.enabled\",\"true\")\n # .set(\"spark.memory.offHeap.size\",\"3g\")\n .set(\"spark.memory.fraction\", \"0.8\")\n .set(\"spark.memory.storageFraction\", \"0.2\")\n .set(\"spark.io.compression.lz4.blockSize\", \"128k\")\n .set(\"spark.driver.maxResultSize\", \"0\")\n .set(\"spark.driver.memory\", ram)\n\n # Local storage for spilling 
& storing temp files\n .set(\"spark.local.dir\", \"/mnt/batch/tasks/shared\")\n\n # Set master local\n .setMaster(\"local[*]\")\n\n # App name\n .setAppName(APP_NAME)\n )\n\n # Azure (Keys, Filesystem WASBS)\n conf.set(\"spark.hadoop.fs.wasbs.impl\",\n \"org.apache.hadoop.fs.azure.NativeAzureFileSystem\")\n\n for account in azure_accounts:\n conf.set(\"fs.azure.sas.{}.{}.blob.core.windows.net\".format(account['container'], account['storage']),\n account['sas'])\n\n if azure_oauth:\n conf.set(\"spark.hadoop.fs.azure.account.auth.type\", \"OAuth\")\n conf.set(\"spark.hadoop.fs.azure.account.oauth.provider.type\",\n \"org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider\")\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.id\",\n azure_oauth['client-id'])\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.secret\",\n azure_oauth['client-secret'])\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.endpoint\",\n azure_oauth['endpoint'])\n return conf\n\n#\n# Utils\n#\n\n\ndef enumerate_prefixes(start=0, end=256):\n for i in range(start, end):\n yield '{:02x}'.format(i)\n\n\ndef upload_blob(blob_service_client, container_out, blob_key, file_path):\n blob_client = blob_service_client.get_blob_client(\n container_out, blob_key)\n\n with open(file_path, \"rb\") as data:\n blob_client.upload_blob(data, overwrite=True)\n\n # cleanup\n os.remove(file_path)\n\n return blob_key\n\n#\n# Argparser\n#\n\n\ndef get_args():\n \"\"\"Parse command line arguments.\"\"\"\n\n parser = argparse.ArgumentParser(description=\"Cuebiq data processor\")\n requiredNamed = parser.add_argument_group('required arguments')\n requiredNamed.add_argument(\n \"--storage\", type=str, required=True, help=\"Azure storage\")\n requiredNamed.add_argument(\n \"--sas\", type=str, required=True, help=\"SAS token\")\n requiredNamed.add_argument(\n \"--oauth-login\", type=str, required=True, help=\"Oauth login\")\n requiredNamed.add_argument(\n \"--oauth-client-id\", type=str, required=True, help=\"Oauth client id\")\n requiredNamed.add_argument(\n \"--oauth-client-secret\", type=str, required=True, help=\"Oauth client secret\")\n requiredNamed.add_argument(\n \"--container-in\", type=str, required=True, help=\"Input container\")\n requiredNamed.add_argument(\n \"--container-out\", type=str, required=True, help=\"Output container\")\n requiredNamed.add_argument(\"--country\", type=str,\n help=\"Country. 
Options: 'US','IT'\")\n requiredNamed.add_argument(\"--prefix\", type=str, help=\"User prefix\")\n\n # optional\n parser.add_argument(\"--vm-cores\", default=CORES,\n type=str, help=\"Azure VM cores\")\n parser.add_argument(\"--vm-ram\", default=RAM,\n type=str, help=\"Azure VM ram\")\n parser.add_argument(\"--shuffle-partitions\", default=SHUFFLE_PARTITIONS,\n type=int, help=\"Spark shuffle partitions\")\n parser.add_argument(\"--roam-dist-stops\", type=int,\n default=STOPS_ROAM_DIST, help=\"Roam dist stops\")\n parser.add_argument(\"--roam-dist-events\", type=int,\n default=EVENTS_ROAM_DIST, help=\"Roam dist events\")\n parsed_args = parser.parse_args()\n\n return parsed_args\n\n\n#\n# Main function\n#\ndef main():\n \"\"\"Main function\"\"\"\n\n # Get args\n args = get_args()\n\n # container\n container_in = args.container_in\n container_out = args.container_out\n\n # Azure credentials\n sas_token = args.sas\n storage_account_name = args.storage\n azure_accounts = list()\n azure_accounts.append({\n \"storage\": storage_account_name,\n \"sas\": sas_token,\n \"container\": container_in\n })\n azure_accounts.append({\n \"storage\": storage_account_name,\n \"sas\": sas_token,\n \"container\": container_out\n })\n\n oauth_login = args.oauth_login\n oauth_client_id = args.oauth_client_id\n oauth_client_secret = args.oauth_client_secret\n\n # requires hadoop 3.2+\n # azure_oauth = {\n # \"endpoint\": oauth_login,\n # \"client-id\": oauth_client_id,\n # \"client-secret\": oauth_client_secret\n # }\n azure_oauth = False\n\n # VM\n cores = args.vm_cores\n ram = args.vm_ram\n shuffle_partitions = args.shuffle_partitions\n\n # Date, prefix\n country = args.country\n prefix = args.prefix\n\n # process config\n roam_dist_stops = args.roam_dist_stops\n roam_dist_events = args.roam_dist_events\n\n # Path in - path out\n blob_in = f\"wasbs://{container_in}@{storage_account_name}.blob.core.windows.net/stoplocation-v8_prefix_r70-s5-a70-h6/{country}/\"\n timezones_in = f\"wasbs://cuebiq-data@{storage_account_name}.blob.core.windows.net/utils_states_timezones/\"\n if azure_oauth:\n # we can leverage abfss\n blob_in = f\"abfss://{container_in}@{storage_account_name}.dfs.core.windows.net/stoplocation-v8_prefix_r70-s5-a70-h6/country={country}/\"\n timezones_in = f\"abfss://cuebiq-data@{storage_account_name}.dfs.core.windows.net/utils_states_timezones/\"\n\n path_out_distinct = f\"distinct_user_clusters-v8_r70-s5-a70-h6_clustered_{roam_dist_stops}m_v{VERSION}/country={country}\"\n path_out_all = f\"all_user_clusters-v8_r70-s5-a70-h6_clustered_{roam_dist_stops}m_v{VERSION}/country={country}\"\n\n # config spark\n conf = getSparkConfig(cores, ram, shuffle_partitions,\n azure_accounts, azure_oauth)\n\n # set prop for handling partition columns as strings (fixes prefixes as int)\n conf.set(\"spark.sql.sources.partitionColumnTypeInference.enabled\", \"false\")\n\n # Create spark session\n sc = SparkContext(conf=conf).getOrCreate()\n sqlContext = SQLContext(sc)\n spark = sqlContext.sparkSession\n # register UDF from jar\n spark.udf.registerJavaFunction(\n \"geohash\", \"it.smartcommunitylab.sco.mobilitycovid.udf.GeohashEncode\")\n\n # Init azure client\n blob_service_client = BlobServiceClient.from_connection_string(\n CONN_STRING.format(storage_account_name, sas_token))\n\n # build keys, date is mandatory, prefix opt\n partition_key = f\"prefix={prefix}\"\n\n print(\"process \"+partition_key)\n start_time = time.time()\n local_dir = LOCAL_PATH+partition_key\n print(\"write temp to \"+local_dir)\n\n # cleanup local 
if exists\n if (os.path.isdir(local_dir)):\n map(os.unlink, (os.path.join(local_dir, f)\n for f in os.listdir(local_dir)))\n\n # Input dataset\n print(\"read dataset table\")\n read_time = time.time()\n\n # explode days manually\n dates = [\n datetime(2020, 1, 1) + timedelta(days=x) for x in range(0, 258)]\n blobs_in = [\"{}/year={}/month={}/day={}/prefix={}\".format(\n blob_in, d.year, d.month, d.day, prefix) for d in dates]\n\n #dfs = spark.read.format(\"parquet\").load(*blobs_in)\n dfs = read_multiple_df(spark, blobs_in)\n dfs_timezones = spark.read.format(\"parquet\").load(timezones_in)\n\n # manually inject prefix column\n dfs = dfs.withColumn(\"prefix\", F.lit(prefix))\n\n # apply partition filter\n dfs_state = dfs.where(f\"prefix = '{prefix}'\")\n\n print(\"processing with spark\")\n spark_time = time.time()\n\n w = Window().partitionBy('userId').orderBy('begin')\n\n dfs_state = add_distance_column(dfs_state, order_column='begin')\n dfs_state = dfs_state.fillna(0, subset=['next_travelled_distance'])\n dfs_state = dfs_state.withColumn('lag_next_travelled_distance', F.lag(\n col('next_travelled_distance')).over(w))\n dfs_state = dfs_state.withColumn('lag_end', F.lag('end').over(w))\n dfs_state = dfs_state.withColumn('rn', F.when(((col('lag_next_travelled_distance') != col('prev_travelled_distance')) |\n (col('prev_travelled_distance') > 0) |\n (col('lag_next_travelled_distance') > 0) |\n (col('distance_prev') > roam_dist_events) |\n ((F.dayofyear(col('begin')) - F.dayofyear(col('lag_end')) == 1) &\n (F.hour(col('begin')) < 6))\n ) &\n ((col('lag_end').isNull()) | (col('lag_end') < col('begin'))), 1).otherwise(0))\n # Remove prev_travelled distance when rn == 0 (it happens when lag_end and begin overlap)\n dfs_state = dfs_state.withColumn('prev_travelled_distance', F.when(\n col('rn') == 0, 0).otherwise(col('prev_travelled_distance')))\n\n w = Window().partitionBy('userId').orderBy(\n 'begin').rangeBetween(Window.unboundedPreceding, 0)\n\n dfs_state = dfs_state.withColumn('group', F.sum('rn').over(w))\n\n dfs_state = dfs_state.groupBy('userId', 'group').agg(F.mean('latitude').alias('latitude'),\n F.mean('longitude').alias(\n 'longitude'),\n F.min('begin').alias(\n 'begin'),\n F.max('end').alias(\n 'end'),\n F.first('state').alias('state')).drop('group')\n\n # Bug fix: due to the processing we do in the stop events, where we process stops every two days,\n # sometimes stop events overlap but they do not get merged until here. The error is RARE. 
Here we fix it\n #\n # We divide the two stops making MIN_STAY space between the two, if we can.\n w = Window().partitionBy('userId').orderBy('begin')\n dfs_state = dfs_state.withColumn('next_begin', F.lead('begin').over(w))\n dfs_state = dfs_state.withColumn('next_end', F.lead('end').over(w))\n dfs_state = dfs_state.withColumn('end', F.when(\n (col('next_begin').cast('long') - col('begin').cast('long') > 2 * MIN_STAY * 60) &\n (col('next_begin') < col('end')),\n col('next_begin') - F.expr(\"INTERVAL {} SECONDS\".format(MIN_STAY * 60))\n ).otherwise(col('end')))\n dfs_state = dfs_state.drop('next_begin', 'next_end')\n\n dfs_destinations = get_destinations(dfs_state, roam_dist=roam_dist_stops)\n dfs_destinations = dfs_destinations.withColumn(\n 'prefix', dfs_destinations.userId.substr(1, 2))\n dfs_destinations = dfs_destinations.withColumn(\n 'dayofyear', F.dayofyear('begin'))\n dfs_destinations = dfs_destinations.withColumn('year', F.year('begin'))\n # dfs_destinations = dfs_destinations.withColumn('state', F.lit(state))\n\n # Local time\n dfs_destinations.createOrReplaceTempView(\"dfs_destinations\")\n dfs_destinations = spark.sql(\"\"\"\n SELECT dfs_destinations.*, geohash(clusterLatitude, clusterLongitude, 7) as geohash7\n from dfs_destinations\n \"\"\")\n dfs_destinations = dfs_destinations.withColumn(\n 'geohash5', F.substring(col('geohash7'), 1, 5))\n dfs_destinations = dfs_destinations.join(\n F.broadcast(dfs_timezones), on='geohash5').drop('geohash5')\n dfs_destinations = dfs_destinations.withColumn(\n 'local_begin', F.from_utc_timestamp(col('begin'), col('tzid')))\n dfs_destinations = dfs_destinations.withColumn('offset', (\n (col('local_begin').cast('long') - col('begin').cast('long')) / 3600).cast('int')).drop('local_begin')\n dfs_destinations.persist(StorageLevel.DISK_ONLY)\n\n # Write\n # output as country/prefix/part1..N\n local_dir_all = local_dir + \"/all/\"\n dfs_destinations_all = dfs_destinations.select(\n 'prefix', 'userId', 'clusterId', 'begin', 'end', 'offset', 'year', 'dayofyear')\n dfs_destinations_all.repartition(8, 'dayofyear').write.format('parquet').mode(\n 'overwrite').save(local_dir_all+\"prefix=\"+prefix+\"/\")\n\n # output as country/prefix/state\n local_dir_distinct = local_dir+\"/distinct/\"\n dfs_destinations_distinct = dfs_destinations.select(\n 'prefix', 'userId', 'clusterId', 'clusterLatitude', 'clusterLongitude', 'geohash7', 'state')\n dfs_destinations_distinct = dfs_destinations_distinct.drop_duplicates([\n 'prefix', 'userId', 'clusterId', 'clusterLatitude', 'clusterLongitude', 'geohash7'])\n dfs_destinations_distinct.repartition(\"state\").write.partitionBy(\n \"state\").format('parquet').mode('overwrite').save(local_dir_distinct+\"prefix=\"+prefix+\"/\")\n\n dfs_destinations.unpersist()\n\n print(\"upload local data to azure\")\n upload_time = time.time()\n\n # upload parts 1 \"prefix/state\"\n print(f\"upload files for distinct\")\n # upload with threads\n dfutures = []\n with ThreadPoolExecutor(max_workers=THREADS) as executor:\n fprefix = prefix\n print(f\"upload files for distinct: {fprefix}\")\n prefix_dir = local_dir_distinct+\"prefix=\"+fprefix\n prefix_key = f\"prefix={fprefix}\"\n\n for state in US_STATES:\n s_key = f\"state={state}\"\n f_dir = prefix_dir + \"/\"+s_key\n f_key = prefix_key + \"/\"+s_key\n\n # print(f\"read files for distinct from {f_dir}\")\n\n if (os.path.isdir(f_dir)):\n files = [filename for filename in os.listdir(\n f_dir) if filename.startswith(\"part-\")]\n\n if len(files) > 0:\n\n for file_local in files:\n 
file_path = f_dir+\"/\"+file_local\n part_num = int(file_local.split('-')[1])\n part_key = '{:05d}'.format(part_num)\n # fix name as static hash to be reproducible\n filename_hash = hashlib.sha1(\n str.encode(f_key+f_key+part_key)).hexdigest()\n\n blob_key = \"{}/{}/part-{}-{}.snappy.parquet\".format(\n path_out_distinct, f_key, part_key, filename_hash)\n\n # print(\"upload \" + file_path + \" to \" + container_out+\":\"+blob_key)\n # upload_blob(blob_service_client,container_out, blob_key, file_path)\n future = executor.submit(\n upload_blob, blob_service_client, container_out, blob_key, file_path)\n dfutures.append(future)\n\n # else:\n # print(f\"no files to upload for {f_key}\")\n\n # else:\n # print(f\"missing partition for {f_key}\")\n\n # end of loop, wait for futures\n for future in dfutures:\n bkey = future.result()\n\n # ensure we wait all tasks\n # TODO check if all done\n ddone = concurrent.futures.wait(dfutures)\n\n # upload parts 2 \"prefix/parts\"\n print(f\"upload files for all\")\n fprefix = prefix\n # upload with threads\n afutures = []\n with ThreadPoolExecutor(max_workers=THREADS) as executor:\n print(f\"upload files for all: {fprefix}\")\n prefix_dir = local_dir_all+\"prefix=\"+fprefix\n prefix_key = f\"prefix={fprefix}\"\n\n if (os.path.isdir(prefix_dir)):\n files = [filename for filename in os.listdir(\n prefix_dir) if filename.startswith(\"part-\")]\n\n if len(files) > 0:\n\n for file_local in files:\n file_path = prefix_dir+\"/\"+file_local\n part_num = int(file_local.split('-')[1])\n part_key = '{:05d}'.format(part_num)\n # fix name as static hash to be reproducible\n filename_hash = hashlib.sha1(\n str.encode(prefix_key+part_key)).hexdigest()\n\n blob_key = \"{}/{}/part-{}-{}.snappy.parquet\".format(\n path_out_all, prefix_key, part_key, filename_hash)\n\n # print(\"upload \" + file_path + \" to \" + container_out+\":\"+blob_key)\n # upload_blob(blob_service_client,container_out, blob_key, file_path)\n future = executor.submit(\n upload_blob, blob_service_client, container_out, blob_key, file_path)\n afutures.append(future)\n # else:\n # print(f\"no files to upload for {d_key}\")\n\n # else:\n # print(f\"missing partition for {d_key}\")\n # end of loop, wait for futures\n for future in afutures:\n bkey = future.result()\n\n # ensure we wait all tasks\n # TODO check if all done\n adone = concurrent.futures.wait(afutures)\n\n print(\"--- {} seconds elapsed ---\".format(int(time.time() - start_time)))\n print()\n shutdown_time = time.time()\n spark.stop()\n\n end_time = time.time()\n print(\"Done in {} seconds (read:{} spark:{} upload:{} shutdown:{})\".format(\n int(end_time - start_time),\n int(spark_time - read_time),\n int(upload_time - spark_time),\n int(shutdown_time - upload_time),\n int(end_time - shutdown_time)\n ))\n print('Done.')\n #\n # END OF CODE\n #\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.cluster.DBSCAN" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aeverall/tensorflow
[ "7992bf97711919f56f80bff9e5510cead4ab2095", "7992bf97711919f56f80bff9e5510cead4ab2095", "7992bf97711919f56f80bff9e5510cead4ab2095", "7992bf97711919f56f80bff9e5510cead4ab2095", "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5", "7992bf97711919f56f80bff9e5510cead4ab2095", "7992bf97711919f56f80bff9e5510cead4ab2095", "7992bf97711919f56f80bff9e5510cead4ab2095" ]
[ "tensorflow/python/framework/func_graph.py", "tensorflow/python/ops/cond_v2.py", "tensorflow/python/data/experimental/kernel_tests/scan_test.py", "tensorflow/python/keras/engine/topology_test.py", "tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py", "tensorflow/python/saved_model/utils_test.py", "tensorflow/python/data/experimental/ops/prefetching_ops.py", "tensorflow/python/debug/lib/debug_gradients_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FuncGraph and related functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport weakref\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.lazy_loader import LazyLoader\n\n# This is to avoid a circular dependency:\n# function -> func_graph\nfunction = LazyLoader(\"function\", globals(),\n \"tensorflow.python.eager.function\")\ndef_function = LazyLoader(\n \"def_function\", globals(),\n \"tensorflow.python.eager.def_function\")\n\nWHITELIST_COLLECTIONS = [\n ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES,\n ops.GraphKeys.TRAINABLE_VARIABLES,\n variable_scope._VARSTORE_KEY, # pylint: disable=protected-access\n variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access\n]\n\n\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. 
input placeholder).\n The entries are in the order they were captured.\n seed: The graph-level random seed.\n \"\"\"\n\n def __init__(self, name, read_only_collections=True):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, and distribution\n strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n read_only_collections: whether to not write function graph collections\n back to default graph. Defaults to True.\n \"\"\"\n super(FuncGraph, self).__init__()\n\n self.name = name\n self.inputs = []\n self.outputs = []\n self.structured_outputs = None\n self._read_only_collections = read_only_collections\n self._weak_variables = []\n self.outer_graph = ops.get_default_graph()\n self.captures = collections.OrderedDict()\n\n self._building_function = True\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n graph = self.outer_graph\n\n # pylint: disable=protected-access\n # TODO(b/112906995, nareshmodi): distribution strategy depends on inheriting\n # this stack from the default graph even in eager mode. Maybe it should be\n # part of the eager context? This would also allow us to remove a\n # get_default_graph() call from the function cache lookup.\n self._distribution_strategy_stack = list(graph._distribution_strategy_stack)\n # We ignore device placements from any outer scopes while tracing the\n # function when possible, to avoid hard-coding them in the function\n # graph. \"Default\" placements come from the PartitionedCallOp's placement,\n # so that the same trace of the Python function may be placed on several\n # different devices and saved functions may be placed on new devices when\n # restored.\n if context.executing_eagerly():\n self.seed = context.global_seed()\n self._xla_compile = (context.context().device_spec.device_type == \"TPU\")\n if self._distribution_strategy_stack or self._xla_compile:\n self._add_device_to_stack(context.context().device_name)\n else:\n self.seed = graph.seed\n self._xla_compile = getattr(graph, \"_xla_compile\", False)\n # TODO(allenl): Figure out if we can remove colocation stack\n # specialization (currently used in cond_v2), here and in the cache key.\n self._colocation_stack = graph._colocation_stack.copy()\n if (self._distribution_strategy_stack\n or self._xla_compile\n or device_stack_has_callable(graph._device_function_stack)):\n # Hard-code devices from device functions in the function body\n self._device_function_stack = graph._device_function_stack.copy()\n if not self._read_only_collections:\n self._collections = graph._collections\n else:\n for collection_name in graph.get_all_collection_keys():\n if collection_name not in WHITELIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection(\n collection_name)\n for collection_name in WHITELIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection_ref(\n collection_name)\n\n self._variable_creator_stack = graph._variable_creator_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n self._graph_key = graph._graph_key\n # pylint: enable=protected-access\n\n @property\n def variables(self):\n \"\"\"A list of variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. 
Calling the\n function after a variable it accesses has been deleted is an error.\n\n Yields:\n Strong references to variables accessed by this FuncGraph.\n \"\"\"\n for weak_v in self._weak_variables:\n v = weak_v()\n if v is None:\n raise AssertionError(\n \"Called a function referencing variables which have been deleted. \"\n \"This likely means that function-local variables were created and \"\n \"not referenced elsewhere in the program. This is generally a \"\n \"mistake; consider storing variables in an object attribute on \"\n \"first call.\")\n yield v\n\n @variables.setter\n def variables(self, var_list):\n self._weak_variables = [weakref.ref(v) for v in var_list]\n\n def create_op(\n self,\n op_type,\n inputs,\n dtypes,\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: A list of `DType` objects that will be the types of the tensors\n that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of\n the tensors that the operation consumes. By default, uses the base\n `DType` of each input in `inputs`. Operations that expect\n reference-typed inputs must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed\n to compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. 
We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n for i, inp in enumerate(inputs):\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n inputs[i] = inp\n return super(FuncGraph, self).create_op(\n op_type, inputs, dtypes, input_types, name, attrs, op_def,\n compute_device=compute_device)\n\n def capture(self, tensor, name=None):\n \"\"\"Captures `tensor` if it's external to this graph.\n\n If `tensor` is from a different graph, returns a placeholder for it.\n `tensor` and the placeholder will appear in self.captures, and the\n placeholder will appear in self.inputs. Multiple calls to this method with\n the same `tensor` argument will return the same placeholder. If `tensor` is\n from this graph, returns `tensor`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n\n Returns:\n Tensor from this FuncGraph.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n if name is None:\n name = str(ops.uid())\n return self._capture_helper(tensor, name)\n if tensor.graph is not self:\n if name is None:\n name = tensor.op.name\n return self._capture_helper(tensor, name)\n return tensor\n\n def _capture_helper(self, tensor, name):\n captured_tensor = self.captures.get(tensor, None)\n if captured_tensor is None:\n captured_tensor = _create_substitute_placeholder(tensor, name=name,\n dtype=tensor.dtype)\n self.captures[tensor] = captured_tensor\n self.inputs.append(captured_tensor)\n tape.record_operation(\"captured_value\", [captured_tensor], [tensor],\n lambda x: [x])\n return captured_tensor\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return list(self.captures.keys())\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return list(self.captures.values())\n\n\ndef func_graph_from_py_func(name,\n python_func,\n args,\n kwargs,\n signature=None,\n func_graph=None,\n autograph=False,\n add_control_dependencies=True,\n arg_names=None,\n op_return_value=None):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwargs: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. When a signature is provided, `args` and\n `kwargs` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n func_graph: Optional. An instance of FuncGraph. 
If provided, we will use\n this graph else a new one is built and returned.\n autograph: whether to use autograph to compile `python_func`.\n See https://www.tensorflow.org/guide/autograph for more information.\n add_control_dependencies: If True, automatically adds control dependencies\n to ensure program order matches execution order and stateful ops always\n execute.\n arg_names: Optional list of argument names, used to give input placeholders\n recognizable names.\n op_return_value: Optional. A Tensor. If set and `python_func` returns\n Operations, those return values will be replaced with this value. If not\n set, returning an Operation triggers an error.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None` nor a\n `Tensor`.\n \"\"\"\n if op_return_value is not None:\n assert isinstance(op_return_value, ops.Tensor), op_return_value\n if func_graph is None:\n func_graph = FuncGraph(name)\n assert isinstance(func_graph, FuncGraph)\n if add_control_dependencies:\n control_manager = AutomaticControlDependencies\n else:\n control_manager = ops.NullContextmanager\n with func_graph.as_default(), control_manager() as a:\n current_scope = variable_scope.get_variable_scope()\n default_use_recource = current_scope.use_resource\n current_scope.set_use_resource(True)\n\n if signature is not None:\n args = signature\n kwargs = {}\n\n # Creates and names placeholders for all arguments.\n func_args = _get_defun_inputs_from_args(args, arg_names)\n func_kwargs = _get_defun_inputs_from_kwargs(kwargs)\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))\n func_kwargs_before = nest.pack_sequence_as(\n func_kwargs, nest.flatten(func_kwargs))\n\n def convert(x):\n \"\"\"Converts a function output to a Tensor.\"\"\"\n if x is None:\n return None\n if op_return_value is not None and isinstance(x, ops.Operation):\n # TODO(b/79881896): we currently can't capture external control deps, so\n # this won't work if x needs to be captured (i.e. 
if python_func returns\n # captured Operations).\n with ops.control_dependencies([x]):\n x = array_ops.identity(op_return_value)\n elif not isinstance(x, tensor_array_ops.TensorArray):\n try:\n x = ops.convert_to_tensor_or_indexed_slices(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.contrib.eager.defun, Python functions \"\n \"must return zero or more Tensors; in compilation of %s, found \"\n \"return value of type %s, which is not a Tensor.\" %\n (str(python_func), type(x)))\n if add_control_dependencies:\n x = a.mark_as_return(x)\n return x\n\n this_tape = tape.push_new_tape()\n try:\n if autograph:\n from tensorflow.python import autograph # pylint: disable=g-import-not-at-top\n _, original_func = tf_decorator.unwrap(python_func)\n\n def wrapper(*args, **kwargs):\n return autograph.converted_call(\n original_func, None,\n autograph.ConversionOptions(\n verbose=autograph.Verbosity.BRIEF,\n recursive=True,\n strip_decorators=(def_function.function,),\n optional_features=(),\n ), *args, **kwargs)\n\n # Wrapping around a decorator allows checks like tf_inspect.getargspec\n # to be accurate.\n converted_func = tf_decorator.make_decorator(original_func, wrapper)\n tf_decorator.rewrap(python_func, original_func, converted_func)\n\n func_outputs = python_func(*func_args, **func_kwargs)\n\n # invariant: `func_outputs` contains only Tensors, IndexedSlices,\n # SparseTensors, TensorArrays and `None`s.\n func_outputs = nest.map_structure(convert, func_outputs)\n\n check_mutation(func_args_before, func_args)\n check_mutation(func_kwargs_before, func_kwargs)\n finally:\n tape.pop_tape(this_tape)\n current_scope.set_use_resource(default_use_recource)\n\n # Variables in `func_args`, `func_kwargs` should be explicit inputs\n # to the function, not captured inputs.\n tape_variables = this_tape.watched_variables()\n arg_variables = set()\n inputs = []\n for arg in nest.flatten(func_args) + nest.flatten(func_kwargs):\n if isinstance(arg, resource_variable_ops.ResourceVariable):\n # Even if an argument variable was not used in the function, we've\n # already manually captured the resource Tensor when creating argument\n # placeholders.\n resource_placeholder = func_graph.captures.pop(arg.handle)\n arg_variables.add(arg)\n inputs.append(resource_placeholder)\n elif isinstance(arg, ops.Tensor):\n inputs.append(arg)\n variables = [v for v in tape_variables if v not in arg_variables]\n func_graph.inputs = inputs + list(func_graph.captures.values())\n\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in flatten(func_graph.structured_outputs)\n if x is not None)\n\n func_graph.variables = variables\n\n # Register any other functions defined in the graph.\n with ops.init_scope():\n if context.executing_eagerly():\n for f in func_graph._functions.values(): # pylint: disable=protected-access\n # TODO(ashankar): What about the gradient registry?\n context.add_function(f._c_func.func) # pylint: disable=protected-access\n\n return func_graph\n\n\ndef maybe_captured(tensor):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n tensor: Tensor.\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n if (not isinstance(tensor, ops.EagerTensor) and\n tensor.op.graph.building_function and tensor.op.type == \"Placeholder\"):\n for input_t, placeholder_t in tensor.op.graph.captures.items():\n if 
tensor == placeholder_t:\n return maybe_captured(input_t)\n # pylint: enable=protected-access\n return tensor\n\n\ndef device_stack_has_callable(device_stack):\n \"\"\"Checks whether a device stack contains a callable.\"\"\"\n return any(callable(spec._device_name_or_function) # pylint: disable=protected-access\n for spec in device_stack.peek_objs())\n\n\ndef check_mutation(n1, n2):\n \"\"\"Check if two lists of arguments are exactly the same.\"\"\"\n errmsg = (\"Function to be traced should not modify structure of input \"\n \"arguments. Check if your function has list and dictionary \"\n \"operations that alter input arguments, \"\n \"such as `list.pop`, `list.append`\")\n try:\n nest.assert_same_structure(n1, n2)\n except ValueError:\n raise ValueError(errmsg)\n\n for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):\n if arg1 is not arg2:\n raise ValueError(errmsg)\n\n\ndef flatten(sequence):\n \"\"\"Like `nest.flatten` but also unpacks other Tensor-like objects.\n\n Flattens non-tensor objects into their constituent tensors.\n\n Args:\n sequence: A nested structure of Tensors, IndexedSlices, SparseTensors and\n TensorArrays.\n\n Returns:\n A list of tensors.\n \"\"\"\n # TODO(akshayka): Support `SparseTensor` in a similar fashion.\n flat_sequence = nest.flatten(sequence)\n outputs = []\n for item in flat_sequence:\n if isinstance(item, ops.IndexedSlices):\n if item.dense_shape is not None:\n outputs.extend([item.values, item.indices, item.dense_shape])\n else:\n outputs.extend([item.values, item.indices])\n elif isinstance(item, sparse_tensor.SparseTensor):\n outputs.extend([item.indices, item.values, item.dense_shape])\n elif isinstance(item, tensor_array_ops.TensorArray):\n outputs.append(item.flow)\n else:\n outputs.append(item)\n return outputs\n\n\ndef pack_sequence_as(structure, flat_sequence):\n \"\"\"Like `nest.pack_sequence_as` but also packs other Tensor-like objects.\n\n Args:\n structure: The structure to pack into. 
May contain Tensors, IndexedSlices,\n TensorArrays or SparseTensors.\n flat_sequence: An iterable containing tensors.\n\n Returns:\n A nested structure.\n\n Raises:\n AssertionError if `structure` and `flat_sequence` are not compatible.\n \"\"\"\n flattened_structure = nest.flatten(structure)\n flat_sequence_with_slices_and_tas = []\n index = 0\n for t in flattened_structure:\n if isinstance(t, ops.IndexedSlices):\n if t.dense_shape is not None:\n flat_sequence_with_slices_and_tas.append(\n ops.IndexedSlices(*flat_sequence[index:index + 3]))\n index += 3\n else:\n flat_sequence_with_slices_and_tas.append(\n ops.IndexedSlices(*flat_sequence[index:index + 2]))\n index += 2\n elif isinstance(t, sparse_tensor.SparseTensor):\n flat_sequence_with_slices_and_tas.append(\n sparse_tensor.SparseTensor(*flat_sequence[index:index + 3]))\n index += 3\n elif isinstance(t, tensor_array_ops.TensorArray):\n flow = flat_sequence[index]\n ta = tensor_array_ops.build_ta_with_new_flow(t, flow)\n flat_sequence_with_slices_and_tas.append(ta)\n index += 1\n else:\n flat_sequence_with_slices_and_tas.append(flat_sequence[index])\n index += 1\n assert len(flattened_structure) == len(flat_sequence_with_slices_and_tas)\n return nest.pack_sequence_as(structure, flat_sequence_with_slices_and_tas)\n\n\ndef _create_substitute_placeholder(value, name=None, dtype=None):\n \"\"\"Creates a placeholder for `value` and propagates shape info to it.\"\"\"\n # Note: setting ops.control_dependencies(None) ensures we always put\n # capturing placeholders outside of any control flow context.\n with ops.control_dependencies(None):\n placeholder = graph_placeholder(\n dtype=dtype or value.dtype, shape=value.shape, name=name)\n custom_gradient.copy_handle_data(value, placeholder)\n return placeholder\n\n\ndef _get_defun_inputs_from_args(args, names):\n \"\"\"Maps Python function positional args to graph-construction inputs.\"\"\"\n return _get_defun_inputs(args, names, structure=args)\n\n\ndef _get_defun_inputs(flat_args, names, structure):\n \"\"\"Maps python function args to graph-construction inputs.\n\n Args:\n flat_args: A flat list of user-specified arguments.\n names: A list of strings with user-specified argument names, same length as\n `flat_args`. May be `None`, in which case a generic name is used.\n structure: The original argument list or dictionary.\n\n Returns:\n Placeholders with the same structure as `structure`.\n \"\"\"\n func_graph = ops.get_default_graph()\n function_inputs = []\n if names is None:\n names = [None] * len(flat_args)\n for arg_value, name in zip(flat_args, names):\n for arg in nest.flatten(arg_value):\n if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):\n if isinstance(arg, tensor_spec.TensorSpec) and arg.name:\n requested_name = arg.name\n else:\n requested_name = name\n placeholder = graph_placeholder(\n arg.dtype, arg.shape,\n name=requested_name)\n if name is not None:\n # Record the requested/user-specified name in case it's different than\n # the uniquified name, for validation when exporting signatures.\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))\n function_inputs.append(placeholder)\n elif isinstance(arg, resource_variable_ops.ResourceVariable):\n # Capture arg variables to create placeholders for them. 
These will be\n # removed as captures after the function is traced (since otherwise we'd\n # just add it back with a new placeholder when the variable was\n # referenced).\n placeholder = func_graph.capture(arg.handle, name=name)\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(name)))\n function_inputs.append(arg)\n else:\n function_inputs.append(arg)\n return nest.pack_sequence_as(structure, function_inputs)\n\n\ndef _get_defun_inputs_from_kwargs(kwargs):\n \"\"\"Maps Python function keyword args to graph-construction inputs.\"\"\"\n if kwargs:\n names, flat_args = zip(*sorted(kwargs.items()))\n else:\n names = []\n flat_args = []\n return _get_defun_inputs(flat_args, names, structure=kwargs)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"cond_v2 and gradient.\n\nThis is a version of cond that emits a single If op, as well as the gradient\nfunction for If ops produced by cond_v2. This will eventually replace the\ncurrent tf.cond implementation once it reaches feature and performance parity.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import function_def_to_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_util_v2 as util\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.util import nest\n\n\n# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify\n# that they aren't part of the official public API. These protected members\n# often need to be used by implementation code however. Rather than litter the\n# code with pylint comments, we ignore protected access violations for\n# readability.\n# pylint: disable=protected-access\n\n\ndef cond_v2(pred, true_fn, false_fn, name=\"cond\"):\n \"\"\"Like tf.cond, except emits a single If op.\"\"\"\n if isinstance(pred, bool):\n raise TypeError(\"pred must not be a Python bool\", pred)\n\n if not name:\n name = \"cond\"\n\n with ops.name_scope(name) as scope:\n true_name = util.unique_fn_name(scope, \"true\")\n false_name = util.unique_fn_name(scope, \"false\")\n\n # Automatic control dependencies are added in defuns, but not in v1\n # graphs. 
Propagate that behavior here.\n add_control_dependencies = util.in_defun()\n pred = ops.convert_to_tensor(pred)\n\n true_graph = func_graph_module.func_graph_from_py_func(\n true_name,\n true_fn, [], {},\n func_graph=util.CondBranchFuncGraph(\n true_name, read_only_collections=False),\n add_control_dependencies=add_control_dependencies,\n op_return_value=pred)\n false_graph = func_graph_module.func_graph_from_py_func(\n false_name,\n false_fn, [], {},\n func_graph=util.CondBranchFuncGraph(\n false_name, read_only_collections=False),\n add_control_dependencies=add_control_dependencies,\n op_return_value=pred)\n\n outputs = _build_cond(pred, true_graph, false_graph,\n true_graph.external_captures,\n false_graph.external_captures,\n name=scope)\n\n return func_graph_module.pack_sequence_as(true_graph.structured_outputs,\n outputs)\n\n\[email protected](\"If\")\ndef _IfGrad(op, *grads): # pylint: disable=invalid-name\n \"\"\"The gradient of an If op produced by cond_v2.\"\"\"\n true_graph, false_graph = _get_func_graphs(op)\n # Note: op.graph != ops.get_default_graph() when we are computing the gradient\n # of a nested cond.\n assert true_graph.outer_graph == op.graph\n assert false_graph.outer_graph == op.graph\n\n # Create grad functions that compute the gradient of the true/false forward\n # graphs. These functions will capture tensors from the forward pass\n # functions.\n true_grad_graph = _create_grad_func(\n true_graph, grads, util.unique_grad_fn_name(true_graph.name))\n false_grad_graph = _create_grad_func(\n false_graph, grads, util.unique_grad_fn_name(false_graph.name))\n\n assert ([t.dtype for t in true_grad_graph.outputs] ==\n [t.dtype for t in false_grad_graph.outputs])\n\n # Resolve references to forward graph tensors in grad graphs and ensure\n # they are in-scope, i.e., belong to one of outer graphs of the grad graph.\n true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)\n false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)\n\n outputs = _build_cond(op.inputs[0], true_grad_graph, false_grad_graph,\n true_grad_inputs, false_grad_inputs)\n\n # The predicate has no gradient.\n return [None] + outputs\n\n\ndef _build_cond(pred, true_graph, false_graph, true_inputs, false_inputs,\n name=None):\n \"\"\"Creates an If op from the specified predicate, branch functions and inputs.\n\n Note that this modifies true_graph and false_graph to make the inputs match,\n and to output all intermediates values so they're available for the gradient\n computation.\n\n true_graph and false_graph need not have the same input types, but they must\n have the same outpute types.\n\n Args:\n pred: boolean Tensor\n true_graph: FuncGraph\n false_graph: FuncGraph\n true_inputs: a list of Tensors to be passed to true_graph as input.\n false_inputs: a list of Tensors to be passed to false_graph as input.\n name: the name for the If op.\n\n Returns:\n A list of Tensors which are the outputs of the If op. Does not include added\n intermediate outputs.\n \"\"\"\n _check_same_outputs(true_graph, false_graph)\n\n # Add inputs to true_graph and false_graph to make them match. Note that\n # this modifies true_graph and false_graph.\n cond_inputs = _make_inputs_match(true_graph, false_graph,\n true_inputs, false_inputs)\n\n # Add all intermediate tensors as function outputs so they're available for\n # the gradient computation. Since the outputs of the two functions must match,\n # we wrap all the intermediates in optionals. 
Each intermediate output will\n # have a value iff its corresponding branch is taken.\n\n true_intermediates = _get_intermediates(true_graph)\n false_intermediates = _get_intermediates(false_graph)\n\n # Save the original number of outputs to return to the caller.\n num_cond_outputs = len(true_graph.outputs)\n\n if control_flow_util.InXlaContext(ops.get_default_graph()):\n # XLA does not yet support optionals, so output intermediates directly and\n # make them match via FakeParams, which can be converted to zeros in XLA.\n # TODO(skyewm,jpienaar): can XLA support optionals?\n extra_true_outputs, extra_false_outputs = _make_intermediates_match_xla(\n true_graph, false_graph, true_intermediates, false_intermediates)\n else:\n # Wrap intermediates in optionals.\n wrapped_true_intermediates = _wrap_intermediates(true_graph,\n true_intermediates)\n wrapped_false_intermediates = _wrap_intermediates(false_graph,\n false_intermediates)\n\n # Make outputs match by adding none optionals.\n extra_true_outputs, extra_false_outputs = _make_intermediates_match(\n true_graph, false_graph,\n wrapped_true_intermediates, wrapped_false_intermediates)\n\n true_graph.outputs.extend(extra_true_outputs)\n false_graph.outputs.extend(extra_false_outputs)\n # TODO(skyewm): somehow indicate it's a bug if this fails.\n _check_same_outputs(true_graph, false_graph)\n\n # Create the If op.\n tensors = gen_functional_ops._if( # pylint: disable=protected-access\n pred,\n cond_inputs, [t.dtype for t in true_graph.outputs],\n util.create_new_tf_function(true_graph),\n util.create_new_tf_function(false_graph),\n output_shapes=_get_output_shapes(true_graph.outputs,\n false_graph.outputs),\n name=name)\n\n # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output\n if_op = tensors[0].op\n util.maybe_set_lowering_attr(if_op)\n\n # Return identities for each output of the If op, rather than the output of\n # the If op directly. This makes pruning work if the output of cond() is\n # fetched: the lowering pass converts the If outputs into IdentityN outputs,\n # which if fetched will cause all ops in the taken branch to be run (since\n # it takes all merge ops as input). After lowering, each output identity op\n # will end up with only the appropriate merge op as input.\n # TODO(b/79984175): this doesn't have to be a tuple once we covert to the\n # correct output structure\n tensors = [array_ops.identity(t) for t in tensors]\n\n # Prevent fetching since the variant outputs can't be fetched directly.\n if_op.graph.prevent_fetching(if_op)\n\n return tensors[:num_cond_outputs]\n\n\ndef _get_func_graphs(if_op):\n \"\"\"Returns `FuncGraph`s for the input op branches.\n\n Args:\n if_op: The _If Operation.\n\n Returns:\n A 2-tuple of the `FuncGraph`s of the then_branch and else_branch.\n \"\"\"\n def _get_func_graph_for_branch(branch_name):\n \"\"\"Generates and returns a FuncGraph for the given branch.\"\"\"\n inputs = if_op.inputs[1:] # First input is pred.\n input_shapes = [t.shape for t in inputs]\n func_name = if_op.get_attr(branch_name).name\n fdef = if_op.graph._get_function(func_name).definition\n # `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.\n # in the case of nested if ops or when the gradient is being computed\n # from inside a Defun. We build the `func_graph` with `if_op.graph` as its\n # `outer_graph`. This resembles how the `FuncGraph` was built in the\n # forward pass. 
We need this so that we can resolve references to tensors\n # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.\n with if_op.graph.as_default():\n func_graph = function_def_to_graph.function_def_to_graph(\n fdef, input_shapes)\n func_graph.captures = collections.OrderedDict(zip(inputs,\n func_graph.inputs))\n # Set the if op so that the gradient code can use it.\n func_graph._if = if_op\n return func_graph\n\n return (_get_func_graph_for_branch(\"then_branch\"),\n _get_func_graph_for_branch(\"else_branch\"))\n\n\ndef _grad_fn(func_graph, grads):\n \"\"\"The gradient function for each conditional branch.\n\n This function builds the gradient graph of the corresponding forward-pass\n conditional branch in `func_graph`. This is done by differentiating\n func_graph's outputs w.r.t. its inputs.\n\n Args:\n func_graph: FuncGraph. The corresponding forward-pass function.\n grads: The list of input gradient Tensors.\n\n Returns:\n The output gradient Tensors.\n \"\"\"\n # Filter out untrainable function outputs.\n # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes\n # cause _GradientsHelper to raise an exception (e.g. the implementation\n # doesn't expect 'ys' to contain boolean tensors).\n assert len(func_graph.outputs) == len(grads)\n ys = []\n grad_ys = []\n for y, grad_y in zip(func_graph.outputs, grads):\n if not gradients_impl.IsTrainable(y):\n continue\n ys.append(y)\n grad_ys.append(grad_y)\n\n # Build the gradient graph. Note that this builds the gradient computation of\n # func_graph in the current graph, which requires capturing tensors from\n # func_graph. The captured func_graph tensors are resolved to external tensors\n # in _resolve_grad_inputs.\n result = gradients_impl._GradientsHelper(\n ys, func_graph.inputs, grad_ys=grad_ys,\n src_graph=func_graph)\n\n # Functions can't return None; replace Nones with zero tensors.\n # TODO(b/80444525): don't return anything here and make _IfGrad return None if\n # both branches have zero gradient.\n for i in range(len(result)):\n if result[i] is None:\n if func_graph.inputs[i].dtype == dtypes.resource:\n result[i] = array_ops.zeros(\n gen_resource_variable_ops.variable_shape(func_graph.inputs[i]))\n else:\n result[i] = array_ops.zeros_like(func_graph.inputs[i])\n\n return result\n\n\ndef _create_grad_func(func_graph, grads, name):\n \"\"\"Returns the FuncGraph representation of _grad_fn.\"\"\"\n return func_graph_module.func_graph_from_py_func(\n name,\n lambda: _grad_fn(func_graph, grads), [], {},\n func_graph=_CondGradFuncGraph(name, func_graph))\n\n\ndef _resolve_grad_inputs(cond_graph, grad_graph):\n \"\"\"Returns the tensors to pass as inputs to `grad_graph`.\n\n The `grad_graph` may have external references to\n 1. Its outer graph containing the input gradients. These references are kept\n as is.\n 2. Tensors in the forward pass graph. These tensors may not be \"live\"\n when the gradient is being computed. We replace such references by their\n corresponding tensor in `cond_graph.outer_graph`. In the case of nested\n control flow or functions, the gradient logic handling\n `grad_graph.outer_graph` will make sure the tensor from\n `cond_graph.outer_graph` is also correctly captured.\n\n Args:\n cond_graph: FuncGraph. The forward-pass function.\n grad_graph: FuncGraph. 
The gradients function.\n\n Returns:\n A list of inputs tensors to be passed to grad_graph.\n \"\"\"\n new_inputs = []\n\n for t in grad_graph.external_captures:\n # `t` must either be in `grad_graph.outer_graph` or in the forward\n # `cond_graph`.\n if t.graph != grad_graph.outer_graph:\n assert t.graph == cond_graph\n # `internal_captures` are not treated as intermediates and hence not added\n # to If op outputs. So we get the outer tensor corresponding to those\n # from the list of `external_captures`.\n try:\n t = t.graph._if.outputs[t.graph.outputs.index(t)]\n except ValueError:\n index = t.graph.internal_captures.index(t)\n t = t.graph.external_captures[index]\n\n # Note: We rely on the capturing logic of the gradient If op graph to\n # correctly capture the tensors in `cond_graph.outer_graph`. Both cond_v2\n # and while_v2 handle this while building their gradient functions.\n assert t.graph == cond_graph.outer_graph\n new_inputs.append(t)\n\n return new_inputs\n\n\ndef _get_intermediates(func_graph):\n \"\"\"Returns all tensors in `func_graph` that aren't inputs or outputs.\"\"\"\n intermediates = []\n for op in func_graph.get_operations():\n for t in op.outputs:\n if t in func_graph.inputs: continue\n if t in func_graph.outputs: continue\n intermediates.append(t)\n return intermediates\n\n\ndef _separate_unique_inputs(true_inputs, false_inputs):\n \"\"\"Separates tensors appearing only in true_inputs or false_inputs, or both.\n\n Args:\n true_inputs: list of Tensors\n false_inputs: list of Tensors\n\n Returns:\n Three lists of Tensors:\n 1. The tensors that appear in both true_inputs and false_inputs\n 2. The tensors that only appear in true_inputs\n 3. The tensors that only appear in false_inputs\n \"\"\"\n true_inputs = set(true_inputs)\n false_inputs = set(false_inputs)\n\n shared_inputs = true_inputs.intersection(false_inputs)\n true_only_inputs = true_inputs - false_inputs\n false_only_inputs = false_inputs - true_inputs\n\n return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)\n\n\ndef _make_intermediates_match(true_graph, false_graph,\n true_optionals, false_optionals):\n \"\"\"Returns new optionals lists that have matching signatures.\n\n This is done by mirroring each list in the other using none optionals.\n There is no merging of like optionals.\n\n Args:\n true_graph: FuncGraph\n false_graph: FuncGraph\n true_optionals: a list of optional Tensors from true_graph\n false_optionals: a list of optional Tensors from false_graph\n\n Returns:\n A new list of Tensors in true_graph and a new list of Tensors in\n false_graph. 
The two lists have the same number of Tensors, all of which\n will be optionals of the same shape/type.\n \"\"\"\n new_true_optionals = (true_optionals +\n _create_none_optionals(true_graph, false_optionals))\n new_false_optionals = (_create_none_optionals(false_graph, true_optionals)\n + false_optionals)\n return new_true_optionals, new_false_optionals\n\n\ndef _make_intermediates_match_xla(true_graph, false_graph, true_intermediates,\n false_intermediates):\n \"\"\"Like _make_intermediates_match but for the XLA case.\"\"\"\n new_true_intermediates = (true_intermediates +\n _create_fakeparams(true_graph, false_intermediates))\n new_false_intermediates = (_create_fakeparams(false_graph, true_intermediates)\n + false_intermediates)\n return new_true_intermediates, new_false_intermediates\n\n\ndef _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):\n \"\"\"Modifies true_graph and false_graph so they have the same input signature.\n\n This method reorders and/or adds parameters to true_graph and false_graph so\n they have the same input signature, and updates the 'inputs' and 'captured'\n fields of both graphs accordingly. It uses the input tensors from the outer\n graph to avoid duplicating shared arguments.\n\n Args:\n true_graph: FuncGraph\n false_graph: FuncGraph\n true_inputs: a list of Tensors in the outer graph. The inputs for\n true_graph.\n false_inputs: a list of Tensors in the outer graph. The inputs for\n false_graph.\n\n Returns:\n A new list of Tensors from the outer graph that are the new inputs for both\n true_graph and false_graph. This is a deduped version of true_inputs +\n false_inputs.\n \"\"\"\n shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(\n true_inputs, false_inputs)\n\n new_inputs = shared_inputs + true_only_inputs + false_only_inputs\n\n true_input_to_param = dict(zip(true_inputs, true_graph.inputs))\n false_input_to_param = dict(zip(false_inputs, false_graph.inputs))\n\n true_graph.inputs = (\n [true_input_to_param[t] for t in shared_inputs] +\n [true_input_to_param[t] for t in true_only_inputs] +\n _create_dummy_inputs(true_graph, false_only_inputs))\n\n false_graph.inputs = (\n [false_input_to_param[t] for t in shared_inputs] +\n _create_dummy_inputs(false_graph, true_only_inputs) +\n [false_input_to_param[t] for t in false_only_inputs])\n\n # Rewrite the FuncGraphs' state to reflect the new inputs.\n true_graph.captures = collections.OrderedDict(zip(new_inputs,\n true_graph.inputs))\n false_graph.captures = collections.OrderedDict(zip(new_inputs,\n false_graph.inputs))\n\n return new_inputs\n\n\ndef _wrap_intermediates(func_graph, intermediates):\n with func_graph.as_default():\n return [gen_dataset_ops.optional_from_value([t]) for t in intermediates]\n\n\ndef _create_dummy_inputs(func_graph, template_tensors):\n \"\"\"Creates tensors in func_graph to represent template_tensors.\n\n Args:\n func_graph: FuncGraph.\n template_tensors: a list of tensors in the outer graph.\n\n Returns:\n A list of tensors in func_graph.\n \"\"\"\n with func_graph.as_default():\n return [array_ops.placeholder(t.dtype, shape=t.shape)\n for t in template_tensors]\n\n\ndef _create_none_optionals(func_graph, template_tensors):\n \"\"\"Creates none optionals in func_graph to represent template_tensors.\n\n Args:\n func_graph: FuncGraph.\n template_tensors: a list of tensors in func_graph.\n\n Returns:\n A list of tensors in func_graph.\n \"\"\"\n with func_graph.as_default():\n return [gen_dataset_ops.optional_none() for _ in 
template_tensors]\n\n\ndef _create_fakeparams(func_graph, template_tensors):\n \"\"\"Create FakeParams for the XLA case.\"\"\"\n with func_graph.as_default():\n return [gen_functional_ops.fake_param(dtype=t.dtype, shape=t.shape)\n for t in template_tensors]\n\n\ndef _check_same_outputs(true_graph, false_graph):\n \"\"\"Raises an error if true_graph and false_graph have different outputs.\"\"\"\n true_output_types = [t.dtype for t in true_graph.outputs]\n false_output_types = [t.dtype for t in false_graph.outputs]\n if (len(true_graph.outputs) != len(false_graph.outputs) or\n true_output_types != false_output_types):\n raise TypeError(\n \"true_fn() and false_fn() must return the same number and type of \"\n \"arguments, got:\\n\"\n \" true_fn: %s\\n\"\n \" false_fn: %s\" % (true_output_types, false_output_types))\n\n # Make sure `structured_outputs` for both graphs have the same structure.\n try:\n nest.assert_same_structure(true_graph.structured_outputs,\n false_graph.structured_outputs)\n except (ValueError, TypeError) as e:\n raise ValueError(\"Outputs of true_fn and false_fn must have the same \"\n \"structure: %s\" % str(e))\n\n\ndef _get_output_shapes(true_graph_outputs, false_graph_outputs):\n output_shapes = [\n t_out.shape.most_specific_compatible_shape(f_out.shape)\n for t_out, f_out in zip(true_graph_outputs, false_graph_outputs)\n ]\n return output_shapes\n\n\nclass _CondGradFuncGraph(util.CondBranchFuncGraph):\n \"\"\"FuncGraph for the gradient function of the branch of an If op.\n\n Handles unwrapping optional intermediate values that are captured by the\n gradient computation.\n \"\"\"\n\n def __init__(self, name, forward_graph):\n super(_CondGradFuncGraph, self).__init__(name, read_only_collections=False)\n self._forward_graph = forward_graph\n\n def _capture_helper(self, tensor, name):\n if (tensor.graph is not self._forward_graph or\n tensor in self._forward_graph.inputs or\n tensor in self._forward_graph.outputs):\n return super(_CondGradFuncGraph, self)._capture_helper(tensor, name)\n\n # 'tensor' is an intermediate in the forward graph. We find the corresonding\n # optional tensor, which is output from the If op, and capture it as\n # normal. We then unwrap the captured optional value to get the raw\n # intermediate value.\n for consumer in tensor.consumers():\n if (consumer.type == \"OptionalFromValue\"\n and consumer.outputs[0] in self._forward_graph.outputs):\n optional = consumer.outputs[0]\n captured_optional = super(_CondGradFuncGraph, self)._capture_helper(\n optional, name)\n return gen_dataset_ops.optional_get_value(\n captured_optional, [tensor.dtype], [tensor.shape])[0]\n raise ValueError(\n \"Couldn't find OptionalFromValue consumer for tensor '%s'.\\n\"\n \"This is an internal bug, please report at \"\n \"https://github.com/tensorflow/tensorflow/issues.\" % tensor.name)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.scan()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import scan_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass ScanTest(test_base.DatasetTestBase):\n\n def _counting_dataset(self, start, scan_fn):\n return dataset_ops.Dataset.from_tensors(0).repeat().apply(\n scan_ops.scan(start, scan_fn))\n\n @test_util.run_deprecated_v1\n def testCount(self):\n def make_scan_fn(step):\n return lambda state, _: (state + step, state)\n\n start = array_ops.placeholder(dtypes.int32, shape=[])\n step = array_ops.placeholder(dtypes.int32, shape=[])\n take = array_ops.placeholder(dtypes.int64, shape=[])\n iterator = dataset_ops.make_initializable_iterator(self._counting_dataset(\n start, make_scan_fn(step)).take(take))\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n\n for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),\n (10, 2, 10), (10, -1, 10),\n (10, -2, 10)]:\n sess.run(iterator.initializer,\n feed_dict={start: start_val, step: step_val, take: take_val})\n for expected, _ in zip(\n itertools.count(start_val, step_val), range(take_val)):\n self.assertEqual(expected, self.evaluate(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element)\n\n @test_util.run_in_graph_and_eager_modes\n def testFibonacci(self):\n iterator = dataset_ops.make_one_shot_iterator(\n dataset_ops.Dataset.from_tensors(1).repeat(None).apply(\n scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1]))))\n\n if context.executing_eagerly():\n next_element = iterator.get_next\n else:\n get_next = iterator.get_next()\n next_element = lambda: get_next\n\n self.assertEqual(1, self.evaluate(next_element()))\n self.assertEqual(1, self.evaluate(next_element()))\n self.assertEqual(2, self.evaluate(next_element()))\n self.assertEqual(3, self.evaluate(next_element()))\n self.assertEqual(5, self.evaluate(next_element()))\n self.assertEqual(8, self.evaluate(next_element()))\n\n @test_util.run_deprecated_v1\n def testSparseCount(self):\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n def make_scan_fn(step):\n return lambda state, _: 
(_sparse(state.values[0] + step), state)\n\n start = array_ops.placeholder(dtypes.int32, shape=[])\n step = array_ops.placeholder(dtypes.int32, shape=[])\n take = array_ops.placeholder(dtypes.int64, shape=[])\n iterator = dataset_ops.make_initializable_iterator(self._counting_dataset(\n _sparse(start), make_scan_fn(step)).take(take))\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n\n for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),\n (10, 2, 10), (10, -1, 10),\n (10, -2, 10)]:\n sess.run(iterator.initializer,\n feed_dict={start: start_val, step: step_val, take: take_val})\n for expected, _ in zip(\n itertools.count(start_val, step_val), range(take_val)):\n self.assertEqual(expected, self.evaluate(next_element).values[0])\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element)\n\n @test_util.run_deprecated_v1\n def testChangingStateShape(self):\n # Test the fixed-point shape invariant calculations: start with\n # initial values with known shapes, and use a scan function that\n # changes the size of the state on each element.\n def _scan_fn(state, input_value):\n # Statically known rank, but dynamic length.\n ret_longer_vector = array_ops.concat([state[0], state[0]], 0)\n # Statically unknown rank.\n ret_larger_rank = array_ops.expand_dims(state[1], 0)\n return (ret_longer_vector, ret_larger_rank), (state, input_value)\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(5).apply(\n scan_ops.scan(([0], 1), _scan_fn))\n self.assertEqual([None], dataset.output_shapes[0][0].as_list())\n self.assertIs(None, dataset.output_shapes[0][1].ndims)\n self.assertEqual([], dataset.output_shapes[1].as_list())\n\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n for i in range(5):\n (longer_vector_val, larger_rank_val), _ = self.evaluate(next_element)\n self.assertAllEqual([0] * (2**i), longer_vector_val)\n self.assertAllEqual(np.array(1, ndmin=i), larger_rank_val)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element)\n\n def testIncorrectStateType(self):\n\n def _scan_fn(state, _):\n return constant_op.constant(1, dtype=dtypes.int64), state\n\n dataset = dataset_ops.Dataset.range(10)\n with self.assertRaisesRegexp(\n TypeError,\n \"The element types for the new state must match the initial state.\"):\n dataset.apply(\n scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))\n\n def testIncorrectReturnType(self):\n\n def _scan_fn(unused_state, unused_input_value):\n return constant_op.constant(1, dtype=dtypes.int64)\n\n dataset = dataset_ops.Dataset.range(10)\n with self.assertRaisesRegexp(\n TypeError,\n \"The scan function must return a pair comprising the new state and the \"\n \"output value.\"):\n dataset.apply(\n scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#,============================================================================\n\"\"\"Tests for layer graphs construction & handling.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import input_layer as input_layer_lib\nfrom tensorflow.python.keras.engine import network as network_lib\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import rmsprop\n\ntry:\n import yaml # pylint:disable=g-import-not-at-top\nexcept ImportError:\n yaml = None\n\n\nclass TopologyConstructionTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def test_get_updates(self):\n\n class MyLayer(keras.layers.Layer):\n\n def build(self, input_shape):\n self.a = self.add_variable('a',\n (1, 1),\n 'float32',\n trainable=False)\n self.b = self.add_variable('b',\n (1, 1),\n 'float32',\n trainable=False)\n self.add_update(state_ops.assign_add(self.a, [[1.]],\n name='unconditional_update'))\n self.built = True\n\n def call(self, inputs):\n self.add_update(state_ops.assign_add(self.b, inputs,\n name='conditional_update'),\n inputs=True)\n return inputs + 1\n\n x1 = input_layer_lib.Input(shape=(1,))\n layer = MyLayer()\n _ = layer.apply(x1)\n\n self.assertEqual(len(layer.updates), 2)\n self.assertEqual(len(layer.get_updates_for(x1)), 1)\n self.assertEqual(len(layer.get_updates_for(None)), 1)\n\n x2 = input_layer_lib.Input(shape=(1,))\n y2 = layer.apply(x2)\n\n self.assertEqual(len(layer.updates), 3)\n self.assertEqual(len(layer.get_updates_for(x1)), 1)\n self.assertEqual(len(layer.get_updates_for(x2)), 1)\n self.assertEqual(len(layer.get_updates_for(None)), 1)\n\n network = network_lib.Network(x2, y2)\n self.assertEqual(len(network.updates), 2)\n self.assertEqual(len(network.get_updates_for(x1)), 0)\n self.assertEqual(len(network.get_updates_for(x2)), 1)\n self.assertEqual(len(network.get_updates_for(None)), 1)\n\n x3 = input_layer_lib.Input(shape=(1,))\n _ = layer.apply(x3)\n self.assertEqual(len(network.updates), 2)\n\n x4 = input_layer_lib.Input(shape=(1,))\n _ = network(x4)\n self.assertEqual(len(network.updates), 3)\n self.assertEqual(len(network.get_updates_for(x2)), 1)\n self.assertEqual(len(network.get_updates_for(x4)), 1)\n self.assertEqual(len(network.get_updates_for(None)), 1)\n\n network.add_update(state_ops.assign_add(layer.a, [[1]]))\n self.assertEqual(len(network.updates), 4)\n self.assertEqual(len(network.get_updates_for(None)), 2)\n\n 
network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)\n self.assertEqual(len(network.updates), 5)\n self.assertEqual(len(network.get_updates_for(x4)), 2)\n\n def test_get_updates_bn(self):\n x1 = input_layer_lib.Input(shape=(1,))\n layer = keras.layers.BatchNormalization()\n _ = layer.apply(x1)\n\n self.assertEqual(len(layer.updates), 2)\n self.assertEqual(len(layer.get_updates_for(x1)), 2)\n self.assertEqual(len(layer.get_updates_for(None)), 0)\n\n @test_util.run_deprecated_v1\n def test_get_losses(self):\n\n class MyLayer(keras.layers.Layer):\n\n def build(self, input_shape):\n self.a = self.add_variable('a',\n (1, 1),\n 'float32',\n trainable=False)\n self.b = self.add_variable('b',\n (1, 1),\n 'float32',\n trainable=False)\n self.add_loss(math_ops.reduce_sum(self.a))\n self.built = True\n\n def call(self, inputs):\n self.add_loss(math_ops.reduce_sum(inputs),\n inputs=True)\n return inputs + 1\n\n x1 = input_layer_lib.Input(shape=(1,))\n layer = MyLayer()\n _ = layer.apply(x1)\n\n self.assertEqual(len(layer.losses), 2)\n self.assertEqual(len(layer.get_losses_for(x1)), 1)\n self.assertEqual(len(layer.get_losses_for(None)), 1)\n\n x2 = input_layer_lib.Input(shape=(1,))\n y2 = layer.apply(x2)\n\n self.assertEqual(len(layer.losses), 3)\n self.assertEqual(len(layer.get_losses_for(x1)), 1)\n self.assertEqual(len(layer.get_losses_for(x2)), 1)\n self.assertEqual(len(layer.get_losses_for(None)), 1)\n\n network = network_lib.Network(x2, y2)\n self.assertEqual(len(network.losses), 2)\n self.assertEqual(len(network.get_losses_for(x1)), 0)\n self.assertEqual(len(network.get_losses_for(x2)), 1)\n self.assertEqual(len(network.get_losses_for(None)), 1)\n\n x3 = input_layer_lib.Input(shape=(1,))\n _ = layer.apply(x3)\n self.assertEqual(len(network.losses), 2)\n\n x4 = input_layer_lib.Input(shape=(1,))\n _ = network(x4)\n self.assertEqual(len(network.losses), 3)\n self.assertEqual(len(network.get_losses_for(x2)), 1)\n self.assertEqual(len(network.get_losses_for(x4)), 1)\n self.assertEqual(len(network.get_losses_for(None)), 1)\n\n network.add_loss(math_ops.reduce_sum(layer.a))\n self.assertEqual(len(network.losses), 4)\n self.assertEqual(len(network.get_losses_for(None)), 2)\n\n network.add_loss(math_ops.reduce_sum(x4), inputs=True)\n self.assertEqual(len(network.losses), 5)\n self.assertEqual(len(network.get_losses_for(x4)), 2)\n\n def testTopologicalAttributes(self):\n # test layer attributes / methods related to cross-layer connectivity.\n a = input_layer_lib.Input(shape=(32,), name='input_a')\n b = input_layer_lib.Input(shape=(32,), name='input_b')\n\n # test input, output, input_shape, output_shape\n test_layer = keras.layers.Dense(16, name='test_layer')\n a_test = test_layer(a)\n self.assertEqual(test_layer.input, a)\n self.assertEqual(test_layer.output, a_test)\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, (None, 16))\n\n # test `get_*_at` methods\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n self.assertEqual(dense.get_input_at(0), a)\n self.assertEqual(dense.get_input_at(1), b)\n self.assertEqual(dense.get_output_at(0), a_2)\n self.assertEqual(dense.get_output_at(1), b_2)\n self.assertEqual(dense.get_input_shape_at(0), (None, 32))\n self.assertEqual(dense.get_input_shape_at(1), (None, 32))\n self.assertEqual(dense.get_output_shape_at(0), (None, 16))\n self.assertEqual(dense.get_output_shape_at(1), (None, 16))\n\n # Test invalid value for attribute retrieval.\n with 
self.assertRaises(ValueError):\n dense.get_input_at(2)\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n _ = new_dense.input\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n _ = new_dense.output\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n _ = new_dense.output_shape\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n _ = new_dense.input_shape\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n a = input_layer_lib.Input(shape=(3, 32))\n a = input_layer_lib.Input(shape=(5, 32))\n a_2 = dense(a)\n b_2 = dense(b)\n _ = new_dense.input_shape\n with self.assertRaises(AttributeError):\n new_dense = keras.layers.Dense(16)\n a = input_layer_lib.Input(shape=(3, 32))\n a = input_layer_lib.Input(shape=(5, 32))\n a_2 = dense(a)\n b_2 = dense(b)\n _ = new_dense.output_shape\n\n def testTopologicalAttributesMultiOutputLayer(self):\n\n class PowersLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return [inputs**2, inputs**3]\n\n x = input_layer_lib.Input(shape=(32,))\n test_layer = PowersLayer()\n p1, p2 = test_layer(x) # pylint: disable=not-callable\n\n self.assertEqual(test_layer.input, x)\n self.assertEqual(test_layer.output, [p1, p2])\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])\n\n def testTopologicalAttributesMultiInputLayer(self):\n\n class AddLayer(keras.layers.Layer):\n\n def call(self, inputs):\n assert len(inputs) == 2\n return inputs[0] + inputs[1]\n\n a = input_layer_lib.Input(shape=(32,))\n b = input_layer_lib.Input(shape=(32,))\n test_layer = AddLayer()\n y = test_layer([a, b]) # pylint: disable=not-callable\n\n self.assertEqual(test_layer.input, [a, b])\n self.assertEqual(test_layer.output, y)\n self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])\n self.assertEqual(test_layer.output_shape, (None, 32))\n\n @test_util.run_deprecated_v1\n def testBasicNetwork(self):\n # minimum viable network\n x = input_layer_lib.Input(shape=(32,))\n dense = keras.layers.Dense(2)\n y = dense(x)\n network = network_lib.Network(x, y, name='dense_network')\n\n # test basic attributes\n self.assertEqual(network.name, 'dense_network')\n self.assertEqual(len(network.layers), 2) # InputLayer + Dense\n self.assertEqual(network.layers[1], dense)\n self.assertEqual(network.weights, dense.weights)\n self.assertEqual(network.trainable_weights, dense.trainable_weights)\n self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)\n\n # test callability on Input\n x_2 = input_layer_lib.Input(shape=(32,))\n y_2 = network(x_2)\n self.assertEqual(y_2.get_shape().as_list(), [None, 2])\n\n # test callability on regular tensor\n x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))\n y_2 = network(x_2)\n self.assertEqual(y_2.get_shape().as_list(), [None, 2])\n\n # test network `trainable` attribute\n network.trainable = False\n self.assertEqual(network.weights, dense.weights)\n self.assertEqual(network.trainable_weights, [])\n self.assertEqual(network.non_trainable_weights,\n dense.trainable_weights + dense.non_trainable_weights)\n\n def test_trainable_weights(self):\n a = keras.layers.Input(shape=(2,))\n b = keras.layers.Dense(1)(a)\n model = keras.models.Model(a, b)\n\n weights = model.weights\n self.assertListEqual(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.trainable = False\n 
self.assertListEqual(model.trainable_weights, [])\n self.assertListEqual(model.non_trainable_weights, weights)\n\n model.trainable = True\n self.assertListEqual(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.layers[1].trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self.assertListEqual(model.non_trainable_weights, weights)\n\n # sequential model\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1, input_dim=2))\n weights = model.weights\n\n self.assertListEqual(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self.assertListEqual(model.non_trainable_weights, weights)\n\n model.trainable = True\n self.assertListEqual(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.layers[0].trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self.assertListEqual(model.non_trainable_weights, weights)\n\n @test_util.run_deprecated_v1\n def test_layer_call_arguments(self):\n # Test the ability to pass and serialize arguments to `call`.\n inp = keras.layers.Input(shape=(2,))\n x = keras.layers.Dense(3)(inp)\n x = keras.layers.Dropout(0.5)(x, training=True)\n model = keras.models.Model(inp, x)\n # Would be `dropout/cond/Merge` by default\n self.assertTrue(model.output.op.name.endswith('dropout/mul'))\n\n # Test that argument is kept when applying the model\n inp2 = keras.layers.Input(shape=(2,))\n out2 = model(inp2)\n self.assertTrue(out2.op.name.endswith('dropout/mul'))\n\n # Test that argument is kept after loading a model\n config = model.get_config()\n model = keras.models.Model.from_config(config)\n self.assertTrue(model.output.op.name.endswith('dropout/mul'))\n\n def test_node_construction(self):\n # test basics\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n with self.assertRaises(ValueError):\n _ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))\n with self.assertRaises(ValueError):\n _ = keras.layers.Input(shape=(32,), unknown_kwarg=None)\n\n self.assertListEqual(a.get_shape().as_list(), [None, 32])\n a_layer, a_node_index, a_tensor_index = a._keras_history\n b_layer, _, _ = b._keras_history\n self.assertEqual(len(a_layer._inbound_nodes), 1)\n self.assertEqual(a_tensor_index, 0)\n node = a_layer._inbound_nodes[a_node_index]\n self.assertEqual(node.outbound_layer, a_layer)\n\n self.assertListEqual(node.inbound_layers, [])\n self.assertListEqual(node.input_tensors, [a])\n self.assertListEqual(node.input_shapes, [(None, 32)])\n self.assertListEqual(node.output_tensors, [a])\n self.assertListEqual(node.output_shapes, [(None, 32)])\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n self.assertEqual(len(dense._inbound_nodes), 2)\n self.assertEqual(len(dense._outbound_nodes), 0)\n self.assertListEqual(dense._inbound_nodes[0].inbound_layers, [a_layer])\n self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)\n self.assertListEqual(dense._inbound_nodes[1].inbound_layers, [b_layer])\n self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)\n self.assertListEqual(dense._inbound_nodes[0].input_tensors, [a])\n self.assertListEqual(dense._inbound_nodes[1].input_tensors, [b])\n\n # test layer properties\n test_layer = keras.layers.Dense(16, name='test_layer')\n a_test = test_layer(a)\n 
self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])\n self.assertEqual(test_layer.input, a)\n self.assertEqual(test_layer.output, a_test)\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, (None, 16))\n\n self.assertEqual(dense.get_input_at(0), a)\n self.assertEqual(dense.get_input_at(1), b)\n self.assertEqual(dense.get_output_at(0), a_2)\n self.assertEqual(dense.get_output_at(1), b_2)\n self.assertEqual(dense.get_input_shape_at(0), (None, 32))\n self.assertEqual(dense.get_input_shape_at(1), (None, 32))\n self.assertEqual(dense.get_output_shape_at(0), (None, 16))\n self.assertEqual(dense.get_output_shape_at(1), (None, 16))\n self.assertEqual(dense.get_input_mask_at(0), None)\n self.assertEqual(dense.get_input_mask_at(1), None)\n self.assertEqual(dense.get_output_mask_at(0), None)\n self.assertEqual(dense.get_output_mask_at(1), None)\n\n def test_multi_input_layer(self):\n with self.cached_session():\n # test multi-input layer\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])\n merge_layer, merge_node_index, merge_tensor_index = merged._keras_history\n\n self.assertEqual(merge_node_index, 0)\n self.assertEqual(merge_tensor_index, 0)\n\n self.assertEqual(len(merge_layer._inbound_nodes), 1)\n self.assertEqual(len(merge_layer._outbound_nodes), 0)\n\n self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)\n self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)\n\n c = keras.layers.Dense(64, name='dense_2')(merged)\n d = keras.layers.Dense(5, name='dense_3')(c)\n\n model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')\n self.assertEqual(len(model.layers), 6)\n output_shapes = model.compute_output_shape([(None, 32), (None, 32)])\n self.assertListEqual(output_shapes[0].as_list(), [None, 64])\n self.assertListEqual(output_shapes[1].as_list(), [None, 5])\n self.assertListEqual(\n model.compute_mask([a, b], [None, None]), [None, None])\n\n # we don't check names of first 2 layers (inputs) because\n # ordering of same-level layers is not fixed\n self.assertListEqual([l.name for l in model.layers][2:],\n ['dense_1', 'merge', 'dense_2', 'dense_3'])\n self.assertListEqual([l.name for l in model._input_layers],\n ['input_a', 'input_b'])\n self.assertListEqual([l.name for l in model._output_layers],\n ['dense_2', 'dense_3'])\n\n # actually run model\n fn = keras.backend.function(model.inputs, model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])\n\n # test get_source_inputs\n self.assertListEqual(keras.engine.get_source_inputs(c), [a, b])\n\n # serialization / deserialization\n json_config = model.to_json()\n recreated_model = keras.models.model_from_json(json_config)\n recreated_model.compile('rmsprop', 'mse')\n\n self.assertListEqual([l.name for l in recreated_model.layers][2:],\n ['dense_1', 'merge', 'dense_2', 'dense_3'])\n self.assertListEqual([l.name for l in recreated_model._input_layers],\n ['input_a', 'input_b'])\n self.assertListEqual([l.name for l in recreated_model._output_layers],\n ['dense_2', 'dense_3'])\n\n fn = 
keras.backend.function(recreated_model.inputs,\n recreated_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])\n\n @test_util.run_deprecated_v1\n def test_recursion(self):\n with self.cached_session():\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n c = keras.layers.Dense(64, name='dense_2')(merged)\n d = keras.layers.Dense(5, name='dense_3')(c)\n\n model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n e = keras.layers.Input(shape=(32,), name='input_e')\n f = keras.layers.Input(shape=(32,), name='input_f')\n self.assertEqual(len(model.inputs), 2)\n g, h = model([e, f])\n self.assertEqual(len(model.inputs), 2)\n self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')\n\n self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())\n self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())\n\n # test separate manipulation of different layer outputs\n i = keras.layers.Dense(7, name='dense_4')(h)\n\n final_model = keras.models.Model(\n inputs=[e, f], outputs=[i, g], name='final')\n self.assertEqual(len(final_model.inputs), 2)\n self.assertEqual(len(final_model.outputs), 2)\n self.assertEqual(len(final_model.layers), 4)\n\n # we don't check names of first 2 layers (inputs) because\n # ordering of same-level layers is not fixed\n self.assertListEqual([layer.name for layer in final_model.layers][2:],\n ['model', 'dense_4'])\n self.assertListEqual(\n model.compute_mask([e, f], [None, None]), [None, None])\n self.assertListEqual(\n final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),\n (10, 64)])\n\n # run recursive model\n fn = keras.backend.function(final_model.inputs, final_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])\n\n # test serialization\n model_config = final_model.get_config()\n recreated_model = keras.models.Model.from_config(model_config)\n\n fn = keras.backend.function(recreated_model.inputs,\n recreated_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])\n\n def test_multi_input_multi_output_recursion(self):\n with self.cached_session():\n # test multi-input multi-output\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n c = keras.layers.Dense(64, name='dense_2')(merged)\n d = keras.layers.Dense(5, name='dense_3')(c)\n\n model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n _, n = model([j, k])\n\n o = keras.layers.Input(shape=(32,), name='input_o')\n p = keras.layers.Input(shape=(32,), name='input_p')\n q, _ = model([o, p])\n\n self.assertListEqual(n.get_shape().as_list(), [None, 5])\n 
self.assertListEqual(q.get_shape().as_list(), [None, 64])\n s = keras.layers.concatenate([n, q], name='merge_nq')\n self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])\n\n # test with single output as 1-elem list\n multi_io_model = keras.models.Model([j, k, o, p], [s])\n\n fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n # test with single output as tensor\n multi_io_model = keras.models.Model([j, k, o, p], s)\n\n fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n # note that the output of the function will still be a 1-elem list\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n # test serialization\n model_config = multi_io_model.get_config()\n recreated_model = keras.models.Model.from_config(model_config)\n\n fn = keras.backend.function(recreated_model.inputs,\n recreated_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n # note that the output of the function will still be a 1-elem list\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n config = model.get_config()\n keras.models.Model.from_config(config)\n\n model.summary()\n json_str = model.to_json()\n keras.models.model_from_json(json_str)\n\n if yaml is not None:\n yaml_str = model.to_yaml()\n keras.models.model_from_yaml(yaml_str)\n\n def test_invalid_graphs(self):\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n c = keras.layers.Dense(64, name='dense_2')(merged)\n d = keras.layers.Dense(5, name='dense_3')(c)\n\n model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n # input is not an Input tensor\n j = keras.layers.Input(shape=(32,), name='input_j')\n j = keras.layers.Dense(32)(j)\n k = keras.layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n\n with self.assertRaises(Exception):\n keras.models.Model([j, k], [m, n])\n\n # disconnected graph\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n keras.models.Model([j], [m, n])\n\n # redundant outputs\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n\n keras.models.Model([j, k], [m, n, n])\n\n # redundant inputs\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n keras.models.Model([j, k, j], [m, n])\n\n # i have not idea what I'm doing: garbage as inputs/outputs\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n keras.models.Model([j, k], [m, n, 0])\n\n @test_util.run_deprecated_v1\n def test_raw_tf_compatibility(self):\n # test calling layers/models on TF tensors\n a = 
keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n c = keras.layers.Dense(64, name='dense_2')(merged)\n d = keras.layers.Dense(5, name='dense_3')(c)\n\n model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n j = keras.layers.Input(shape=(32,), name='input_j')\n k = keras.layers.Input(shape=(32,), name='input_k')\n self.assertEqual(len(model.inputs), 2)\n m, n = model([j, k])\n self.assertEqual(len(model.inputs), 2)\n tf_model = keras.models.Model([j, k], [m, n])\n\n j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))\n k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))\n m_tf, n_tf = tf_model([j_tf, k_tf])\n self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])\n self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])\n\n # test merge\n keras.layers.concatenate([j_tf, k_tf], axis=1)\n keras.layers.add([j_tf, k_tf])\n\n # test tensor input\n x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)\n keras.layers.InputLayer(input_tensor=x)\n\n x = keras.layers.Input(tensor=x)\n keras.layers.Dense(2)(x)\n\n def test_basic_masking(self):\n a = keras.layers.Input(shape=(10, 32), name='input_a')\n b = keras.layers.Masking()(a)\n model = keras.models.Model(a, b)\n self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])\n\n @test_util.run_deprecated_v1\n def testMaskingSingleInput(self):\n\n class MaskedLayer(keras.layers.Layer):\n\n def call(self, inputs, mask=None):\n if mask is not None:\n return inputs * mask\n return inputs\n\n def compute_mask(self, inputs, mask=None):\n return array_ops.ones_like(inputs)\n\n if context.executing_eagerly():\n a = constant_op.constant([2] * 32)\n mask = constant_op.constant([0, 1] * 16)\n a._keras_mask = mask\n b = MaskedLayer().apply(a)\n self.assertTrue(hasattr(b, '_keras_mask'))\n self.assertAllEqual(\n self.evaluate(array_ops.ones_like(mask)),\n self.evaluate(getattr(b, '_keras_mask')))\n self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))\n else:\n x = input_layer_lib.Input(shape=(32,))\n y = MaskedLayer()(x) # pylint: disable=not-callable\n network = network_lib.Network(x, y)\n\n # test callability on Input\n x_2 = input_layer_lib.Input(shape=(32,))\n y_2 = network(x_2)\n self.assertEqual(y_2.get_shape().as_list(), [None, 32])\n\n # test callability on regular tensor\n x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))\n y_2 = network(x_2)\n self.assertEqual(y_2.get_shape().as_list(), [None, 32])\n\n @test_util.run_deprecated_v1\n def test_activity_regularization_with_model_composition(self):\n\n def reg(x):\n return math_ops.reduce_sum(x)\n\n net_a_input = input_layer_lib.Input((2,))\n net_a = net_a_input\n net_a = keras.layers.Dense(2, kernel_initializer='ones',\n use_bias=False,\n activity_regularizer=reg)(net_a)\n model_a = keras.Model([net_a_input], [net_a])\n\n net_b_input = input_layer_lib.Input((2,))\n net_b = model_a(net_b_input)\n model_b = keras.Model([net_b_input], [net_b])\n\n model_b.compile(optimizer='sgd', loss=None)\n x = np.ones((1, 2))\n loss = model_b.evaluate(x)\n self.assertEqual(loss, 4.)\n\n def test_layer_sharing_at_heterogenous_depth(self):\n with self.cached_session():\n x_val = np.random.random((10, 5))\n\n x = input_layer_lib.Input(shape=(5,))\n a = keras.layers.Dense(5, name='A')\n b = keras.layers.Dense(5, 
name='B')\n output = a(b(a(b(x))))\n m = keras.models.Model(x, output)\n\n output_val = m.predict(x_val)\n\n config = m.get_config()\n weights = m.get_weights()\n\n m2 = keras.models.Model.from_config(config)\n m2.set_weights(weights)\n\n output_val_2 = m2.predict(x_val)\n self.assertAllClose(output_val, output_val_2, atol=1e-6)\n\n def test_layer_sharing_at_heterogenous_depth_with_concat(self):\n with self.cached_session():\n input_shape = (16, 9, 3)\n input_layer = input_layer_lib.Input(shape=input_shape)\n\n a = keras.layers.Dense(3, name='dense_A')\n b = keras.layers.Dense(3, name='dense_B')\n c = keras.layers.Dense(3, name='dense_C')\n\n x1 = b(a(input_layer))\n x2 = a(c(input_layer))\n output = keras.layers.concatenate([x1, x2])\n\n m = keras.models.Model(inputs=input_layer, outputs=output)\n\n x_val = np.random.random((10, 16, 9, 3))\n output_val = m.predict(x_val)\n\n config = m.get_config()\n weights = m.get_weights()\n\n m2 = keras.models.Model.from_config(config)\n m2.set_weights(weights)\n\n output_val_2 = m2.predict(x_val)\n self.assertAllClose(output_val, output_val_2, atol=1e-6)\n\n @test_util.run_deprecated_v1\n def test_explicit_training_argument(self):\n with self.cached_session():\n a = keras.layers.Input(shape=(2,))\n b = keras.layers.Dropout(0.5)(a)\n base_model = keras.models.Model(a, b)\n\n a = keras.layers.Input(shape=(2,))\n b = base_model(a, training=False)\n model = keras.models.Model(a, b)\n\n x = np.ones((100, 2))\n y = np.ones((100, 2))\n model.compile(optimizer='sgd', loss='mse')\n loss = model.train_on_batch(x, y)\n self.assertEqual(loss, 0) # In inference mode, output is equal to input.\n\n a = keras.layers.Input(shape=(2,))\n b = base_model(a, training=True)\n model = keras.models.Model(a, b)\n preds = model.predict(x)\n self.assertEqual(np.min(preds), 0.) 
# At least one unit was dropped.\n\n def test_multi_output_model_with_none_masking(self):\n\n with self.cached_session():\n\n def func(x):\n return [x * 0.2, x * 0.3]\n\n def output_shape(input_shape):\n return [input_shape, input_shape]\n\n i = keras.layers.Input(shape=(3, 2, 1))\n o = keras.layers.Lambda(function=func, output_shape=output_shape)(i)\n\n self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))\n self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))\n\n o = keras.layers.add(o)\n model = keras.Model(i, o)\n\n i2 = keras.layers.Input(shape=(3, 2, 1))\n o2 = model(i2)\n model2 = keras.Model(i2, o2)\n\n x = np.random.random((4, 3, 2, 1))\n out = model2.predict(x)\n assert out.shape == (4, 3, 2, 1)\n self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)\n\n def test_constant_initializer_with_numpy(self):\n\n with self.cached_session():\n initializer = keras.initializers.Constant(np.ones((3, 2)))\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,),\n kernel_initializer=initializer))\n model.add(keras.layers.Dense(3))\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n\n json_str = model.to_json()\n keras.models.model_from_json(json_str)\n\n if yaml is not None:\n yaml_str = model.to_yaml()\n keras.models.model_from_yaml(yaml_str)\n\n\nclass DeferredModeTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes()\n def testSimpleNetworkBuilding(self):\n inputs = input_layer_lib.Input(shape=(32,))\n if context.executing_eagerly():\n self.assertEqual(inputs.dtype.name, 'float32')\n self.assertEqual(inputs.shape.as_list(), [None, 32])\n\n x = keras.layers.Dense(2)(inputs)\n if context.executing_eagerly():\n self.assertEqual(x.dtype.name, 'float32')\n self.assertEqual(x.shape.as_list(), [None, 2])\n\n outputs = keras.layers.Dense(4)(x)\n network = network_lib.Network(inputs, outputs)\n self.assertIsInstance(network, network_lib.Network)\n\n if context.executing_eagerly():\n # It should be possible to call such a network on EagerTensors.\n inputs = constant_op.constant(\n np.random.random((10, 32)).astype('float32'))\n outputs = network(inputs)\n self.assertEqual(outputs.shape.as_list(), [10, 4])\n\n @test_util.run_in_graph_and_eager_modes()\n def testMultiIONetworkbuilding(self):\n input_a = input_layer_lib.Input(shape=(32,))\n input_b = input_layer_lib.Input(shape=(16,))\n a = keras.layers.Dense(16)(input_a)\n\n class AddLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return inputs[0] + inputs[1]\n\n c = AddLayer()([a, input_b]) # pylint: disable=not-callable\n c = keras.layers.Dense(2)(c)\n\n network = network_lib.Network([input_a, input_b], [a, c])\n if context.executing_eagerly():\n a_val = constant_op.constant(\n np.random.random((10, 32)).astype('float32'))\n b_val = constant_op.constant(\n np.random.random((10, 16)).astype('float32'))\n outputs = network([a_val, b_val])\n self.assertEqual(len(outputs), 2)\n self.assertEqual(outputs[0].shape.as_list(), [10, 16])\n self.assertEqual(outputs[1].shape.as_list(), [10, 2])\n\n\nclass DefaultShapeInferenceBehaviorTest(test.TestCase):\n\n def _testShapeInference(self, model, input_shape, expected_output_shape):\n input_value = np.random.random(input_shape)\n output_value = model.predict(input_value)\n self.assertEqual(output_value.shape, expected_output_shape)\n\n @test_util.run_in_graph_and_eager_modes()\n def testSingleInputCase(self):\n\n class LayerWithOneInput(keras.layers.Layer):\n\n def build(self, input_shape):\n self.w = array_ops.ones(shape=(3, 
4))\n\n def call(self, inputs):\n return keras.backend.dot(inputs, self.w)\n\n inputs = input_layer_lib.Input(shape=(3,))\n layer = LayerWithOneInput()\n\n if context.executing_eagerly():\n self.assertEqual(\n layer.compute_output_shape((None, 3)).as_list(), [None, 4])\n # As a side-effect, compute_output_shape builds the layer.\n self.assertTrue(layer.built)\n # We can still query the layer's compute_output_shape with compatible\n # input shapes.\n self.assertEqual(\n layer.compute_output_shape((6, 3)).as_list(), [6, 4])\n\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n self._testShapeInference(model, (2, 3), (2, 4))\n\n @test_util.run_in_graph_and_eager_modes()\n def testMultiInputOutputCase(self):\n\n class MultiInputOutputLayer(keras.layers.Layer):\n\n def build(self, input_shape):\n self.w = array_ops.ones(shape=(3, 4))\n\n def call(self, inputs):\n a = keras.backend.dot(inputs[0], self.w)\n b = a + inputs[1]\n return [a, b]\n\n input_a = input_layer_lib.Input(shape=(3,))\n input_b = input_layer_lib.Input(shape=(4,))\n output_a, output_b = MultiInputOutputLayer()([input_a, input_b])\n model = keras.Model([input_a, input_b], [output_a, output_b])\n output_a_val, output_b_val = model.predict(\n [np.random.random((2, 3)), np.random.random((2, 4))])\n self.assertEqual(output_a_val.shape, (2, 4))\n self.assertEqual(output_b_val.shape, (2, 4))\n\n @test_util.run_in_graph_and_eager_modes()\n def testTrainingArgument(self):\n\n class LayerWithTrainingArg(keras.layers.Layer):\n\n def build(self, input_shape):\n self.w = array_ops.ones(shape=(3, 4))\n\n def call(self, inputs, training):\n return keras.backend.dot(inputs, self.w)\n\n inputs = input_layer_lib.Input(shape=(3,))\n outputs = LayerWithTrainingArg()(inputs, training=False)\n model = keras.Model(inputs, outputs)\n self._testShapeInference(model, (2, 3), (2, 4))\n\n @test_util.run_in_graph_and_eager_modes()\n def testNoneInShape(self):\n\n class Model(keras.Model):\n\n def __init__(self):\n super(Model, self).__init__()\n self.conv1 = keras.layers.Conv2D(8, 3)\n self.pool = keras.layers.GlobalAveragePooling2D()\n self.fc = keras.layers.Dense(3)\n\n def call(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.fc(x)\n return x\n\n model = Model()\n model.build(tensor_shape.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = array_ops.ones((1, 10, 10, 1))\n output = model(sample_input)\n self.assertEqual(output.shape, (1, 3))\n\n @test_util.run_in_graph_and_eager_modes()\n def testNoneInShapeWithCompoundModel(self):\n\n class BasicBlock(keras.Model):\n\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = keras.layers.Conv2D(8, 3)\n self.pool = keras.layers.GlobalAveragePooling2D()\n self.dense = keras.layers.Dense(3)\n\n def call(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.dense(x)\n return x\n\n class CompoundModel(keras.Model):\n\n def __init__(self):\n super(CompoundModel, self).__init__()\n self.block = BasicBlock()\n\n def call(self, x):\n x = self.block(x) # pylint: disable=not-callable\n return x\n\n model = CompoundModel()\n model.build(tensor_shape.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = array_ops.ones((1, 10, 10, 1))\n output = 
model(sample_input) # pylint: disable=not-callable\n self.assertEqual(output.shape, (1, 3))\n\n @test_util.run_in_graph_and_eager_modes()\n def testNoneInShapeWithFunctinalAPI(self):\n\n class BasicBlock(keras.Model):\n # Inherting from keras.layers.Layer since we are calling this layer\n # inside a model created using functional API.\n\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = keras.layers.Conv2D(8, 3)\n\n def call(self, x):\n x = self.conv1(x)\n return x\n\n input_layer = keras.layers.Input(shape=(None, None, 1))\n x = BasicBlock()(input_layer)\n x = keras.layers.GlobalAveragePooling2D()(x)\n output_layer = keras.layers.Dense(3)(x)\n\n model = keras.Model(inputs=input_layer, outputs=output_layer)\n\n model.build(tensor_shape.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = array_ops.ones((1, 10, 10, 1))\n output = model(sample_input)\n self.assertEqual(output.shape, (1, 3))\n\n @test_util.run_in_graph_and_eager_modes()\n def test_sequential_as_downstream_of_masking_layer(self):\n inputs = keras.layers.Input(shape=(3, 4))\n x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)\n\n s = keras.Sequential()\n s.add(keras.layers.Dense(5, input_shape=(4,)))\n\n x = keras.layers.wrappers.TimeDistributed(s)(x)\n model = keras.Model(inputs=inputs, outputs=x)\n model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3), loss='mse')\n\n model_input = np.random.randint(\n low=1, high=5, size=(10, 3, 4)).astype('float32')\n for i in range(4):\n model_input[i, i:, :] = 0.\n model.fit(model_input,\n np.random.random((10, 3, 5)), epochs=1, batch_size=6)\n\n if not context.executing_eagerly():\n # Note: this doesn't work in eager due to DeferredTensor/ops compatibility\n # issue.\n mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]\n mask_outputs += [model.layers[2].compute_mask(\n model.layers[2].input, mask_outputs[-1])]\n func = keras.backend.function([model.input], mask_outputs)\n mask_outputs_val = func([model_input])\n self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))\n self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))\n\n\nclass GraphUtilsTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testGetReachableFromInputs(self):\n\n with self.cached_session():\n pl_1 = array_ops.placeholder(shape=None, dtype='float32')\n pl_2 = array_ops.placeholder(shape=None, dtype='float32')\n pl_3 = array_ops.placeholder(shape=None, dtype='float32')\n x_1 = pl_1 + pl_2\n x_2 = pl_2 * 2\n x_3 = pl_3 + 1\n x_4 = x_1 + x_2\n x_5 = x_3 * pl_1\n\n self.assertEqual(\n keras.utils.tf_utils.get_reachable_from_inputs([pl_1]),\n {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})\n self.assertEqual(\n keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]),\n {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})\n self.assertEqual(\n keras.utils.tf_utils.get_reachable_from_inputs([pl_3]),\n {pl_3, x_3, x_5, x_3.op, x_5.op})\n self.assertEqual(\n keras.utils.tf_utils.get_reachable_from_inputs([x_3]),\n {x_3, x_5, x_5.op})\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Utility functions to save/load keras Model to/from SavedModel.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport six\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.estimator import keras as estimator_keras_util\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensorflow.python.estimator.export import export as export_helpers\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import models as models_lib\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.engine import sequential\nfrom tensorflow.python.keras.metrics import Metric\nfrom tensorflow.python.keras.models import model_from_json\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import builder as saved_model_builder\nfrom tensorflow.python.saved_model import constants\nfrom tensorflow.python.saved_model import utils_impl as saved_model_utils\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training.checkpointable import util as checkpointable_utils\nfrom tensorflow.python.util import compat\n\n\ndef save_keras_model(\n model, saved_model_path, custom_objects=None, as_text=None):\n \"\"\"Save a `tf.keras.Model` into Tensorflow SavedModel format.\n\n `save_model` generates new files/folders under the `saved_model_path` folder:\n 1) an asset folder containing the json string of the model's\n configuration (topology).\n 2) a checkpoint containing the model weights.\n 3) a saved_model.pb file containing the model's MetaGraphs. The prediction\n graph is always exported. The evaluaton and training graphs are exported\n if the following conditions are met:\n - Evaluation: model loss is defined.\n - Training: model is compiled with an optimizer defined under `tf.train`.\n This is because `tf.keras.optimizers.Optimizer` instances cannot be\n saved to checkpoints.\n\n Model Requirements:\n - Model must be a sequential model or functional model. Subclassed models can\n not be saved via this function, unless you provide an implementation for\n get_config() and from_config().\n - All variables must be saveable by the model. In general, this condition is\n met through the use of layers defined in the keras library. However,\n there is currently a bug with variables created in Lambda layer functions\n not being saved correctly (see\n https://github.com/keras-team/keras/issues/9740).\n\n Note that each mode is exported in separate graphs, so different modes do not\n share variables. 
To use the train graph with evaluation or prediction graphs,\n create a new checkpoint if variable values have been updated.\n\n Example:\n\n ```python\n import tensorflow as tf\n\n # Create a tf.keras model.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(1, input_shape=[10]))\n model.summary()\n\n # Save the tf.keras model in the SavedModel format.\n saved_to_path = tf.contrib.saved_model.save_keras_model(\n model, '/tmp/my_simple_tf_keras_saved_model')\n\n # Load the saved keras model back.\n model_prime = tf.contrib.saved_model.load_keras_model(saved_to_path)\n model_prime.summary()\n ```\n\n Args:\n model: A `tf.keras.Model` to be saved.\n saved_model_path: a string specifying the path to the SavedModel directory.\n The SavedModel will be saved to a timestamped folder created within this\n directory.\n custom_objects: Optional dictionary mapping string names to custom classes\n or functions (e.g. custom loss functions).\n as_text: whether to write the `SavedModel` proto in text format.\n\n Returns:\n String path to the SavedModel folder, a subdirectory of `saved_model_path`.\n\n Raises:\n NotImplementedError: If the model is a subclassed model.\n ValueError: If a Sequential model does not have input shapes defined by the\n user, and is not built.\n \"\"\"\n if not model._is_graph_network:\n if isinstance(model, sequential.Sequential):\n # If input shape is not directly set in the model, the exported model\n # will assume that the inputs have the same shape as the shape the model\n # was built model with.\n if not model.built:\n raise ValueError(\n 'Sequential model must be built before it can be exported.')\n else:\n raise NotImplementedError(\n 'Exporting subclassed models is not yet supported.')\n\n export_dir = export_helpers.get_timestamped_export_dir(saved_model_path)\n temp_export_dir = export_helpers.get_temp_export_dir(export_dir)\n\n builder = saved_model_builder._SavedModelBuilder(temp_export_dir)\n\n # Manually save variables to export them in an object-based checkpoint. This\n # skips the `builder.add_meta_graph_and_variables()` step, which saves a\n # named-based checkpoint.\n # TODO(b/113134168): Add fn to Builder to save with object-based saver.\n # TODO(b/113178242): This should only export the model json structure. Only\n # one save is needed once the weights can be copied from the model to clone.\n checkpoint_path = _export_model_json_and_variables(model, temp_export_dir)\n\n # Export each mode. Use ModeKeys enums defined for `Estimator` to ensure that\n # Keras models and `Estimator`s are exported with the same format.\n # Every time a mode is exported, the code checks to see if new variables have\n # been created (e.g. optimizer slot variables). If that is the case, the\n # checkpoint is re-saved to include the new variables.\n export_args = {'builder': builder,\n 'model': model,\n 'custom_objects': custom_objects,\n 'checkpoint_path': checkpoint_path}\n\n has_saved_vars = False\n if model.optimizer:\n if isinstance(model.optimizer, optimizers.TFOptimizer):\n _export_mode(model_fn_lib.ModeKeys.TRAIN, has_saved_vars, **export_args)\n has_saved_vars = True\n _export_mode(model_fn_lib.ModeKeys.EVAL, has_saved_vars, **export_args)\n else:\n logging.warning(\n 'Model was compiled with an optimizer, but the optimizer is not from '\n '`tf.train` (e.g. `tf.train.AdagradOptimizer`). Only the serving '\n 'graph was exported. 
The train and evaluate graphs were not added to '\n 'the SavedModel.')\n _export_mode(model_fn_lib.ModeKeys.PREDICT, has_saved_vars, **export_args)\n\n builder.save(as_text)\n\n gfile.Rename(temp_export_dir, export_dir)\n return export_dir\n\n\ndef _export_model_json_and_variables(model, saved_model_path):\n \"\"\"Save model variables and json structure into SavedModel subdirectories.\"\"\"\n # Save model configuration as a json string under assets folder.\n model_json = model.to_json()\n model_json_filepath = os.path.join(\n saved_model_utils.get_or_create_assets_dir(saved_model_path),\n compat.as_text(constants.SAVED_MODEL_FILENAME_JSON))\n file_io.write_string_to_file(model_json_filepath, model_json)\n\n # Save model weights in checkpoint format under variables folder.\n saved_model_utils.get_or_create_variables_dir(saved_model_path)\n checkpoint_prefix = saved_model_utils.get_variables_path(saved_model_path)\n model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)\n return checkpoint_prefix\n\n\ndef _get_var_list(model):\n \"\"\"Return list of all checkpointed saveable objects in the model.\"\"\"\n return checkpointable_utils.named_saveables(model)\n\n\ndef _export_mode(\n mode, has_saved_vars, builder, model, custom_objects, checkpoint_path):\n \"\"\"Export a model, and optionally save new vars from the clone model.\n\n Args:\n mode: A `tf.estimator.ModeKeys` string.\n has_saved_vars: A `boolean` indicating whether the SavedModel has already\n exported variables.\n builder: A `SavedModelBuilder` object.\n model: A `tf.keras.Model` object.\n custom_objects: A dictionary mapping string names to custom classes\n or functions.\n checkpoint_path: String path to checkpoint.\n\n Raises:\n ValueError: If the train/eval mode is being exported, but the model does\n not have an optimizer.\n \"\"\"\n compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)\n if compile_clone and not model.optimizer:\n raise ValueError(\n 'Model does not have an optimizer. Cannot export mode %s' % mode)\n\n model_graph = ops.get_default_graph()\n with ops.Graph().as_default() as g:\n\n K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)\n\n # Clone the model into blank graph. This will create placeholders for inputs\n # and targets.\n clone = models_lib.clone_and_build_model(\n model, custom_objects=custom_objects, compile_clone=compile_clone)\n\n # Make sure that iterations variable is added to the global step collection,\n # to ensure that, when the SavedModel graph is loaded, the iterations\n # variable is returned by `tf.train.get_global_step()`. 
This is required for\n # compatibility with the SavedModelEstimator.\n if compile_clone:\n g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)\n\n # Extract update and train ops from train/test/predict functions.\n train_op = None\n if mode == model_fn_lib.ModeKeys.TRAIN:\n clone._make_train_function()\n train_op = clone.train_function.updates_op\n elif mode == model_fn_lib.ModeKeys.EVAL:\n clone._make_test_function()\n else:\n clone._make_predict_function()\n g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)\n\n clone_var_list = checkpointable_utils.named_saveables(clone)\n\n with session.Session().as_default():\n if has_saved_vars:\n # Confirm all variables in the clone have an entry in the checkpoint.\n status = clone.load_weights(checkpoint_path)\n status.assert_existing_objects_matched()\n else:\n # Confirm that variables between the clone and model match up exactly,\n # not counting optimizer objects. Optimizer objects are ignored because\n # if the model has not trained, the slot variables will not have been\n # created yet.\n # TODO(b/113179535): Replace with checkpointable equivalence.\n _assert_same_non_optimizer_objects(model, model_graph, clone, g)\n\n # TODO(b/113178242): Use value transfer for checkpointable objects.\n clone.load_weights(checkpoint_path)\n\n # Add graph and variables to SavedModel.\n # TODO(b/113134168): Switch to add_meta_graph_and_variables.\n clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)\n builder._has_saved_variables = True\n\n # Add graph to the SavedModel builder.\n builder.add_meta_graph(\n model_fn_lib.EXPORT_TAG_MAP[mode],\n signature_def_map=_create_signature_def_map(clone, mode),\n saver=saver_lib.Saver(clone_var_list),\n init_op=variables.local_variables_initializer(),\n train_op=train_op)\n return None\n\n\ndef _create_signature_def_map(model, mode):\n \"\"\"Create a SignatureDef map from a Keras model.\"\"\"\n inputs_dict = {name: x for name, x in zip(model.input_names, model.inputs)}\n if model.optimizer:\n targets_dict = {x.name.split(':')[0]: x\n for x in model.targets if x is not None}\n inputs_dict.update(targets_dict)\n outputs_dict = {name: x\n for name, x in zip(model.output_names, model.outputs)}\n metrics = estimator_keras_util._convert_keras_metrics_to_estimator(model)\n\n # Add metric variables to the `LOCAL_VARIABLES` collection. Metric variables\n # are by default not added to any collections. 
We are doing this here, so\n # that metric variables get initialized.\n local_vars = set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))\n vars_to_add = set()\n if metrics is not None:\n for key, value in six.iteritems(metrics):\n if isinstance(value, Metric):\n vars_to_add.update(value.variables)\n # Convert Metric instances to (value_tensor, update_op) tuple.\n metrics[key] = (value.result(), value.updates[0])\n # Remove variables that are in the local variables collection already.\n vars_to_add = vars_to_add.difference(local_vars)\n for v in vars_to_add:\n ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, v)\n\n export_outputs = model_fn_lib.export_outputs_for_mode(\n mode,\n predictions=outputs_dict,\n loss=model.total_loss if model.optimizer else None,\n metrics=metrics)\n return export_helpers.build_all_signature_defs(\n inputs_dict,\n export_outputs=export_outputs,\n serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))\n\n\ndef _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph): # pylint: disable=unused-argument\n \"\"\"Assert model and clone contain the same checkpointable objects.\"\"\"\n\n # TODO(fchollet, kathywu): make sure this works in eager mode.\n return True\n\n\ndef load_keras_model(saved_model_path):\n \"\"\"Load a keras.Model from SavedModel.\n\n load_model reinstantiates model state by:\n 1) loading model topology from json (this will eventually come\n from metagraph).\n 2) loading model weights from checkpoint.\n\n Example:\n\n ```python\n import tensorflow as tf\n\n # Create a tf.keras model.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(1, input_shape=[10]))\n model.summary()\n\n # Save the tf.keras model in the SavedModel format.\n saved_to_path = tf.contrib.saved_model.save_keras_model(\n model, '/tmp/my_simple_tf_keras_saved_model')\n\n # Load the saved keras model back.\n model_prime = tf.contrib.saved_model.load_keras_model(saved_to_path)\n model_prime.summary()\n ```\n\n Args:\n saved_model_path: a string specifying the path to an existing SavedModel.\n\n Returns:\n a keras.Model instance.\n \"\"\"\n # restore model topology from json string\n model_json_filepath = os.path.join(\n compat.as_bytes(saved_model_path),\n compat.as_bytes(constants.ASSETS_DIRECTORY),\n compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))\n model_json = file_io.read_file_to_string(model_json_filepath)\n model = model_from_json(model_json)\n\n # restore model weights\n checkpoint_prefix = os.path.join(\n compat.as_text(saved_model_path),\n compat.as_text(constants.VARIABLES_DIRECTORY),\n compat.as_text(constants.VARIABLES_FILENAME))\n model.load_weights(checkpoint_prefix)\n return model\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SavedModel utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import utils\n\n\nclass UtilsTest(test.TestCase):\n\n def testBuildTensorInfoOp(self):\n x = constant_op.constant(1, name=\"x\")\n y = constant_op.constant(2, name=\"y\")\n z = control_flow_ops.group([x, y], name=\"op_z\")\n z_op_info = utils.build_tensor_info_from_op(z)\n self.assertEqual(\"op_z\", z_op_info.name)\n self.assertEqual(types_pb2.DT_INVALID, z_op_info.dtype)\n self.assertEqual(0, len(z_op_info.tensor_shape.dim))\n\n def testBuildTensorInfoDefunOp(self):\n @function.defun\n def my_init_fn(x, y):\n self.x_var = x\n self.y_var = y\n\n x = constant_op.constant(1, name=\"x\")\n y = constant_op.constant(2, name=\"y\")\n init_op_info = utils.build_tensor_info_from_op(my_init_fn(x, y))\n self.assertEqual(\"PartitionedFunctionCall\", init_op_info.name)\n self.assertEqual(types_pb2.DT_INVALID, init_op_info.dtype)\n self.assertEqual(0, len(init_op_info.tensor_shape.dim))\n\n def testBuildTensorInfoDense(self):\n x = array_ops.placeholder(dtypes.float32, 1, name=\"x\")\n x_tensor_info = utils.build_tensor_info(x)\n self.assertEqual(\"x:0\", x_tensor_info.name)\n self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)\n self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))\n self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)\n\n def testBuildTensorInfoSparse(self):\n x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name=\"x\")\n x_tensor_info = utils.build_tensor_info(x)\n self.assertEqual(x.values.name,\n x_tensor_info.coo_sparse.values_tensor_name)\n self.assertEqual(x.indices.name,\n x_tensor_info.coo_sparse.indices_tensor_name)\n self.assertEqual(x.dense_shape.name,\n x_tensor_info.coo_sparse.dense_shape_tensor_name)\n self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)\n self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))\n self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)\n self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)\n\n def testGetTensorFromInfoDense(self):\n expected = array_ops.placeholder(dtypes.float32, 1, name=\"x\")\n tensor_info = utils.build_tensor_info(expected)\n actual = utils.get_tensor_from_tensor_info(tensor_info)\n self.assertIsInstance(actual, ops.Tensor)\n self.assertEqual(expected.name, actual.name)\n\n def 
testGetTensorFromInfoSparse(self):\n expected = array_ops.sparse_placeholder(dtypes.float32, name=\"x\")\n tensor_info = utils.build_tensor_info(expected)\n actual = utils.get_tensor_from_tensor_info(tensor_info)\n self.assertIsInstance(actual, sparse_tensor.SparseTensor)\n self.assertEqual(expected.values.name, actual.values.name)\n self.assertEqual(expected.indices.name, actual.indices.name)\n self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)\n\n def testGetTensorFromInfoInOtherGraph(self):\n with ops.Graph().as_default() as expected_graph:\n expected = array_ops.placeholder(dtypes.float32, 1, name=\"right\")\n tensor_info = utils.build_tensor_info(expected)\n with ops.Graph().as_default(): # Some other graph.\n array_ops.placeholder(dtypes.float32, 1, name=\"other\")\n actual = utils.get_tensor_from_tensor_info(tensor_info,\n graph=expected_graph)\n self.assertIsInstance(actual, ops.Tensor)\n self.assertIs(actual.graph, expected_graph)\n self.assertEqual(expected.name, actual.name)\n\n def testGetTensorFromInfoInScope(self):\n # Build a TensorInfo with name \"bar/x:0\".\n with ops.Graph().as_default():\n with ops.name_scope(\"bar\"):\n unscoped = array_ops.placeholder(dtypes.float32, 1, name=\"x\")\n tensor_info = utils.build_tensor_info(unscoped)\n self.assertEqual(\"bar/x:0\", tensor_info.name)\n # Build a graph with node \"foo/bar/x:0\", akin to importing into scope foo.\n with ops.Graph().as_default():\n with ops.name_scope(\"foo\"):\n with ops.name_scope(\"bar\"):\n expected = array_ops.placeholder(dtypes.float32, 1, name=\"x\")\n self.assertEqual(\"foo/bar/x:0\", expected.name)\n # Test that tensor is found by prepending the import scope.\n actual = utils.get_tensor_from_tensor_info(tensor_info,\n import_scope=\"foo\")\n self.assertEqual(expected.name, actual.name)\n\n def testGetTensorFromInfoRaisesErrors(self):\n expected = array_ops.placeholder(dtypes.float32, 1, name=\"x\")\n tensor_info = utils.build_tensor_info(expected)\n tensor_info.name = \"blah:0\" # Nonexistant name.\n with self.assertRaises(KeyError):\n utils.get_tensor_from_tensor_info(tensor_info)\n tensor_info.ClearField(\"name\") # Malformed (missing encoding).\n with self.assertRaises(ValueError):\n utils.get_tensor_from_tensor_info(tensor_info)\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrapper for prefetching_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import sparse\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import device as framework_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"data.experimental.prefetch_to_device\")\ndef prefetch_to_device(device, buffer_size=None):\n \"\"\"A transformation that prefetches dataset values to the given `device`.\n\n NOTE: Although the transformation creates a `tf.data.Dataset`, the\n transformation must be the final `Dataset` in the input pipeline.\n\n Args:\n device: A string. The name of a device to which elements will be prefetched.\n buffer_size: (Optional.) 
The number of elements to buffer on `device`.\n Defaults to an automatically chosen value.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n def _apply_fn(dataset):\n return dataset.apply(\n copy_to_device(target_device=device)).prefetch(buffer_size)\n\n return _apply_fn\n\n\n@tf_export(\"data.experimental.copy_to_device\")\ndef copy_to_device(target_device, source_device=\"/cpu:0\"):\n \"\"\"A transformation that copies dataset elements to the given `target_device`.\n\n Args:\n target_device: The name of a device to which elements will be copied.\n source_device: The original device on which `input_dataset` will be placed.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset):\n options = dataset_ops.Options()\n options.experimental_autotune = False\n return _CopyToDeviceDataset(\n dataset, target_device=target_device,\n source_device=source_device).with_options(options)\n\n return _apply_fn\n\n\n# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate\n# all inputs to the Op are in host memory, thereby avoiding some unnecessary\n# Sends and Recvs.\nclass _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that copies elements to another device.\"\"\"\n\n def __init__(self, input_dataset, target_device, source_device=\"/cpu:0\"):\n \"\"\"Constructs a _CopyToDeviceDataset.\n\n Args:\n input_dataset: `Dataset` to be copied\n target_device: The name of the device to which elements would be copied.\n source_device: Device where input_dataset would be placed.\n \"\"\"\n super(_CopyToDeviceDataset, self).__init__(input_dataset)\n self._input_dataset = input_dataset\n self._target_device = target_device\n spec = framework_device.DeviceSpec().from_string(self._target_device)\n self._is_gpu_target = (spec.device_type == \"GPU\")\n self._source_device_string = source_device\n self._source_device = ops.convert_to_tensor(source_device)\n\n self._flat_output_shapes = nest.flatten(\n sparse.as_dense_shapes(self._input_dataset.output_shapes,\n self._input_dataset.output_classes))\n self._flat_output_types = nest.flatten(\n sparse.as_dense_types(self._input_dataset.output_types,\n self._input_dataset.output_classes))\n\n @function.defun()\n def _init_func():\n \"\"\"Creates an iterator for the input dataset.\n\n Returns:\n A `string` tensor that encapsulates the iterator created.\n \"\"\"\n # pylint: disable=protected-access\n ds_variant = self._input_dataset._as_variant_tensor()\n resource = gen_dataset_ops.anonymous_iterator(\n output_types=self._flat_output_types,\n output_shapes=self._flat_output_shapes)\n with ops.control_dependencies(\n [gen_dataset_ops.make_iterator(ds_variant, resource)]):\n return gen_dataset_ops.iterator_to_string_handle(resource)\n\n init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n @function.defun()\n def _remote_init_func():\n return functional_ops.remote_call(\n target=self._source_device,\n args=init_func_concrete.captured_inputs,\n Tout=[dtypes.string],\n f=init_func_concrete)\n\n self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access\n self._init_captured_args = self._init_func.captured_inputs\n\n @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n def _next_func(string_handle):\n \"\"\"Calls get_next for created iterator.\n\n Args:\n 
string_handle: An iterator string handle created by _init_func\n Returns:\n The elements generated from `input_dataset`\n \"\"\"\n with ops.device(self._source_device_string):\n iterator = iterator_ops.Iterator.from_string_handle(\n string_handle, self.output_types, self.output_shapes,\n self.output_classes)\n ret = iterator.get_next()\n return nest.flatten(sparse.serialize_sparse_tensors(ret))\n\n next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n def _remote_next_func(string_handle):\n return functional_ops.remote_call(\n target=self._source_device,\n args=[string_handle] +\n next_func_concrete.captured_inputs,\n Tout=self._flat_output_types,\n f=next_func_concrete)\n\n self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access\n self._next_captured_args = self._next_func.captured_inputs\n\n @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n def _finalize_func(string_handle):\n \"\"\"Destroys the iterator resource created.\n\n Args:\n string_handle: An iterator string handle created by _init_func\n Returns:\n Tensor constant 0\n \"\"\"\n iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(\n string_handle,\n output_types=self._flat_output_types,\n output_shapes=self._flat_output_shapes)\n with ops.control_dependencies([\n resource_variable_ops.destroy_resource_op(\n iterator_resource, ignore_lookup_error=True)]):\n return array_ops.constant(0, dtypes.int64)\n\n finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n def _remote_finalize_func(string_handle):\n return functional_ops.remote_call(\n target=self._source_device,\n args=[string_handle] +\n finalize_func_concrete.captured_inputs,\n Tout=[dtypes.int64],\n f=finalize_func_concrete)\n\n self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access\n )\n self._finalize_captured_args = self._finalize_func.captured_inputs\n\n g = ops.get_default_graph()\n self._init_func.add_to_graph(g)\n self._next_func.add_to_graph(g)\n self._finalize_func.add_to_graph(g)\n # pylint: enable=protected-scope\n\n # The one_shot_iterator implementation needs a 0 arg _make_dataset function\n # that thereby captures all the inputs required to create the dataset. Since\n # there are strings that are inputs to the GeneratorDataset which can't be\n # placed on a GPU, this fails for the GPU case. Therefore, disabling it for\n # GPU\n def make_one_shot_iterator(self):\n if self._is_gpu_target:\n raise ValueError(\"Cannot create a one shot iterator when using \"\n \"`tf.data.experimental.copy_to_device()` on GPU. 
Please \"\n \"use `Dataset.make_initializable_iterator()` instead.\")\n else:\n return super(_CopyToDeviceDataset, self).make_one_shot_iterator()\n\n def _as_variant_tensor(self):\n with ops.device(self._target_device):\n return gen_dataset_ops.generator_dataset(\n self._init_captured_args,\n self._next_captured_args,\n self._finalize_captured_args,\n init_func=self._init_func,\n next_func=self._next_func,\n finalize_func=self._finalize_func,\n output_types=self._flat_output_types,\n output_shapes=self._flat_output_shapes)\n\n\nclass _MapOnGpuDataset(dataset_ops.UnaryDataset):\n \"\"\"A `Dataset` that maps a function over elements in its using a GPU.\"\"\"\n\n def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n super(_MapOnGpuDataset, self).__init__(input_dataset)\n self._input_dataset = input_dataset\n self._use_inter_op_parallelism = use_inter_op_parallelism\n\n self._map_func = dataset_ops.StructuredFunctionWrapper(\n map_func,\n self._transformation_name(),\n dataset=input_dataset,\n defun_kwargs={\"experimental_ints_on_device\": True})\n\n def _functions(self):\n return [self._map_func]\n\n def _as_variant_tensor(self):\n input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access\n return ged_ops.experimental_map_dataset(\n input_t,\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n use_inter_op_parallelism=self._use_inter_op_parallelism,\n **dataset_ops.flat_structure(self))\n\n @property\n def output_classes(self):\n return self._map_func.output_classes\n\n @property\n def output_shapes(self):\n return self._map_func.output_shapes\n\n @property\n def output_types(self):\n return self._map_func.output_types\n\n def _transformation_name(self):\n return \"map_on_gpu()\"\n\n\ndef map_on_gpu(map_func):\n \"\"\"Maps `map_func` across the elements of this dataset.\n\n NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs\n `map_func` on GPU. It must be used after applying the\n `tf.data.experimental.copy_to_device` transformation with a GPU device\n argument.\n\n Args:\n map_func: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to\n another nested structure of tensors.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset):\n return _MapOnGpuDataset(dataset, map_func)\n\n return _apply_fn\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Unit tests for debug_gradients module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.lib import debug_data\nfrom tensorflow.python.debug.lib import debug_gradients\nfrom tensorflow.python.debug.lib import debug_utils\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import gradient_descent\n\n\nclass IdentifyGradientTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n config = config_pb2.ConfigProto(graph_options=graph_options)\n self.sess = session.Session(config=config)\n with self.sess.as_default():\n self.u = variables.Variable(2.0, name=\"u\")\n self.v = variables.Variable(3.0, name=\"v\")\n self.w = math_ops.multiply(self.u.value(), self.v.value(), name=\"w\")\n\n def tearDown(self):\n ops.reset_default_graph()\n debug_gradients.clear_gradient_debuggers()\n\n @test_util.run_deprecated_v1\n def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):\n grad_debugger = debug_gradients.GradientsDebugger()\n id_grad_w = grad_debugger.identify_gradient(self.w)\n y = math_ops.add(id_grad_w, -1.0, name=\"y\")\n\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n v_grad = grads[1]\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0, self.sess.run(y))\n self.assertAllClose(3.0, self.sess.run(u_grad))\n self.assertAllClose(2.0, self.sess.run(v_grad))\n\n # Fetch the gradient tensor with the x-tensor object.\n w_grad = grad_debugger.gradient_tensor(self.w)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n # Fetch the gradient tensor with the x-tensor's name.\n w_grad = grad_debugger.gradient_tensor(self.w.name)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n # Fetch the gradient tensor with the x-tensor name.\n w_grad = grad_debugger.gradient_tensor(self.w.name)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n @test_util.run_deprecated_v1\n def 
testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):\n grad_debugger = debug_gradients.GradientsDebugger()\n id_grad_w = grad_debugger.identify_gradient(self.w)\n y = math_ops.add(id_grad_w, -1.0, name=\"y\")\n\n with grad_debugger:\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n v_grad = grads[1]\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0, self.sess.run(y))\n self.assertAllClose(3.0, self.sess.run(u_grad))\n self.assertAllClose(2.0, self.sess.run(v_grad))\n\n # Fetch the gradient tensor with the x-tensor object.\n w_grad = grad_debugger.gradient_tensor(self.w)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n # Fetch the gradient tensor with the x-tensor's name.\n w_grad = grad_debugger.gradient_tensor(self.w.name)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n # Fetch the gradient tensor with the x-tensor name.\n w_grad = grad_debugger.gradient_tensor(self.w.name)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n @test_util.run_deprecated_v1\n def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):\n grad_debugger = debug_gradients.GradientsDebugger()\n grad_debugger.identify_gradient(self.w)\n with self.assertRaisesRegexp(ValueError,\n \"The graph already contains an op named .*\"):\n grad_debugger.identify_gradient(self.w)\n\n @test_util.run_deprecated_v1\n def testIdentifyGradientWorksOnMultipleLosses(self):\n grad_debugger_1 = debug_gradients.GradientsDebugger()\n grad_debugger_2 = debug_gradients.GradientsDebugger()\n\n y = math_ops.add(self.w, -1.0, name=\"y\")\n debug_y = grad_debugger_1.identify_gradient(y)\n z1 = math_ops.square(debug_y, name=\"z1\")\n\n debug_y = grad_debugger_2.identify_gradient(y)\n z2 = math_ops.sqrt(debug_y, name=\"z2\")\n\n with grad_debugger_1:\n gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)\n with grad_debugger_2:\n gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)\n\n dz1_dy = grad_debugger_1.gradient_tensor(y)\n dz2_dy = grad_debugger_2.gradient_tensor(y)\n self.assertIsInstance(dz1_dy, ops.Tensor)\n self.assertIsInstance(dz2_dy, ops.Tensor)\n self.assertIsNot(dz1_dy, dz2_dy)\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0**2, self.sess.run(z1))\n self.assertAllClose(5.0**0.5, self.sess.run(z2))\n self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))\n self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))\n\n @test_util.run_deprecated_v1\n def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):\n grad_debugger_1 = debug_gradients.GradientsDebugger()\n grad_debugger_2 = debug_gradients.GradientsDebugger()\n id_grad_w = grad_debugger_1.identify_gradient(self.w)\n y = math_ops.add(id_grad_w, -1.0, name=\"y\")\n\n # There are >1 gradient debuggers registered, and grad_debugger is not used\n # as a context manager here, so the gradient w.r.t. 
self.w will not be\n # registered.\n gradients_impl.gradients(y, [self.u, self.v])\n\n with self.assertRaisesRegexp(\n LookupError,\n r\"This GradientsDebugger has not received any gradient tensor for \"):\n grad_debugger_1.gradient_tensor(self.w)\n with self.assertRaisesRegexp(\n LookupError,\n r\"This GradientsDebugger has not received any gradient tensor for \"):\n grad_debugger_2.gradient_tensor(self.w)\n\n @test_util.run_deprecated_v1\n def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):\n grad_debugger = debug_gradients.GradientsDebugger()\n with self.assertRaisesRegexp(\n TypeError,\n r\"x_tensor must be a str or tf\\.Tensor or tf\\.Variable, but instead \"\n r\"has type .*Operation.*\"):\n grad_debugger.gradient_tensor(variables.global_variables_initializer())\n\n @test_util.run_deprecated_v1\n def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):\n grad_debugger = debug_gradients.GradientsDebugger()\n id_grad_w = grad_debugger.identify_gradient(self.w)\n y = math_ops.add(id_grad_w, -1.0, name=\"y\")\n\n with grad_debugger:\n gradient_descent.GradientDescentOptimizer(0.1).minimize(y)\n\n self.sess.run(variables.global_variables_initializer())\n\n # Fetch the gradient tensor with the x-tensor object.\n w_grad = grad_debugger.gradient_tensor(self.w)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n @test_util.run_deprecated_v1\n def testWatchGradientsByXTensorNamesWorks(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n\n # The constructrion of the forward graph has completed.\n # But we can still get the gradient tensors by using\n # watch_gradients_by_tensor_names().\n grad_debugger = debug_gradients.GradientsDebugger()\n with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, \"w:0$\"):\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n v_grad = grads[1]\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0, self.sess.run(y))\n self.assertAllClose(3.0, self.sess.run(u_grad))\n self.assertAllClose(2.0, self.sess.run(v_grad))\n\n w_grad = grad_debugger.gradient_tensor(self.w)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n w_grad = grad_debugger.gradient_tensor(\"w:0\")\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n @test_util.run_deprecated_v1\n def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n\n # The constructrion of the forward graph has completed.\n # But we can still get the gradient tensors by using\n # watch_gradients_by_tensor_names().\n grad_debugger = debug_gradients.GradientsDebugger()\n grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, \"w:0$\")\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n v_grad = grads[1]\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0, self.sess.run(y))\n self.assertAllClose(3.0, self.sess.run(u_grad))\n self.assertAllClose(2.0, self.sess.run(v_grad))\n\n w_grad = grad_debugger.gradient_tensor(self.w)\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n w_grad = grad_debugger.gradient_tensor(\"w:0\")\n self.assertIsInstance(w_grad, ops.Tensor)\n self.assertAllClose(1.0, self.sess.run(w_grad))\n\n @test_util.run_deprecated_v1\n def 
testWatchGradientsWorksOnRefTensor(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n\n grad_debugger = debug_gradients.GradientsDebugger()\n with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, \"u:0$\"):\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n v_grad = grads[1]\n\n self.assertIs(u_grad, grad_debugger.gradient_tensor(\"u:0\"))\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(3.0, self.sess.run(u_grad))\n self.assertAllClose(2.0, self.sess.run(v_grad))\n self.assertAllClose(3.0, self.sess.run(\n grad_debugger.gradient_tensor(\"u:0\")))\n\n @test_util.run_deprecated_v1\n def testWatchGradientsWorksOnMultipleTensors(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n\n grad_debugger = debug_gradients.GradientsDebugger()\n with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,\n \"(u|w):0$\"):\n grads = gradients_impl.gradients(y, [self.u, self.v])\n self.assertEqual(2, len(grads))\n u_grad = grads[0]\n\n self.assertEqual(2, len(grad_debugger.gradient_tensors()))\n self.assertIs(u_grad, grad_debugger.gradient_tensor(\"u:0\"))\n self.assertIsInstance(grad_debugger.gradient_tensor(\"w:0\"), ops.Tensor)\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(1.0, self.sess.run(\n grad_debugger.gradient_tensor(\"w:0\")))\n self.assertAllClose(3.0, self.sess.run(\n grad_debugger.gradient_tensor(\"u:0\")))\n\n @test_util.run_deprecated_v1\n def testWatchGradientsByXTensorsWorks(self):\n y = math_ops.add(self.w, -1.0, name=\"foo/y\")\n z = math_ops.square(y, name=\"foo/z\")\n\n # The constructrion of the forward graph has completed.\n # But we can still get the gradient tensors by using\n # watch_gradients_by_x_tensors().\n grad_debugger = debug_gradients.GradientsDebugger()\n with grad_debugger.watch_gradients_by_tensors(self.sess.graph,\n [self.w, self.u, y]):\n gradient_descent.GradientDescentOptimizer(0.1).minimize(z)\n\n self.assertEqual(3, len(grad_debugger.gradient_tensors()))\n u_grad = grad_debugger.gradient_tensor(self.u)\n w_grad = grad_debugger.gradient_tensor(self.w)\n y_grad = grad_debugger.gradient_tensor(y)\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(10.0, self.sess.run(y_grad))\n self.assertAllClose(10.0, self.sess.run(w_grad))\n self.assertAllClose(30.0, self.sess.run(u_grad))\n\n @test_util.run_deprecated_v1\n def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n z1 = math_ops.square(y, name=\"z1\")\n z2 = math_ops.sqrt(y, name=\"z2\")\n\n grad_debugger_1 = debug_gradients.GradientsDebugger()\n with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):\n gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)\n\n grad_debugger_2 = debug_gradients.GradientsDebugger()\n with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):\n gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)\n\n dz1_dy = grad_debugger_1.gradient_tensor(y)\n dz2_dy = grad_debugger_2.gradient_tensor(y)\n self.assertIsInstance(dz1_dy, ops.Tensor)\n self.assertIsInstance(dz2_dy, ops.Tensor)\n self.assertIsNot(dz1_dy, dz2_dy)\n\n self.sess.run(variables.global_variables_initializer())\n self.assertAllClose(5.0**2, self.sess.run(z1))\n self.assertAllClose(5.0**0.5, self.sess.run(z2))\n self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))\n self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))\n\n 
@test_util.run_deprecated_v1\n def testGradientsValuesFromDumpWorks(self):\n y = math_ops.add(self.w, -1.0, name=\"y\")\n z = math_ops.square(y, name=\"z\")\n\n grad_debugger = debug_gradients.GradientsDebugger()\n with grad_debugger.watch_gradients_by_tensors(self.sess.graph,\n [self.w, self.u, y]):\n train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)\n\n self.sess.run(variables.global_variables_initializer())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n dump_dir = tempfile.mkdtemp()\n debug_url = \"file://\" + dump_dir\n debug_utils.watch_graph(run_options, self.sess.graph, debug_urls=debug_url)\n run_metadata = config_pb2.RunMetadata()\n self.assertAllClose(2.0, self.sess.run(self.u))\n self.sess.run(train_op, options=run_options, run_metadata=run_metadata)\n self.assertAllClose(-1.0, self.sess.run(self.u))\n\n dump = debug_data.DebugDumpDir(\n dump_dir, partition_graphs=run_metadata.partition_graphs)\n dump.set_python_graph(self.sess.graph)\n\n y_grad_values = debug_gradients.gradient_values_from_dump(\n grad_debugger, y, dump)\n self.assertEqual(1, len(y_grad_values))\n self.assertAllClose(10.0, y_grad_values[0])\n\n w_grad_values = debug_gradients.gradient_values_from_dump(\n grad_debugger, self.w, dump)\n self.assertEqual(1, len(w_grad_values))\n self.assertAllClose(10.0, w_grad_values[0])\n\n u_grad_values = debug_gradients.gradient_values_from_dump(\n grad_debugger, self.u, dump)\n self.assertEqual(1, len(u_grad_values))\n self.assertAllClose(30.0, u_grad_values[0])\n\n with self.assertRaisesRegexp(\n LookupError,\n r\"This GradientsDebugger has not received any gradient tensor for \"\n r\"x-tensor v:0\"):\n debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)\n\n # Cleanup.\n shutil.rmtree(dump_dir)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "tensorflow.python.eager.tape.record_operation", "tensorflow.python.util.tf_decorator.rewrap", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.eager.context.context", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.eager.graph_only_ops.graph_placeholder", "tensorflow.python.eager.context.add_function", "tensorflow.python.eager.context.global_seed", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.util.nest.map_structure", "tensorflow.python.util.tf_decorator.unwrap", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices", "tensorflow.python.ops.custom_gradient.copy_handle_data", "tensorflow.python.eager.tape.push_new_tape", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.ops.tensor_array_ops.build_ta_with_new_flow", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.framework.ops.uid", "tensorflow.python.eager.tape.pop_tape", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.autograph.ConversionOptions", "tensorflow.python.util.nest.flatten" ], [ "tensorflow.python.ops.control_flow_util_v2.CondBranchFuncGraph", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.gen_dataset_ops.optional_none", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.control_flow_util_v2.create_new_tf_function", "tensorflow.python.framework.func_graph.pack_sequence_as", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.gradients_impl.IsTrainable", "tensorflow.python.ops.gradients_impl._GradientsHelper", "tensorflow.python.ops.control_flow_util_v2.unique_fn_name", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.gen_functional_ops.fake_param", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_resource_variable_ops.variable_shape", "tensorflow.python.ops.control_flow_util_v2.in_defun", "tensorflow.python.ops.control_flow_util_v2.maybe_set_lowering_attr", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.ops.control_flow_util_v2.unique_grad_fn_name", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_dataset_ops.optional_get_value", "tensorflow.python.framework.function_def_to_graph.function_def_to_graph", "tensorflow.python.ops.gen_dataset_ops.optional_from_value" ], [ "tensorflow.python.data.experimental.ops.scan_ops.scan", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "numpy.array", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.keras.layers.Lambda", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.keras.layers.Dense", "tensorflow.python.ops.state_ops.assign_add", 
"tensorflow.python.keras.backend.int_shape", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.keras.backend.function", "tensorflow.python.keras.engine.get_source_inputs", "tensorflow.python.keras.backend.dot", "tensorflow.python.keras.layers.Conv2D", "numpy.any", "tensorflow.python.eager.context.executing_eagerly", "numpy.random.randint", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.concatenate", "tensorflow.python.keras.layers.add", "tensorflow.python.platform.test.main", "tensorflow.python.keras.models.Model.from_config", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.keras.Sequential", "numpy.min", "tensorflow.python.keras.utils.tf_utils.get_reachable_from_inputs", "tensorflow.python.keras.Model", "tensorflow.python.keras.layers.Masking", "tensorflow.python.keras.engine.input_layer.Input", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.models.model_from_yaml", "tensorflow.python.keras.layers.Dropout", "tensorflow.python.training.rmsprop.RMSPropOptimizer", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.layers.InputLayer", "tensorflow.python.keras.layers.wrappers.TimeDistributed", "tensorflow.python.keras.models.model_from_json", "tensorflow.python.ops.array_ops.ones_like", "numpy.random.random", "tensorflow.python.keras.engine.network.Network", "numpy.ones", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.layers.GlobalAveragePooling2D", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.estimator.keras._convert_keras_metrics_to_estimator", "tensorflow.python.saved_model.utils_impl.get_variables_path", "tensorflow.python.util.compat.as_text", "tensorflow.python.estimator.export.export.get_temp_export_dir", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.estimator.model_fn.export_outputs_for_mode", "tensorflow.python.keras.models.clone_and_build_model", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.saved_model.builder._SavedModelBuilder", "tensorflow.python.saved_model.utils_impl.get_or_create_assets_dir", "tensorflow.python.estimator.export.export.get_timestamped_export_dir", "tensorflow.python.training.checkpointable.util.named_saveables", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.python.client.session.Session", "tensorflow.python.keras.models.model_from_json", "tensorflow.python.estimator.export.export.build_all_signature_defs", "tensorflow.python.platform.gfile.Rename", "tensorflow.python.framework.ops.Graph", "tensorflow.python.lib.io.file_io.write_string_to_file", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.keras.backend.set_learning_phase", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.training.saver.Saver", "tensorflow.python.saved_model.utils_impl.get_or_create_variables_dir" ], [ "tensorflow.python.saved_model.utils.build_tensor_info", "tensorflow.python.saved_model.utils.get_tensor_from_tensor_info", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.sparse_placeholder", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.saved_model.utils.build_tensor_info_from_op", "tensorflow.python.platform.test.main", 
"tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.functional_ops.remote_call", "tensorflow.python.data.ops.dataset_ops.flat_structure", "tensorflow.python.framework.device.DeviceSpec", "tensorflow.python.framework.ops.device", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_dataset_ops.make_iterator", "tensorflow.python.eager.function.defun", "tensorflow.python.data.util.sparse.serialize_sparse_tensors", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.util.sparse.as_dense_types", "tensorflow.python.data.ops.iterator_ops.Iterator.from_string_handle", "tensorflow.python.ops.gen_dataset_ops.generator_dataset", "tensorflow.python.ops.gen_dataset_ops.iterator_to_string_handle", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.resource_variable_ops.destroy_resource_op", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.data.util.sparse.as_dense_shapes", "tensorflow.python.ops.gen_dataset_ops.iterator_from_string_handle_v2", "tensorflow.python.ops.gen_dataset_ops.anonymous_iterator" ], [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.googletest.main", "tensorflow.python.debug.lib.debug_data.DebugDumpDir", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.ops.math_ops.square", "tensorflow.python.client.session.Session", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.python.debug.lib.debug_gradients.GradientsDebugger", "tensorflow.python.debug.lib.debug_gradients.clear_gradient_debuggers", "tensorflow.python.debug.lib.debug_utils.watch_graph", "tensorflow.python.debug.lib.debug_gradients.gradient_values_from_dump", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.core.protobuf.config_pb2.ConfigProto" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.12", "2.6", "2.7", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.12", "2.6", "1.13", "2.3", "2.4", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qbxlvnf11/graph-neural-networks-for-graph-classification
[ "5d69ead58c786aa8e472ab0433156fe09fe6ca4b", "5d69ead58c786aa8e472ab0433156fe09fe6ca4b" ]
[ "models/GraphUNet.py", "layers/graph_isomorphism_layer.py" ]
[ "import os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#from layers.graph_convolution_layer import GraphConvolutionLayer\nfrom layers.graph_unet_layer import GraphUNetLayer\nfrom readouts.basic_readout import readout_function\n\n\"\"\"\nBase paper: https://arxiv.org/pdf/1905.05178.pdf\n\"\"\"\n\nclass GraphUNet(nn.Module):\n def __init__(self, n_feat, n_class, n_layer, agg_hidden, fc_hidden, dropout, readout, device):\n super(GraphUNet, self).__init__()\n \n self.n_layer = n_layer\n self.readout = readout\n \n # Pooling_rate\n pooling_rations = [0.8 - (i * 0.1) if i < 3 else 0.5 for i in range(n_layer)]\n \n # Graph unet layer\n self.graph_unet_layers = []\n for i in range(n_layer):\n if i == 0:\n self.graph_unet_layers.append(GraphUNetLayer(n_feat, agg_hidden, pooling_rations[i], device))\n else:\n self.graph_unet_layers.append(GraphUNetLayer(agg_hidden, agg_hidden, pooling_rations[i], device))\n \n # Fully-connected layer\n self.fc1 = nn.Linear(agg_hidden, fc_hidden)\n self.fc2 = nn.Linear(fc_hidden, n_class)\n\n def forward(self, data):\n \n for i in range(self.n_layer):\n # Graph unet layer\n data = self.graph_unet_layers[i](data)\n \n x = data[0]\n \n # Dropout\n if i != self.n_layer - 1:\n x = F.dropout(x, p=self.dropout, training=self.training)\n \n # Readout\n x = readout_function(x, self.readout)\n \n # Fully-connected layer\n x = F.relu(self.fc1(x))\n x = F.softmax(self.fc2(x))\n \n return x\n\n def __repr__(self):\n layers = ''\n\n for i in range(self.n_layer):\n layers += str(self.graph_unet_layers[i]) + '\\n'\n layers += str(self.fc1) + '\\n'\n layers += str(self.fc2) + '\\n'\n return layers", "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\nReferences: https://github.com/williamleif/graphsage-simple\n\"\"\"\n\nclass MLP(nn.Module):\n def __init__(self, n_layer, input_dim, hidden_dim, output_dim):\n\n super(MLP, self).__init__()\n\n self.linear_or_not = True # Default is linear model\n self.n_layer = n_layer\n\n if n_layer < 1:\n raise ValueError(\"number of layers should be positive!\")\n elif n_layer == 1:\n # Linear model\n self.linear = nn.Linear(input_dim, output_dim)\n else:\n # Multi-layer model\n self.linear_or_not = False\n self.linears = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n \n self.linears.append(nn.Linear(input_dim, hidden_dim))\n for layer in range(n_layer - 2):\n self.linears.append(nn.Linear(hidden_dim, hidden_dim))\n self.linears.append(nn.Linear(hidden_dim, output_dim))\n\n for layer in range(n_layer - 1):\n self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))\n\n def forward(self, x):\n if self.linear_or_not:\n # If linear model\n return self.linear(x)\n else:\n # If MLP\n h = x\n for layer in range(self.n_layer - 1):\n h = F.relu(self.batch_norms[layer](self.linears[layer](h)))\n return self.linears[self.n_layer - 1](h)\n\nclass GraphIsomorphismLayer(nn.Module):\n def __init__(self, layer_num, in_features, out_features, neighbor_pooling_type, learn_eps, eps, device, num_mlp_layers = 2):\n super(GraphIsomorphismLayer, self).__init__()\n self.layer_num = layer_num\n self.in_features = in_features\n self.out_features = out_features \n \n self.neighbor_pooling_type = neighbor_pooling_type\n self.learn_eps = learn_eps\n self.eps = eps\n self.device = device\n\n self.mlp = MLP(num_mlp_layers, in_features, out_features, out_features).to(device)\n\n self.batch_norm = 
nn.BatchNorm1d(out_features).to(device)\n\n def maxpool(self, h, padded_neighbor_list):\n ###Element-wise minimum will never affect max-pooling\n\n dummy = torch.min(h, dim = 0)[0]\n h_with_dummy = torch.cat([h, dummy.reshape((1, -1)).to(self.device)])\n pooled_rep = torch.max(h_with_dummy[padded_neighbor_list], dim = 1)[0]\n return pooled_re\n\n # Pooling neighboring nodes and center nodes separately by epsilon reweighting \n def next_layer_eps(self, h, padded_neighbor_list = None, Adj_block = None):\n if self.neighbor_pooling_type == \"max\":\n # If max pooling\n pooled = self.maxpool(h, padded_neighbor_list)\n else:\n # If sum or average pooling\n pooled = torch.spmm(Adj_block, h)\n if self.neighbor_pooling_type == \"average\":\n # If average pooling\n degree = torch.spmm(Adj_block, torch.ones((Adj_block.shape[0], 1)).to(self.device))\n pooled = pooled/degree\n\n # Reweights the center node representation when aggregating it with its neighbors\n pooled = pooled + (1 + self.eps[self.layer_num]) * h\n pooled_rep = self.mlp(pooled)\n h = self.batch_norm(pooled_rep)\n\n # Non-linearity\n h = F.relu(h)\n return h\n\n # Pooling neighboring nodes and center nodes altogether \n def next_layer(self, h, padded_neighbor_list = None, Adj_block = None): \n if self.neighbor_pooling_type == \"max\":\n # If max pooling\n pooled = self.maxpool(h, padded_neighbor_list)\n else:\n #If sum or average pooling\n pooled = torch.spmm(Adj_block, h)\n if self.neighbor_pooling_type == \"average\":\n #If average pooling\n degree = torch.spmm(Adj_block, torch.ones((Adj_block.shape[0], 1)).to(self.device))\n pooled = pooled/degree\n\n # Representation of neighboring and center nodes \n pooled_rep = self.mlp(pooled)\n h = self.batch_norm(pooled_rep)\n\n # Non-linearity\n h = F.relu(h)\n return h\n \n def forward(self, h, Adj_block, padded_neighbor_list):\n if self.neighbor_pooling_type == \"max\" and self.learn_eps:\n h = self.next_layer_eps(h, padded_neighbor_list = padded_neighbor_list)\n elif not self.neighbor_pooling_type == \"max\" and self.learn_eps:\n h = self.next_layer_eps(h, Adj_block = Adj_block)\n elif self.neighbor_pooling_type == \"max\" and not self.learn_eps:\n h = self.next_layer(h, padded_neighbor_list = padded_neighbor_list)\n elif not self.neighbor_pooling_type == \"max\" and not self.learn_eps:\n h = self.next_layer(h, Adj_block = Adj_block)\n \n return h\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'" ]
[ [ "torch.nn.Linear", "torch.nn.functional.dropout" ], [ "torch.nn.BatchNorm1d", "torch.ones", "torch.max", "torch.min", "torch.nn.ModuleList", "torch.nn.Linear", "torch.nn.functional.relu", "torch.spmm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
supertopdev/data-science
[ "9534085236a79e123b97f0771a4641289039d93b" ]
[ "ISR/utils/datahandler.py" ]
[ "import os\nimport imageio\nimport numpy as np\nfrom ISR.utils.logger import get_logger\n\n\nclass DataHandler:\n \"\"\"\n DataHandler generate augmented batches used for training or validation.\n\n Args:\n lr_dir: directory containing the Low Res images.\n hr_dir: directory containing the High Res images.\n patch_size: integer, size of the patches extracted from LR images.\n scale: integer, upscaling factor.\n n_validation_samples: integer, size of the validation set. Only provided if the\n DataHandler is used to generate validation sets.\n T: float in [0,1], is the patch \"flatness\" threshold.\n Determines what level of detail the patches need to meet. 0 means any patch is accepted.\n \"\"\"\n\n def __init__(self, lr_dir, hr_dir, patch_size, scale, n_validation_samples=None, T=0.03):\n self.folders = {'hr': hr_dir, 'lr': lr_dir} # image folders\n self.extensions = ('.png', '.jpeg', '.jpg') # admissible extension\n self.img_list = {} # list of file names\n self.n_validation_samples = n_validation_samples\n self.patch_size = patch_size\n self.scale = scale\n self.T = T\n self.patch_size = {'lr': patch_size, 'hr': patch_size * self.scale}\n self.logger = get_logger(__name__)\n self._make_img_list()\n self._check_dataset()\n\n def _make_img_list(self):\n \"\"\" Creates a dictionary of lists of the acceptable images contained in lr_dir and hr_dir. \"\"\"\n\n for res in ['hr', 'lr']:\n file_names = os.listdir(self.folders[res])\n file_names = [file for file in file_names if file.endswith(self.extensions)]\n self.img_list[res] = np.sort(file_names)\n\n if self.n_validation_samples:\n samples = np.random.choice(\n range(len(self.img_list['hr'])), self.n_validation_samples, replace=False\n )\n for res in ['hr', 'lr']:\n self.img_list[res] = self.img_list[res][samples]\n\n def _check_dataset(self):\n \"\"\" Sanity check for dataset. \"\"\"\n\n # the order of these asserts is important for testing\n assert len(self.img_list['hr']) == self.img_list['hr'].shape[0], 'UnevenDatasets'\n assert self._matching_datasets(), 'Input/LabelsMismatch'\n\n def _matching_datasets(self):\n \"\"\" Rough file name matching between lr and hr directories. 
\"\"\"\n # LR_name.png = HR_name+x+scale.png\n # or\n # LR_name.png = HR_name.png\n LR_name_root = [x.split('.')[0].split('x')[0] for x in self.img_list['lr']]\n HR_name_root = [x.split('.')[0] for x in self.img_list['hr']]\n return np.all(HR_name_root == LR_name_root)\n\n def _not_flat(self, patch):\n \"\"\"\n Determines whether the patch is complex, or not-flat enough.\n Threshold set by self.T.\n \"\"\"\n\n if max(np.std(patch, axis=0).mean(), np.std(patch, axis=1).mean()) < self.T:\n return False\n else:\n return True\n\n def _crop_imgs(self, imgs, batch_size, idx=0):\n \"\"\"\n Get random top left corners coordinates in LR space, multiply by scale to\n get HR coordinates.\n Gets batch_size + n possible coordinates.\n Accepts the batch only if the standard deviation of pixel intensities is above a given threshold, OR\n no patches can be further discarded (n have been discarded already).\n Square crops of size patch_size are taken from the selected\n top left corners.\n \"\"\"\n\n n = 2 * batch_size\n top_left = {'x': {}, 'y': {}}\n for i, axis in enumerate(['x', 'y']):\n top_left[axis]['lr'] = np.random.randint(\n 0, imgs['lr'].shape[i] - self.patch_size['lr'] + 1, batch_size + n\n )\n top_left[axis]['hr'] = top_left[axis]['lr'] * self.scale\n\n crops = {}\n for res in ['lr', 'hr']:\n slices = [\n [slice(x, x + self.patch_size[res]), slice(y, y + self.patch_size[res])]\n for x, y in zip(top_left['x'][res], top_left['y'][res])\n ]\n crops[res] = []\n for s in slices:\n candidate_crop = imgs[res][s[0], s[1], slice(None)]\n if self._not_flat(candidate_crop) or n == 0:\n crops[res].append(candidate_crop)\n else:\n n -= 1\n if len(crops[res]) == batch_size:\n break\n crops[res] = np.array(crops[res])\n return crops\n\n def _apply_transform(self, img, transform_selection):\n \"\"\" Rotates and flips input image according to transform_selection. \"\"\"\n\n rotate = {\n 0: lambda x: x,\n 1: lambda x: np.rot90(x, k=1, axes=(1, 0)), # rotate right\n 2: lambda x: np.rot90(x, k=1, axes=(0, 1)), # rotate left\n }\n\n flip = {\n 0: lambda x: x,\n 1: lambda x: np.flip(x, 0), # flip along horizontal axis\n 2: lambda x: np.flip(x, 1), # flip along vertical axis\n }\n\n rot_direction = transform_selection[0]\n flip_axis = transform_selection[1]\n\n img = rotate[rot_direction](img)\n img = flip[flip_axis](img)\n\n return img\n\n def _transform_batch(self, batch, transforms):\n \"\"\" Transforms each individual image of the batch independently. \"\"\"\n\n t_batch = np.array(\n [self._apply_transform(img, transforms[i]) for i, img in enumerate(batch)]\n )\n return t_batch\n\n def get_batch(self, batch_size, idx=None):\n \"\"\"\n Returns a dictionary with keys ('lr', 'hr') containing training batches\n of Low Res and High Res image patches.\n \"\"\"\n\n if not idx:\n # randomly select one image. idx is given at validation time.\n idx = np.random.choice(range(len(self.img_list['hr'])))\n img = {}\n for res in ['lr', 'hr']:\n img_path = os.path.join(self.folders[res], self.img_list[res][idx])\n img[res] = imageio.imread(img_path) / 255.0\n batch = self._crop_imgs(img, batch_size)\n transforms = np.random.randint(0, 3, (batch_size, 2))\n batch['lr'] = self._transform_batch(batch['lr'], transforms)\n batch['hr'] = self._transform_batch(batch['hr'], transforms)\n\n return batch\n\n def get_validation_batches(self, batch_size):\n \"\"\" Returns a batch for each image in the validation set. 
\"\"\"\n\n if self.n_validation_samples:\n batches = []\n for idx in range(self.n_validation_samples):\n batches.append(self.get_batch(batch_size, idx))\n return batches\n else:\n self.logger.error(\n 'No validation set size specified. (not operating in a validation set?)'\n )\n raise ValueError(\n 'No validation set size specified. (not operating in a validation set?)'\n )\n\n def get_validation_set(self, batch_size):\n \"\"\"\n Returns a batch for each image in the validation set.\n Flattens and splits them to feed it to Keras's model.evaluate.\n \"\"\"\n\n if self.n_validation_samples:\n batches = self.get_validation_batches(batch_size)\n valid_set = {'lr': [], 'hr': []}\n for batch in batches:\n for res in ('lr', 'hr'):\n valid_set[res].extend(batch[res])\n for res in ('lr', 'hr'):\n valid_set[res] = np.array(valid_set[res])\n return valid_set\n else:\n self.logger.error(\n 'No validation set size specified. (not operating in a validation set?)'\n )\n raise ValueError(\n 'No validation set size specified. (not operating in a validation set?)'\n )\n" ]
[ [ "numpy.rot90", "numpy.sort", "numpy.all", "numpy.std", "numpy.array", "numpy.flip", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vaibhav016/TensorFlowASR
[ "2c90f67f284be6c8a6c182223b9f517a73bc766f" ]
[ "tensorflow_asr/utils/env_util.py" ]
[ "# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union, List\nimport warnings\nimport tensorflow as tf\n\nlogger = tf.get_logger()\n\n\ndef setup_environment(): # Set memory growth and only log ERRORs\n \"\"\" Setting tensorflow running environment \"\"\"\n warnings.simplefilter(\"ignore\")\n logger.setLevel(\"WARN\")\n\n\ndef setup_devices(devices: List[int], cpu: bool = False):\n \"\"\"Setting visible devices\n\n Args:\n devices (list): list of visible devices' indices\n \"\"\"\n if cpu:\n cpus = tf.config.list_physical_devices(\"CPU\")\n tf.config.set_visible_devices(cpus, \"CPU\")\n else:\n gpus = tf.config.list_physical_devices(\"GPU\")\n if gpus:\n visible_gpus = [gpus[i] for i in devices]\n tf.config.set_visible_devices(visible_gpus, \"GPU\")\n print(\"Run on\", len(visible_gpus), \"Physical GPUs\")\n\n\ndef setup_strategy(devices: List[int], tpu_address: str = None):\n \"\"\"Setting mirrored strategy for training\n\n Args:\n devices (list): list of visible devices' indices\n tpu_address (str): an optional custom tpu address\n\n Returns:\n tf.distribute.Strategy: TPUStrategy for training on tpus or MirroredStrategy for training on gpus\n \"\"\"\n try:\n return setup_tpu(tpu_address)\n except (ValueError, tf.errors.NotFoundError) as e:\n logger.warn(e)\n pass\n setup_devices(devices)\n return tf.distribute.MirroredStrategy()\n\n\ndef setup_tpu(tpu_address=None):\n if tpu_address is None:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver()\n else:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=\"grpc://\" + tpu_address)\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n print(\"All TPUs: \", tf.config.list_logical_devices(\"TPU\"))\n return tf.distribute.experimental.TPUStrategy(resolver)\n\n\ndef has_devices(devices: Union[List[str], str]):\n if isinstance(devices, list):\n return all([len(tf.config.list_logical_devices(d)) != 0 for d in devices])\n return len(tf.config.list_logical_devices(devices)) != 0\n" ]
[ [ "tensorflow.tpu.experimental.initialize_tpu_system", "tensorflow.config.list_logical_devices", "tensorflow.get_logger", "tensorflow.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.config.list_physical_devices", "tensorflow.config.experimental_connect_to_cluster", "tensorflow.config.set_visible_devices", "tensorflow.distribute.experimental.TPUStrategy", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yjc9696/biobert-my
[ "ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3" ]
[ "helpData_n2c2.py" ]
[ "import json\n\nimport numpy as np\n\nimport metrics\nimport tokenization\nimport utils\nfrom config import opt\n\n\ndef write2file(data, path):\n with open(path, 'w') as f:\n f.write(json.dumps(data, ensure_ascii=False))\ndef get_BIO(self, tag):\n return self.id2tag[tag]\n # if tag == 0:\n # return 'O'\n # if tag % 2 == 1:\n # return 'B-' + str(tag)\n # else:\n # return 'I-' + str(tag)\n\n\ndef detokenize(self,in_tok,in_lab):\n \"\"\"\n convert suub-word level BioBERT-NER results to full words and labels.\n\n Args:\n pred_token_test_path: path to token_test.txt from output folder. ex) output/token_test.txt\n pred_label_test_path: path to label_test.txt from output folder. ex) output/label_test.txt\n Outs:\n A dictionary that contains full words and predicted labels.\n \"\"\"\n\n # read predicted\n pred = {'toks':[], 'labels':[]} # dictionary for predicted tokens and labels.\n for lineIdx, (lineTok, lineLab) in enumerate(zip(in_tok, in_lab)):\n pred['toks'].append(lineTok)\n if lineLab in ['[CLS]','[SEP]', 'X']: # replace non-text tokens with O. These will not be evaluated.\n pred['labels'].append('O')\n continue\n pred['labels'].append(get_BIO(self,lineLab))\n\n assert (len(pred['toks']) == len(pred['labels'])), \"Error! : len(pred['toks'])(%s) != len(pred['labels'])(%s) : Please report us \"%(len(pred['toks']), len(pred['labels']))\n\n bert_pred = {'toks':[], 'labels':[], 'sentence':[]}\n buf = []\n for t, l in zip(pred['toks'], pred['labels']):\n if t in ['[CLS]','[SEP]']: # non-text tokens will not be evaluated.\n bert_pred['toks'].append(t)\n bert_pred['labels'].append(t) # Tokens and labels should be identical if they are [CLS] or [SEP]\n if t == '[SEP]':\n bert_pred['sentence'].append(buf)\n buf = []\n continue\n elif t[:2] == '##': # if it is a piece of a word (broken by Word Piece tokenizer)\n bert_pred['toks'][-1] += t[2:] # append pieces to make a full-word\n buf[-1]+=t[2:]\n else:\n bert_pred['toks'].append(t)\n bert_pred['labels'].append(l)\n buf.append(t)\n\n assert (len(bert_pred['toks']) == len(bert_pred['labels'])), (\"Error! 
: len(bert_pred['toks']) != len(bert_pred['labels']) : Please report us\")\n\n return bert_pred\n\n\nclass DataHelper(object):\n def __init__(self, opt):\n self.opt = opt\n self.tokenizer = tokenization.FullTokenizer(\n vocab_file=opt.bert_vocab_unk, do_lower_case=True)\n self.id2r, self.r2id = None, None\n self.id2tag, self.tag2id = None, None\n self.id2type, self.type2id = None, None\n self.type2types = None\n self.get_relations()\n\n self.origin_train_data = utils.load_data(self.opt.train_data_dir)\n self.origin_dev_data = utils.load_data(self.opt.dev_data_dir)\n self.origin_test1_data = utils.load_data(self.opt.test1_data_dir)\n\n def get_relations(self):\n \"\"\"\n 得到所有的xx2xx文件\n \"\"\"\n # origin_50_schema = load_data(self.opt.schema_dir_old, case=1)\n relation_all = utils.load_data(self.opt.schema_dir_new, case=1)\n # self.down2top = {} # 记录类别的上下为关系\n # for old, new in zip(origin_50_schema, new_50_schema):\n # old_sample_obj_type = old['object_type']\n # old_sample_sbj_type = old['subject_type']\n # new_sample_obj_type = new['object_type']\n # new_sample_sbj_type = new['subject_type']\n # top_obj, top_sbj = self.down2top.get(old_sample_obj_type, None), self.down2top.get(old_sample_sbj_type, None)\n # assert (top_obj == None or top_obj == new_sample_obj_type) \\\n # and (top_sbj == None or top_sbj == new_sample_sbj_type)\n # self.down2top[old_sample_obj_type] = new_sample_obj_type\n # self.down2top[old_sample_sbj_type] = new_sample_sbj_type\n # print(\"上下位关系为:{}\".format(self.down2top))\n\n self.r2id = {}\n self.id2r = {}\n self.id2tag = {}\n self.tag2id = {}\n self.id2type = {}\n self.type2id = {}\n exist_ent_type, exist_rel_type = set(), set()\n for sample in relation_all:\n obj, r, sbj = sample['object_type'], sample['predicate'], sample['subject_type']\n if obj not in exist_ent_type:\n self.id2type[len(exist_ent_type) + 1] = obj\n self.id2tag[2 * len(exist_ent_type) + 1] = 'B-' + obj\n self.id2tag[2 * len(exist_ent_type) + 2] = 'I-' + obj\n exist_ent_type.add(obj)\n if sbj not in exist_ent_type:\n self.id2type[len(exist_ent_type) + 1] = sbj\n self.id2tag[2 * len(exist_ent_type) + 1] = 'B-' + sbj\n self.id2tag[2 * len(exist_ent_type) + 2] = 'I-' + sbj\n exist_ent_type.add(sbj)\n if r not in exist_rel_type:\n # 多余给NA\n self.id2r[len(exist_rel_type)] = r\n exist_rel_type.add(r)\n\n self.id2r[len(exist_rel_type)] = 'NA'\n exist_rel_type.add('NA')\n self.id2tag[0] = 'O'\n exist_ent_type.add('O')\n self.id2type[0] = 'O'\n\n print(\"实体类型数目为:{};关系数目为:{}\".format(len(exist_ent_type), len(exist_rel_type)))\n self.r2id = {self.id2r[idx]: idx for idx in self.id2r.keys()}\n self.tag2id = {self.id2tag[idx]: idx for idx in self.id2tag.keys()}\n self.type2id = {self.id2type[idx]: idx for idx in self.id2type.keys()}\n\n self.type2types = {ent: set() for ent in exist_ent_type}\n for sample in relation_all:\n obj, r, sbj = sample['object_type'], sample['predicate'], sample['subject_type']\n self.type2types[obj].add(sbj)\n\n self.type2types = {ent: list(self.type2types[ent]) for ent in self.type2types.keys()}\n\n # 写入文件\n print(\"写入xx2xx数据到目录{}..\".format(self.opt.json_data_root))\n write2file(self.id2r, self.opt.id2r_dir)\n write2file(self.r2id, self.opt.r2id_dir)\n write2file(self.id2tag, self.opt.id2tag_dir)\n write2file(self.tag2id, self.opt.tag2id_dir)\n write2file(self.id2type, self.opt.id2type_dir)\n write2file(self.type2id, self.opt.type2id_dir)\n write2file(self.type2types, self.opt.type2types_dir)\n\n def down2topForDatas(self, datas):\n topDatas = []\n for data in datas:\n text = 
data['text']\n downSpoList = data['spo_list']\n topSpoList = []\n for spo in downSpoList:\n spo['object_type'] = self.down2top[spo['object_type']]\n spo['subject_type'] = self.down2top[spo['subject_type']]\n topSpoList.append(spo)\n dataUnit = {}\n dataUnit['text'] = text\n dataUnit['spo_list'] = topSpoList\n topDatas.append(dataUnit)\n return topDatas\n\n def get_positions(self, data_list, map_str):\n \"\"\"\n 返回实体在单词列表中的所有位置\n sample:\n >> input: ['球','星','球','星', ...., ], '球星'\n >> return: 【(2, 3),(4,5)】\n \"\"\"\n result = []\n if (opt.use_all_positions == False):\n str, end = self.get_position(data_list, map_str)\n result.append((str, end))\n return result\n map_str = map_str.strip().replace(' ', '$')\n map_str = self.tokenizer.tokenize(map_str)\n map_str = [i.replace('#', '') for i in map_str]\n map_str = ''.join(map_str)\n data_list = [i.replace('#', '') for i in data_list]\n # 如果只由一个词组成\n for word in data_list:\n if map_str.lower() in word.lower():\n start_id = end_id = data_list.index(word)\n result.append((start_id, end_id))\n start_id = -1\n end_id = -1\n for idx, word in enumerate(data_list):\n if map_str.startswith(word):\n start_id = end_id = idx\n while end_id + 1 < len(data_list) and data_list[end_id + 1] in map_str:\n if \"\".join(data_list[start_id:end_id + 2]) == map_str:\n # print(\"\".join(data_list[start_id:end_id+3]))\n result.append((start_id, end_id + 1))\n break\n end_id += 1\n find_str = \"\"\n for idx in range(start_id, end_id + 1):\n find_str = find_str + data_list[idx]\n if find_str != map_str:\n pre_extend = (data_list[start_id - 1] if start_id > 0 else \"\") + find_str\n last_extend = find_str + (data_list[end_id + 1] if end_id < len(data_list) - 1 else \"\")\n pre_last_extend = (data_list[start_id - 1] if start_id > 0 else \"\") + find_str + (data_list[end_id + 1] if end_id < len(data_list) - 1 else \"\")\n if map_str in pre_extend:\n start_id -= 1\n elif map_str in last_extend:\n end_id += 1\n elif map_str in pre_last_extend:\n start_id -= 1\n end_id += 1\n else:\n start_id = -1\n end_id = -1\n if len(result) > 0:\n return result\n for idx, word in enumerate(data_list[:-1]):\n if map_str in (word + data_list[idx + 1]):\n result.append((idx, idx + 1))\n\n if len(result) == 0:\n result.append((-1, -1))\n # print(\"word_list{} map_str {} loss\".format(data_list, map_str))\n return result\n\n def get_position(self, data_list, map_str):\n \"\"\"\n 返回实体在单词列表中的位置,只返回第一次出现的位置\n sample:\n >> input: ['球','星','姚','明', ...., ], '姚明'\n >> return: (2, 3)\n \"\"\"\n map_str = map_str.strip().replace(' ', '$')\n map_str = self.tokenizer.tokenize(map_str)\n map_str = [i.replace('#', '') for i in map_str]\n map_str = ''.join(map_str)\n data_list = [i.replace('#', '') for i in data_list]\n # 如果只由一个词组成\n for word in data_list:\n if map_str.lower() in word.lower():\n start_id = end_id = data_list.index(word)\n return start_id, end_id\n\n start_id = -1\n end_id = -1\n for idx, word in enumerate(data_list):\n if start_id != - 1 and end_id != -1:\n return start_id, end_id\n if map_str.startswith(word):\n start_id = end_id = idx\n while end_id + 1 < len(data_list) and data_list[end_id + 1] in map_str:\n if \"\".join(data_list[start_id:end_id + 2]) == map_str:\n # print(\"\".join(data_list[start_id:end_id+3]))\n return start_id, end_id + 1\n end_id += 1\n find_str = \"\"\n for idx in range(start_id, end_id + 1):\n find_str = find_str + data_list[idx]\n if find_str != map_str:\n pre_extend = (data_list[start_id - 1] if start_id > 0 else \"\") + find_str\n last_extend = find_str + 
(data_list[end_id + 1] if end_id < len(data_list) - 1 else \"\")\n pre_last_extend = (data_list[start_id - 1] if start_id > 0 else \"\") + find_str + (data_list[end_id + 1] if end_id < len(data_list) - 1 else \"\")\n if map_str in pre_extend:\n start_id -= 1\n elif map_str in last_extend:\n end_id += 1\n elif map_str in pre_last_extend:\n start_id -= 1\n end_id += 1\n else:\n start_id = -1\n end_id = -1\n if start_id != -1 and end_id != -1:\n return start_id, end_id\n for idx, word in enumerate(data_list[:-1]):\n if map_str in (word + data_list[idx + 1]):\n return idx, idx + 1\n # print(\"word_list{} map_str {} loss\".format(data_list, map_str))\n return start_id, end_id\n\n def get_tag(self, word_list, entity_list, type_list):\n '''\n 得到一个句子的tag标签\n sampple:\n >> input: ['球','星', '姚', '明', ...], ['姚明'], ['人物']\n >> return: ['O', 'O', 'id(B-人物), id(I-人物)']\n '''\n word_list = [word.replace('#', '') for word in word_list]\n tag_list = [0] * len(word_list)\n for entity, type_ in zip(entity_list, type_list):\n positions = self.get_positions(word_list, entity)\n for start_id, end_id in positions:\n if start_id == -1 or end_id == -1:\n continue\n # 补充书名号\n # if start_id > 0 and end_id < len(word_list)-1:\n # if word_list[start_id-1] == '《' and word_list[end_id+1] == '》':\n # start_id -= 1\n # end_id += 1\n Bid = 2 * (self.type2id[type_] - 1) + 1\n Iid = 2 * (self.type2id[type_] - 1) + 2\n tag_list[start_id] = Bid\n if start_id < end_id:\n for idx in range(start_id + 1, end_id + 1):\n tag_list[idx] = Iid\n return tag_list\n\n def get_entity_list_and_type_list(self, data_list):\n \"\"\"\n 得到实体和对应的类型列表,一一对应\n sample:\n >> input: [姚明,NBA]\n >> return:[人,组织]\n \"\"\"\n entity_list, type_list = [], []\n for unit in data_list:\n entity_list.append(unit['object'])\n type_list.append(unit['object_type'])\n entity_list.append(unit['subject'])\n type_list.append(unit['subject_type'])\n return entity_list, type_list\n\n def get_sample_exist_entity2rlation(self, word_list, spo_list):\n \"\"\"\n 给定句子的 bert切词列表, 一句话的spo_list\n 返回该句话存在的头实体尾实体位置及对应的关系字典\n {(obj_s, obj_e, sbj_s, sbj_e): r}\n \"\"\"\n golden_map = {}\n word_list = [word.replace('#', '') for word in word_list]\n for spo in spo_list:\n obj = spo['object']\n sbj = spo['subject']\n\n o_po = self.get_positions(word_list, obj)\n s_po = self.get_positions(word_list, sbj)\n for o_s, o_e in o_po:\n for s_s, s_e in s_po:\n r = self.r2id[spo['predicate']]\n golden_map[(o_s, o_e, s_s, s_e)] = r\n return golden_map\n\n def get_sample_all_entity2relation(self, tags_list, golden_map):\n \"\"\"\n 返回一个句子所有可能实体组合极其关系\n [[s1, e1, s2, e2, r],\n [s1, e1, s2, e2, 0]\n ...]]\n \"\"\"\n all_entity = []\n NA_entity = []\n NA_num = 0\n rel_num = 0\n tags_list = [self.id2tag[i] for i in tags_list]\n ent_and_position = metrics.get_entities(tags_list)\n for ent1 in ent_and_position:\n for ent2 in ent_and_position:\n if ent2 == ent1:\n continue\n ent2_for_ent1 = self.type2types.get(ent1[0], [])\n if ent2[0] not in ent2_for_ent1:\n continue\n entity_tuple = (ent1[1], ent1[2], ent2[1], ent2[2])\n # 0代表关系为NA\n re = golden_map.get(entity_tuple, self.r2id['NA'])\n ent_list = [entity_tuple[i] for i in range(4)]\n ent_list.append(re)\n if re == self.r2id['NA']:\n NA_entity.append(ent_list)\n else:\n all_entity.append(ent_list)\n rel_num = len(all_entity)\n if len(NA_entity) > 0:\n all_entity.extend(NA_entity[:min(2, len(NA_entity))])\n NA_num = min(opt.naNum, len(NA_entity))\n return all_entity, rel_num, NA_num\n\n def get_sens_and_tags_and_entsRel(self, datas, case=0):\n 
rel_max_sen = -1\n exceed_length_num = 0\n NA_num = 0\n max_r_num = 0\n all_rel_num = 0\n sens, tags, ent_rel = [], [], []\n PAD = self.tokenizer.convert_tokens_to_ids(['[PAD]'])\n O_tag = [self.type2id['O']]\n BIO_data = \"\"\n BIO_base =\"\"\n for data in datas:\n text = data['text']\n # 一共修改3处, util中一处 此文件两处去掉首位空格,然后将空格替换为@\n text = text.strip().replace(' ', '$')\n word_list = self.tokenizer.tokenize(text)\n sen = self.tokenizer.convert_tokens_to_ids(word_list)\n rel_max_sen = max(rel_max_sen, len(word_list))\n if len(word_list) > self.opt.seq_length:\n exceed_length_num += 1\n\n if len(word_list) < self.opt.seq_length:\n sen = sen + PAD * (self.opt.seq_length - len(sen))\n else:\n sen = sen[:self.opt.seq_length]\n sens.append(sen)\n\n # if case >= 2:\n # continue\n entity_list, type_list = self.get_entity_list_and_type_list(data['spo_list'])\n # __import__('ipdb').set_trace()\n # '▌1999年:「喜剧之王」前两年的贺岁档其实都有星爷,只不过作品票房一直跟不上'\n tag = self.get_tag(word_list, entity_list, type_list)\n assert len(word_list) == len(tag)\n if len(word_list) < self.opt.seq_length:\n tag = tag + O_tag * (self.opt.seq_length - len(tag))\n else:\n tag = tag[:self.opt.seq_length]\n tags.append(tag)\n de_t=detokenize(self,word_list,tag)\n for i,word in enumerate(word_list):\n if word == '$':\n continue\n BIO_data = BIO_data + word + \"\t\" + get_BIO(self, tag[i]) + \"\\n\"\n for i in range(len(de_t['toks'])):\n word=de_t['toks'][i]\n label=de_t['labels'][i]\n if word == '$':\n continue\n BIO_base = BIO_base + word + \"\t\" + label + \"\\n\"\n BIO_data = BIO_data + '\\n'\n BIO_base = BIO_base + '\\n'\n # __import__('ipdb').set_trace()\n exist_map = self.get_sample_exist_entity2rlation(word_list, data['spo_list'])\n if case == 0:\n all_e2r, rel_num, NAs = self.get_sample_all_entity2relation(tag, exist_map)\n NA_num += NAs\n else:\n all_e2r = []\n for key in exist_map.keys():\n e2r = [key[0], key[1], key[2], key[3], exist_map[key]]\n all_e2r.append(e2r)\n all_rel_num += len(exist_map)\n max_r_num = max(max_r_num, len(all_e2r))\n ent_rel.append(all_e2r)\n sens = np.array(sens)\n tags = np.array(tags)\n ent_rel = np.array(ent_rel)\n\n root_path = self.opt.data_root\n base=\"\"\n if case == 0:\n branch = 'train.tsv'\n base ='train_base.tsv'\n if case == 1:\n branch = 'dev.tsv'\n base ='dev_base.tsv'\n if case == 2:\n branch = 'test.tsv'\n base ='test_base.tsv'\n if case == 3:\n branch = 'test2.tsv'\n data_root = root_path\n print(\"存在关系数:{};NA关系数{}; 每句话中最大关系数(含NA):{}\".format(all_rel_num, NA_num, max_r_num))\n print(\"真实最大长度{}; 设置最大长度{}; 超过长度数{}\".format(rel_max_sen, self.opt.seq_length, exceed_length_num))\n print(\"saving data in {}\".format(data_root))\n\n with open(data_root+branch, 'w') as f:\n f.write(BIO_data)\n with open(data_root+base, 'w') as f:\n f.write(BIO_base)\n # np.save(data_root + 'sens', sens)\n # np.save(data_root + 'tags', tags)\n # np.save(data_root + 'relations', ent_rel)\n\n def process_data(self):\n if self.origin_train_data is not None:\n print(\"process train data\")\n self.get_sens_and_tags_and_entsRel(self.origin_train_data, case=0)\n if self.origin_dev_data is not None:\n print(\"process dev data\")\n self.get_sens_and_tags_and_entsRel(self.origin_dev_data, case=1)\n if self.origin_test1_data is not None:\n print(\"process test1 data\")\n self.get_sens_and_tags_and_entsRel(self.origin_test1_data, case=2)\n print(\"确定数据质量...\")\n # metrics.judge_data_quality(self.opt)\n\n\nif __name__ == '__main__':\n dataHelper = DataHelper(opt)\n # metrics.judge_data_quality(opt)\n dataHelper.process_data()\n" 
]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jdieter31/riemannian-nlp
[ "75ef47608c81ec6e925fe24d16c67985e4b987c6" ]
[ "riemann/data/data_ingredient.py" ]
[ "from math import floor\n\nimport numpy as np\n\nfrom .graph import load_edge_list, load_adjacency_matrix\nfrom .graph_dataset import BatchedDataset\nfrom ..config_loader import get_config\n\n\ndef load_dataset():\n data_config = get_config().data\n\n if graph_data_type == \"edge\":\n idx, objects, weights = load_edge_list(data_config.path, data_config.symmetrize,\n delimiter=data_config.delimiter)\n else:\n idx, objects, weights = load_adjacency_matrix(data_config.path,\n data_config.graph_data_format,\n data_config.symmetrize)\n\n # define a feature function\n if data_config.object_id_to_feature_func == \"conceptnet\":\n features = [' '.join(object_id.split('_')) for object_id in objects]\n elif data_config.object_id_to_feature_func == \"wordnet\":\n # placental_mammal.n.01 -> placental mammal\n features = [' '.join(object_id.split('.')[0].split('_')) for object_id in objects]\n elif data_config.object_id_to_feature_func == \"id\":\n # xyz -> xyz\n features = [object_id for object_id in objects]\n else:\n features = None\n\n if make_eval_split:\n np.random.seed(data_config.split_seed)\n shuffle_order = np.arange(idx.shape[0])\n np.random.shuffle(shuffle_order)\n num_eval = floor(idx.shape[0] * data_config.split_size)\n eval_indices = shuffle_order[:num_eval]\n train_indices = shuffle_order[num_eval:]\n train_idx = idx[train_indices]\n train_weights = weights[train_indices]\n eval_idx = idx[eval_indices]\n eval_weights = weights[eval_indices]\n\n train_data = BatchedDataset(\n train_idx,\n objects,\n train_weights,\n data_config.manifold,\n data_config.n_graph_neighbors,\n data_config.n_manifold_neighbors,\n data_config.n_rand_neighbors,\n data_config.batch_size,\n data_config.num_workers,\n data_config.nn_workers,\n data_config.manifold_nn_k,\n features,\n saved_data_file=data_config.graph_data_file,\n gen_data=data_config.gen_graph_data\n )\n\n eval_data = BatchedDataset.initialize_eval_dataset(\n train_data,\n eval_batch_size,\n data_config.n_eval_neighbors,\n data_config.max_eval_graph_neighbors,\n data_config.eval_workers,\n data_config.eval_nn_workers,\n manifold_neighbors=data_config.eval_manifold_neighbors,\n eval_edges=eval_idx,\n eval_weights=eval_weights)\n\n return train_data, eval_data\n else:\n train_data = BatchedDataset(\n idx,\n objects,\n weights,\n manifold,\n data_config.n_graph_neighbors,\n data_config.n_manifold_neighbors,\n data_config.n_rand_neighbors,\n data_config.batch_size,\n data_config.num_workers,\n data_config.nn_workers,\n data_config.manifold_nn_k,\n features,\n saved_data_file=data_config.graph_data_file,\n gen_data=data_config.gen_graph_data)\n\n eval_data = BatchedDataset.initialize_eval_dataset(\n train_data,\n data_config.eval_batch_size,\n data_config.n_eval_neighbors,\n data_config.max_eval_graph_neighbors,\n data_config.eval_workers,\n data_config.eval_nn_workers,\n manifold_neighbors=data_config.eval_manifold_neighbors,\n saved_data_file=data_config.graph_data_file,\n gen_data=data_config.gen_graph_data)\n\n return train_data, eval_data\n\n\ndef get_adjacency_dict(data):\n adj = {}\n for row in data.idx:\n x = row[0]\n y = row[1]\n if x in adj:\n adj[x].add(y)\n else:\n adj[x] = {y}\n return adj\n" ]
[ [ "numpy.arange", "numpy.random.shuffle", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dpopadic/arpmRes
[ "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76" ]
[ "scripts/sources/s_checklist_scenariobased_step07.py", "functions_legacy/GraphicalLasso.py", "scripts/sources/S_PricingEquityTaylor.py", "scripts/sources/s_estimation_copmarg_ratings.py", "scripts/sources/S_KolSmirnTestSVI.py", "functions_legacy/MetrHastAlgo.py", "scripts/sources/s_mean_var_solution_robust.py", "functions_legacy/PlotTwoDimEllipsoid.py", "scripts/sources/S_LognormEuclidBasis.py", "functions_legacy/EMalgorithmFP.py", "arpym/statistics/invariance_test_ks.py", "scripts/sources/s_checklist_scenariobased_step10.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # s_checklist_scenariobased_step07 [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: inline;\">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step07&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-7).\n\n# +\nimport numpy as np\nimport pandas as pd\n\nfrom arpym.portfolio import spectral_index\nfrom arpym.statistics import meancov_sp, quantile_sp\n\n# -\n\n# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-parameters)\n\n# +\n# indicates which projection to continue from\n# True: use copula-marginal projections\n# False: use historical projections\ncopula_marginal = True\n\nlam = 3e-7 # parameter of exponential utility function\nc_quantile = 0.95 # confidence level for the quantile satisfaction measure\nc_es = 0.95 # confidence level for the negative expected shortfall\n# -\n\n# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step00): Load data\n\n# +\npath = '../../../databases/temporary-databases/'\n\nif copula_marginal:\n # Projection\n db_projection_tools = pd.read_csv(path + 'db_projection_tools.csv')\n j_ = int(db_projection_tools['j_'][0])\n\n db_scenprob = pd.read_csv(path + 'db_scenario_probs.csv')\n p = db_scenprob['p'].values\n\n # Pricing\n db_pricing = pd.read_csv(path + 'db_pricing.csv')\n pi_tnow_thor = db_pricing.values\n\n # Aggregation\n db_exante_perf = pd.read_csv(path + 'db_exante_perf.csv')\n y_h = db_exante_perf.values.squeeze()\n\nelse:\n # Projection\n db_projection_tools = pd.read_csv(path + 'db_projection_bootstrap_tools.csv')\n j_ = int(db_projection_tools['j_'][0])\n\n db_scenprob = pd.read_csv(path + 'db_scenario_probs_bootstrap.csv')\n p = db_scenprob['p'].values\n\n # Pricing\n db_pricing = pd.read_csv(path + 'db_pricing_historical.csv')\n pi_tnow_thor = db_pricing.values\n\n # Aggregation\n db_exante_perf = pd.read_csv(path + 'db_exante_perf_historical.csv')\n y_h = db_exante_perf.values.squeeze()\n\ndb_holdings = pd.read_csv(path + 'db_holdings.csv')\nh = np.squeeze(db_holdings.values)\n# -\n\n# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step01): Calculate certainty equivalent satisfaction measure\n\n# +\n# expected utility\nexpected_utility = p @ (-np.exp(-lam * y_h)) # expected utility computation\n\n# certainty equivalent satisfaction measure\ncert_eq_yh = -(1 / lam) * np.log(-expected_utility)\n# -\n\n# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step02): Quantile satisfaction measure\n\n# quantile\nq_yh = quantile_sp(1 - c_quantile, y_h, p, method='kernel_smoothing')\n\n\n# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step03): Expected shortfall satisfaction measure\n\n# +\n# indicator function\ndef indicator(x):\n return (0 <= x and x <= 1 - c_es)\n\n\n# spectrum function\ndef spectr_es(x):\n return (1 / (1 - c_es)) * indicator(x)\n\n\n# negative expected shortfall\nes_yh, _ = spectral_index(spectr_es, pi_tnow_thor,\n p, h)\n# -\n\n# ## [Step 
4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step04): Expectation and variance satisfaction measures\n\n# expectation satisfaction measure\nmean_yh, var_yh = meancov_sp(y_h, p)\n# opposite of variance is satisfaction measure\nneg_var_yh = -var_yh\n\n# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step05): Save database\n\n# +\nout = pd.DataFrame({'cert_eq_yh': pd.Series(cert_eq_yh),\n 'q_yh': pd.Series(q_yh),\n 'es_yh': pd.Series(es_yh),\n 'mean_yh': pd.Series(mean_yh),\n 'neg_var_yh': pd.Series(neg_var_yh),\n 'c_es': pd.Series(c_es),\n 'c_quantile': pd.Series(c_quantile)})\nif copula_marginal:\n out.to_csv(path + 'db_quantile_and_satis.csv',\n index=False)\nelse:\n out.to_csv(path + 'db_quantile_and_satis_historical.csv',\n index=False)\n\ndel out\n", "import matplotlib.pyplot as plt\nfrom numpy import array, ones, zeros, cov\n\nplt.style.use('seaborn')\nfrom fglasso import glasso\n\n\ndef GraphicalLasso(pop, lam, initStruct=None, approximate=0, warmInit=0, verbose=0, penalDiag=1, tolThreshold=1e-4, maxIter=1e4, w=None, theta=None):\n # [w, theta, iter, avgTol, hasError] = GraphicalLasso(pop, lam,\n # initStruct, approximate, warmInit, verbose, penalDiag, tolThreshold,\n # maxIter, w, theta)\n #\n # Computes a regularized estimate of covariance matrix and its inverse\n # Inputs:\n # - pop: the set of samples to be used for covariance estimation in an NxP\n # matrix where N is the number of samples and P is the number of variables\n # - lam: the regularization penalty. Can be a single number or a matrix\n # of penalization values for each entry in the covariance matrix\n # - initStruct(o@): a matrix of size PxP, where zero entries will force\n # the corresponding entries in the inverse covariance matrix to be zero.\n # - approximate([o]): a flag indicating whether to use approximate estimation\n # (Meinhausen-Buhlmann approximation)\n # - warmInit[o]: a flag indicating whether the estimation will start from\n # given initial values of w and theta\n # - verbose([o]): a flag indicating whether to output algorithm process\n # - penalDiag[o]: a flag indicating whether to penalize diagonal elements of\n # the covariance matrix\n # - tolThreshold[o]: the amount of tolerance acceptable for covariance matrix\n # elements before terminating the algorithm\n # - maxIter([o]): maximum number of iteration to perform in the algorithm\n # - w[o]: the initial value of covariance matrix used for warm initialization\n # - theta([o]): the initial value of inverse covariance matrix used for warm\n # initialization\n # @: o indicates optional arguments\n # Outputs:\n # - w: the estimated covariance matrix\n # - theta: the estimated inverse covariance matrix\n # - iter: actual number of iterations performed in the algorithm\n # - avgTol: average tolerance of covariance matrix entries before\n # terminating the algorithm\n # - hasError: a flag indicating whether the algorithm terminated\n # erroneously or not\n #\n # Code by: Hossein Karshenas ([email protected])\n # Date: 10 Feb 2011\n\n numVars = pop.shape[1]\n if isinstance(lam,float):\n lam = array([[lam]])\n m, n = lam.shape\n if m != n:\n raise ValueError('Regularization coefficients matrix should be symmetric matrix')\n elif m > 1 and m != numVars:\n raise ValueError('Regularization coefficients matrix should have a size equal to the number of variables')\n if m == 1:\n lam = lam * ones((numVars,numVars))\n\n if initStruct is not None:\n 
initStruct = 1 - initStruct\n initStruct = 10e9*initStruct\n lam = lam + initStruct\n\n if w is None:\n if warmInit is False:\n raise ValueError('In warm initialization mode starting values for the covariance and precision matrices should be determined')\n else:\n w = zeros((numVars,numVars),order='F')\n if theta is None:\n if warmInit is False:\n raise ValueError('In warm initialization mode starting values for the precision matrix should be determined')\n else:\n theta = zeros((numVars,numVars),order='F')\n\n niter = 0\n jerr = 0.0\n # glasso(cov(pop.T), lam, 1, 1, 1, penalDiag, tolThreshold, maxIter, w, theta, niter, jerr, 1)\n glasso(cov(pop.T), lam, approximate, 0, verbose, penalDiag, tolThreshold, maxIter, w, theta, niter, jerr, numVars)\n # w, theta, iter, avgTol, hasError = glasso(cov(pop.T), lam, approximate, 0, verbose, penalDiag, tolThreshold, maxIter, w, theta, 0, 0, numVars)\n\n if False:\n raise Warning('The execution of the algorithm caused errors')\n return w, theta, niter, jerr\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # S_PricingEquityTaylor [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: inline;\">](https://www.arpm.co/lab/redirect.php?code=S_PricingEquityTaylor&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-taylor-equity-pl).\n\n# ## Prepare the environment\n\n# +\nimport os\nimport os.path as path\nimport sys\n\nsys.path.append(path.abspath('../../functions-legacy'))\nfrom collections import namedtuple\n\nfrom numpy import arange, array, ones, zeros, sort, where, diff, linspace, round, log, exp, sqrt\nfrom numpy import sum as npsum, min as npmin, max as npmax\n\nfrom scipy.stats import norm\nfrom scipy.io import loadmat\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure, plot, legend, xlim, ylabel, \\\n xlabel, title, xticks, yticks\n\nplt.style.use('seaborn')\n\nfrom CONFIG import GLOBAL_DB, TEMPORARY_DB\nfrom ARPM_utils import save_plot, struct_to_dict\nfrom FPmeancov import FPmeancov\nfrom HistogramFP import HistogramFP\nfrom SaddlePointQuadN import SaddlePointQuadN\nfrom SimulateBrownMot import SimulateBrownMot\n# -\n\n# ## run S_PricingEquityProfitAndLoss\n\nfrom S_PricingEquityProfitAndLoss import *\n\n# ## Compute the first order approximation of the equity P&L, which has normal distribution,\n# ## and the second order approximation of the equity P&L, that has generalized non-central gamma distribution,\n# ## at the selected horizon (120 days). 
Use function SaddlePointQuadN to compute the cumulative distribution\n# ## function of the generalized non-central gamma distribution.\n\n# +\nn_ = 500\nhor_sel = 120 # selected horizon for the plot (120 days)\ni = where(horiz_u == hor_sel)[0][-1]\nx_hor = zeros((n_, i+1))\nTaylor_first = zeros((n_, i+1))\ncdf_QuadN = zeros((n_, i+1))\nTaylor_second = zeros((n_, i+1))\n\nx_hor[:,i] = linspace(Mu_PL[0, i] - 10*Sigma_PL[0, i], Mu_PL[0, i] + 10 *Sigma_PL[0, i], n_)\n# first order approximation (normal pdf)\nTaylor_first[:,i] = norm.pdf(x_hor[:,i], exp(x[0,-1])*mu*horiz_u[i + 1],exp(x[0,-1])*sig*sqrt(horiz_u[i + 1]))\n# second order approximation (QuadN pdf)\nb, c, mu2, sigma2 = array([[exp(x[0,-1])]]), array([[exp(x[0,-1])*0.5]]), mu*horiz_u[i + 1], sig**2*horiz_u[i + 1]\n_, Taylor_second[:,i] = SaddlePointQuadN(x_hor[:,[i]].T, 0, b, c, mu2, sigma2) # QuadN cumulative density function\n# Taylor_second(:,i) = diff(cdf_QuadN(:,i))/diff(x_hor((:,i)))\n# -\n\n# ## Plot a few (say 15) simulated paths of the equity P&L up to the selected horizon (120 days),\n# ## along with the first order approximation, the second order approximation and the analytical\n# ## distribution of the equity P&L. Furthermore, show the mean and the standard deviation of\n# ## the analytical distribution.\n\n# +\nlgrey = [0.8, 0.8, 0.8] # light grey\ndgrey = [0.4, 0.4, 0.4] # dark grey\nlblue = [0.27, 0.4, 0.9] # light blu\norange = [0.94, 0.35, 0] # orange\nj_sel = 15 # selected MC simulations\n\nfigure()\n# simulated path, mean and standard deviation\nplot(horiz_u[:i], PL[:j_sel,:i].T, color=lgrey)\nplt.xticks(arange(0,t_end+20,20))\nxlim([npmin(horiz_u), npmax(horiz_u)+1])\nl1 = plot(horiz_u[:i], Mu_PL[0,:i], color='g', label='mean')\nl2 = plot(horiz_u[:i], Mu_PL[0,:i] + Sigma_PL[0,:i], color='r', label=' + / - st.deviation')\nplot(horiz_u[:i], Mu_PL[0,:i] - Sigma_PL[0,:i], color='r')\n\n# analytical pdf\nflex_probs_scenarios = ones((1, j_)) / j_\noption = namedtuple('option','n_bins')\noption = namedtuple('option', 'n_bins')\noption.n_bins = round(10 * log(j_))\ny_hist, x_hist = HistogramFP(PL[:,[i]].T, flex_probs_scenarios, option)\nscale = 0.15 * Sigma_PL[0, i] / npmax(y_hist)\ny_hist = y_hist * scale\nshift_y_hist = horiz_u[i] + y_hist\nemp_pdf = plt.barh(x_hist[:-1], shift_y_hist[0]-npmin(shift_y_hist[0]), height=x_hist[1]-x_hist[0],\n left=npmin(shift_y_hist[0]), facecolor=lgrey, edgecolor=lgrey,label='horizon pdf') # empirical pdf\nplot(shift_y_hist[0], x_hist[:-1], color=dgrey) # border\n\n# first order approximation\nTaylor_first[:,i] = Taylor_first[:,i]*scale\nshift_T_first = horiz_u[i] + Taylor_first[:,i]\nl3 = plot(shift_T_first, x_hor[:,i], color=orange, label='first order approx')\n\n# second order approximation\nTaylor_second[:,i] = Taylor_second[:,i]*scale\nshift_T_second = horiz_u[i] + Taylor_second[:,i]\nl4 = plot(shift_T_second, x_hor[:,i], color=lblue, label='second order approx')\n\nlegend()\nxlabel('time (days)')\nylabel('P&L')\ntitle('P&L equity Taylor approximation');\n# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])\n", "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # s_estimation_copmarg_ratings [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: 
inline;\">](https://www.arpm.co/lab/redirect.php?code=s_estimation_copmarg_ratings&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings).\n\n# ## Prepare the environment\n\n# +\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import t as tstu\n\nfrom arpym.statistics import cop_marg_sep, scoring, smoothing, mvt_pdf\nfrom arpym.estimation import conditional_fp, cov_2_corr, exp_decay_fp, fit_locdisp_mlfp, fit_garch_fp\n\n# -\n\n# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-parameters)\n\ntau_hl_prior = 4 * 252 # half-life parameter for time conditioning\ntau_hl_smooth = 21 # half-life parameter for VIX smoothing\ntau_hl_score = 5 * 21 # half-life parameter for VIX scoring\nalpha = 0.5 # proportion of obs. included in range for state conditioning\nnu_min = 2 # lower bound for the degrees of freedom for t copula\nnu_max = 20 # upper bound for the degrees of freedom for t copula\n\n# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step00): Upload data\n\n# +\npath = '../../../databases/global-databases/equities/db_stocks_SP500/'\ndb_stocks = pd.read_csv(path + 'db_stocks_sp.csv', skiprows=[0],\n index_col=0)\nv = db_stocks.loc[:, ['GE', 'JPM']].values\n\n# VIX (used for time-state conditioning)\nvix_path = '../../../databases/global-databases/derivatives/db_vix/data.csv'\ndb_vix = pd.read_csv(vix_path, usecols=['date', 'VIX_close'],\n index_col=0)\ndb_vix.index = pd.to_datetime(db_vix.index)\ndates = pd.to_datetime(db_stocks.loc[::20, ['GE', 'JPM']].index)\n# -\n\n# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step01): Fit GARCH process and extract realized invariants\n\n# select monthly values\nv = v[::20, :]\n# compute monthly compounded returns\nc = np.diff(np.log(v), axis=0)\n_, _, epsi_garch_ge = fit_garch_fp(c[:, 0])\n_, _, epsi_garch_jpm = fit_garch_fp(c[:, 1])\nepsi = np.c_[epsi_garch_ge, epsi_garch_jpm]\nt_ = v.shape[0] - 1\n\n# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step02): Set the flexible probabilities\n\n# state indicator: VIX compounded return realizations\nc_vix = np.diff(np.log(np.array(db_vix.loc[dates, :].VIX_close)))\n# smoothing\nz_smooth = smoothing(c_vix, tau_hl_smooth)\n# scoring\nz = scoring(z_smooth, tau_hl_score)\n# target value\nz_star = z[-1]\n# prior probabilities\np_prior = exp_decay_fp(t_, tau_hl_prior)\n# posterior probabilities\np = conditional_fp(z, z_star, alpha, p_prior)\n\n# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step03): Estimate t copula\n\n# +\n# calculate grades of the compounded returns\nu, _, _ = cop_marg_sep(epsi, p)\n\n# grid for the degrees of freedom parameter\nnu_copula = np.arange(nu_min, nu_max + 1)\nl_ = len(nu_copula)\n\nrho2_copula_vec = np.zeros((2, 2, l_))\nllike_nu = np.zeros(l_)\n\nfor l in range(l_):\n # t-distributed invariants\n epsi_tilde = tstu.ppf(u, nu_copula[l])\n\n # maximum likelihood\n _, sig2_hat = fit_locdisp_mlfp(epsi_tilde, nu=nu_copula[l],\n threshold=10 ** -3, maxiter=1000)\n # compute correlation matrix\n rho2_copula_vec[:, :, l], _ = cov_2_corr(sig2_hat)\n\n # compute log-likelihood at times with no missing values\n llike_nu[l] = np.sum(p * np.log(mvt_pdf(epsi, np.zeros(2),\n rho2_copula_vec[:, :, l],\n nu_copula[l])))\n\n# choose nu that gives the 
highest log-likelihood\nl_max = np.argmax(llike_nu)\nnu_hat = nu_copula[l_max]\nrho2_hat = rho2_copula_vec[:, :, l_max]\n# -\n\n# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step04): Save database\n\nout = {'rho2': pd.Series(rho2_hat[0, 1]),\n 'nu': pd.Series(nu_hat)}\nout = pd.DataFrame(out)\npath = '../../../databases/temporary-databases/'\nout.to_csv(path + 'db_copula_ratings.csv')\ndel out\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 'his script performs the Kolmogorov-Smirnov test for invariance on the\n# parameter increments of SVI model.\n# -\n\n# ## For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=exer-sviiid-copy-1).\n\n# +\n# ## Prepare the environment\n\n# +\nimport os\nimport os.path as path\nimport sys\n\nsys.path.append(path.abspath('../../functions-legacy'))\n\nfrom numpy import diff\n\nfrom scipy.io import loadmat\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\n\nplt.style.use('seaborn')\n\nfrom CONFIG import GLOBAL_DB, TEMPORARY_DB\nfrom ARPM_utils import save_plot\nfrom TestKolSmirn import TestKolSmirn\nfrom InvarianceTestKolSmirn import InvarianceTestKolSmirn\n# -\n\n# ## Load the database generated by script S_FitSVI\n\n# +\ntry:\n db = loadmat(os.path.join(GLOBAL_DB, 'db_FitSVI'))\nexcept FileNotFoundError:\n db = loadmat(os.path.join(TEMPORARY_DB, 'db_FitSVI'))\n\ntheta = db['theta']\n# -\n\n# ## Compute increments and perform Kolmogorov-Smirnov test\n\n# +\n# initialize variables\ndelta_theta = {}\ns1 = {}\ns2 = {}\nint = {}\nF_s1 = {}\nF_s2 = {}\nup = {}\nlow = {}\n\nfor k in range(6):\n delta_theta[k] = diff(theta[k, :]).reshape(1, -1) # increments\n [s1[k], s2[k], int[k], F_s1[k], F_s2[k], up[k], low[k]] = TestKolSmirn(delta_theta[k]) # Kolmogorov-Smirnov test\n# -\n\n# ## Plot the results of the IID test\n\n# +\n# position settings\npos = {}\npos[0] = [0.1300, 0.74, 0.3347, 0.1717]\npos[1] = [0.5703, 0.74, 0.3347, 0.1717]\npos[2] = [0.1300, 0.11, 0.7750, 0.5]\npos[3] = [0.15, 1.71]\n# names of figures\nname = {}\nname[0] = r'Invariance test (increments of $\\theta_1$)'\nname[1] = r'Invariance test (increments of $\\theta_2$)'\nname[2] = r'Invariance test (increments of $\\theta_3$)'\nname[3] = r'Invariance test (increments of $\\theta_4$)'\nname[4] = r'Invariance test (increments of $\\theta_5$)'\nname[5] = r'Invariance test (increments of $\\theta_6$)'\n\nfor k in range(6):\n f = figure()\n InvarianceTestKolSmirn(delta_theta[k], s1[k], s2[k], int[k], F_s1[k], F_s2[k], up[k], low[k], pos, name[k]);\n # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])\n", "import numpy as np\nfrom numpy import zeros\nfrom numpy.random import rand, randn\n\n\ndef MetrHastAlgo(f_hat,f_pri,theta_0,j_):\n # This function performs the Metropolis-Hastings algorithm to generate a\n # sample of length j_ from the univariate posterior distribution defined by\n # the conditional likelihood f_hat and the prior f_pi. 
The candidate-\n # generating function is a normal pdf with unitary variance, parametrized\n # by the expectation.\n # INPUTS\n # f_hat :[handle] handle of conditional likelihood pdf\n # f_pri :[handle] handle of prior pdf\n # theta_0 :[scalar] initial value\n # j_ :[scalar] length of the sample to be generated\n # OPS\n # theta :[vector](1 x j_) generated sample\n # a_rate :[scalar] acceptance rate\n\n # For details on the exercise, see here .\n\n ## code\n u = rand(1,j_)# pre-allocate the draws from the uniform distribution (step 2)\n gamma = randn(1,j_)# pre-allocate the draws from standard normal distribution\n\n theta = zeros(j_+1)\n theta[0] = theta_0\n accepted = 0\n for j in range(j_):\n # step 1\n xi = theta[j] + gamma[0,j]# generate candidate from a distribution N(theta([j],1))\n # step 2 already performed\n # step 3\n beta = (f_hat(xi)*f_pri(xi))/(f_hat(theta[j])*f_pri(theta[j]))\n alpha = np.minimum(beta, 1)\n # step 4\n if u[0,j] <= alpha:\n theta[j+1]= xi\n accepted = accepted+1\n else:\n theta[j+1]= theta[j]\n theta = theta[1:]\n a_rate = accepted/j_\n return theta,a_rate\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # s_mean_var_solution_robust [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: inline;\">](https://www.arpm.co/lab/redirect.php?code=s_mean_var_solution_robust&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-mean-var-solution-robust).\n\n# +\nimport numpy as np\nimport cvxopt\nfrom scipy.stats import chi2\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom arpym.tools.logo import add_logo\n# -\n\n# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-parameters)\n\n# +\nn_ = 10 # number of stocks\nv_budget = 1000 # budget at time t_now\n\nv_in = 200 # initial variance\nv_fin = 5000 # final variance\nv_ = 100 # variance grid\np_in = 10**-9 # initial probability\np_fin = 0.25 # final probability\np_ = 10 # probability grid\n\nr_rf = 0.02 # risk-free rate\n# -\n\n# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-implementation-step01): Generate current values and P&L expectation and covariance and define robustness matrix\n\n# +\nv_tnow = np.random.lognormal(4, 0.05, n_)\n\nmu_pi = 0.5*np.arange(1, n_+1)\nsig2_pi = 0.2*np.ones((n_, n_)) + 0.8*np.eye(n_)\nsig2_pi = np.diag(mu_pi)@[email protected](mu_pi)\n\n\n# robustness matrix is the diagonal matrix of the P&L's variances\nt = np.diag(np.diag(sig2_pi))\n# high penalty for low-variance P&L's\nt[t >= np.median(np.diag(t))] = 10**-5*t[t >= np.median(np.diag(t))]\n# -\n\n# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-implementation-step02): Spectral decompositions of the matrices sig2_pi, t\n\nlam2_sig2_pi, e_sig2_pi = np.linalg.eig(sig2_pi)\nlam2_sig2_pi = np.diag(lam2_sig2_pi)\nlam2_t, e_t = np.linalg.eig(t)\nlam2_t = np.diag(lam2_t)\n\n# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-implementation-step03): Solve the first step of the mean-variance approach\n\n# +\n# Constraints:\n# 1) budget constraint: h'*v_tnow = v_budget\n# 2) no-short-sale: h>=0\n\nv_span = np.linspace(v_in, v_fin, v_) # 
variances\np_span = np.linspace(p_in, p_fin, p_)\nq_span = np.sqrt(chi2.ppf(p_span, df=n_)) # quantiles\n\nh_lambda = np.zeros((n_, v_, p_))\nmu_h_lambda = np.zeros((v_, p_))\nsig2_h_lambda = np.zeros((v_, p_))\n\ncvxopt.solvers.options['show_progress'] = False\n\nprint('First step of mean-variance approach')\nfor v in range(v_):\n for q in range(p_):\n # objective\n c_opt = cvxopt.matrix(np.r_[1, -mu_pi], tc='d')\n\n # equality constraints: budget\n A_opt = cvxopt.matrix(np.r_[0, v_tnow], size=(1, n_+1), tc='d')\n b_opt = cvxopt.matrix(v_budget, tc='d')\n\n # inequality constraints\n # no-short-sale\n Gl_opt = cvxopt.matrix(np.block([[0, np.zeros((1, n_))],\n [np.zeros((n_, 1)), -np.eye(n_)]]))\n hl_opt = cvxopt.matrix(np.zeros((n_+1)))\n # variance\n Gq0_opt = cvxopt.matrix(np.block([[0, np.zeros((1, n_))],\n [np.zeros((n_, 1)),\n -np.sqrt(lam2_sig2_pi) @\n e_sig2_pi.T]]))\n hq0_opt = cvxopt.matrix(np.r_[np.sqrt(v_span[v]), np.zeros(n_)])\n # robustness\n Gq1_opt = cvxopt.matrix(np.block([[-1, np.zeros((1, n_))],\n [np.zeros((n_, 1)),\n -q_span[q] *\n np.sqrt(lam2_t)@e_t.T]]))\n hq1_opt = cvxopt.matrix(np.zeros(n_+1))\n\n Gq_opt = [Gq0_opt, Gq1_opt]\n hq_opt = [hq0_opt, hq1_opt]\n\n # solve\n prob = cvxopt.solvers.socp(c=c_opt,\n Gl=Gl_opt, hl=hl_opt,\n Gq=Gq_opt, hq=hq_opt,\n A=A_opt, b=b_opt)\n\n if prob['x'] is not None:\n h_lambda[:, v, q] = np.array(prob['x'])[1:, 0]\n else:\n print('\\nInfeasible problem for parameters:\\n')\n print('v = ' + str(v_span[v]) + ' ' + 'p = ' + str(p_span[q]))\n\n # Compute the efficient frontier\n mu_h_lambda[v, q] = h_lambda[:, v, q]@mu_pi\n sig2_h_lambda[v, q] = h_lambda[:, v, q].T @\\\n sig2_pi @\\\n h_lambda[:, v, q]\n# -\n\n# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-implementation-step03): Compute weights\n\nw_lambda = (h_lambda.T*v_tnow).T / v_budget\n\n# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_mean_var_solution_robust-implementation-step04): Solve the second step of the mean-variance approach\n\n# +\nprint('Second step of mean-variance approach')\n\n# satisfaction = Sharpe ratio\nsatis_h_lambda = mu_h_lambda / np.sqrt(sig2_h_lambda)\n\n# optimal variance and robustness penalty\nlambda_star_ind = np.where(satis_h_lambda == satis_h_lambda.max())\nv_star_ind = lambda_star_ind[0][0]\nq_star_ind = lambda_star_ind[1][0]\nv_star = v_span[v_star_ind]\nq_star = q_span[q_star_ind]\n# optimal holdings and weights\nh_qsi_star = h_lambda[:, v_star_ind, q_star_ind]\nw_qsi_star = w_lambda[:, v_star_ind, q_star_ind]\n# -\n\n# ## Plots\n\n# +\nplt.style.use('arpm')\n\nx0 = max(np.sqrt(sig2_h_lambda[:, 0]).min(),\n np.sqrt(sig2_h_lambda[:, -1]).min())\nx1 = min(np.sqrt(sig2_h_lambda[:, 0]).max(),\n np.sqrt(sig2_h_lambda[:, -1]).max())\nxlim = [x0, x1]\n\nfig = plt.figure()\n\n# Non-robust\nax11 = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=1)\nplt.plot(np.sqrt(sig2_h_lambda[:, 0]),\n mu_h_lambda[:, 0])\nplt.plot(np.sqrt(sig2_h_lambda[v_star_ind, 0]),\n mu_h_lambda[v_star_ind, 0],\n '.', markersize=15, color='k')\nplt.legend(['Efficient frontier', 'Optimal holdings'])\nplt.ylabel('$E\\{Y_{h}\\}$')\nplt.xlabel('$Sd\\{Y_{h}\\}$')\nplt.xlim(xlim)\nstr_opt = '$p =$ %1.2f %%' % np.float(100*p_span[0])\nplt.text(0.8, 0.1, str_opt, horizontalalignment='center',\n verticalalignment='center', transform=ax11.transAxes)\nplt.title('Non-robust mean-variance efficient frontier', fontweight='bold')\n\nax12 = plt.subplot2grid((2, 4), (1, 0), colspan=2, rowspan=2)\ncolors = 
cm.get_cmap('Spectral')(np.arange(n_)/n_)[:, :3]\nfor n in range(n_):\n if n == 0:\n plt.fill_between(np.sqrt(sig2_h_lambda[:, 0]),\n w_lambda[n, :, 0],\n np.zeros(v_), color=colors[n, :])\n else:\n plt.fill_between(np.sqrt(sig2_h_lambda[:, 0]),\n np.sum(w_lambda[:n+1, :, 0], axis=0),\n np.sum(w_lambda[:n, :, 0], axis=0),\n color=colors[n, :])\nplt.axvline(x=np.sqrt(sig2_h_lambda[v_star_ind, 0]), color='k')\nplt.ylabel('$w$')\nplt.xlabel('$Sd\\{Y_{h}\\}$')\nplt.xlim(xlim)\nplt.ylim([0, 1])\nplt.title('Non-robust portfolio weights', fontweight='bold')\n\nplt.tight_layout()\n\n# Robust\nax21 = plt.subplot2grid((2, 4), (0, 2), colspan=2, rowspan=1)\nplt.plot(np.sqrt(sig2_h_lambda[:, -1]),\n mu_h_lambda[:, -1])\nplt.plot(np.sqrt(sig2_h_lambda[v_star_ind, -1]),\n mu_h_lambda[v_star_ind, -1],\n '.', markersize=15, color='k')\nplt.legend(['Efficient frontier', 'Optimal holdings'])\nplt.ylabel('$E\\{Y_{h}\\}$')\nplt.xlabel('$Sd\\{Y_{h}\\}$')\nplt.xlim(xlim)\nstr_opt = '$p =$ %1.2f %%' % np.float(100*p_span[-1])\nplt.text(0.8, 0.1, str_opt, horizontalalignment='center',\n verticalalignment='center', transform=ax21.transAxes)\nplt.title('Robust mean-variance efficient frontier', fontweight='bold')\nadd_logo(fig, axis=ax21, location=5, size_frac_x=1/8)\nplt.tight_layout()\n\nax22 = plt.subplot2grid((2, 4), (1, 2), colspan=2, rowspan=1)\ncolors = cm.get_cmap('Spectral')(np.arange(n_)/n_)[:, :3]\nfor n in range(n_):\n if n == 0:\n plt.fill_between(np.sqrt(sig2_h_lambda[:, -1]),\n w_lambda[n, :, -1],\n np.zeros(v_), color=colors[n, :])\n else:\n plt.fill_between(np.sqrt(sig2_h_lambda[:, -1]),\n np.sum(w_lambda[:n+1, :, -1], axis=0),\n np.sum(w_lambda[:n, :, -1], axis=0),\n color=colors[n, :])\nplt.axvline(x=np.sqrt(sig2_h_lambda[v_star_ind, -1]), color='k')\nplt.ylabel('$w$')\nplt.xlabel('$Sd\\{Y_{h}\\}$')\nplt.xlim(xlim)\nplt.ylim([0, 1])\nplt.title('Robust portfolio weights', fontweight='bold')\n\nplt.tight_layout()\n", "from matplotlib.pyplot import plot, grid, axis\nfrom numpy import cos, sin, pi, linspace, diag, sqrt, tile, array, sort, argsort, diagflat, maximum\nfrom numpy.linalg import eig\n\nimport numpy as np\nnp.seterr(invalid='ignore')\n\n\ndef PlotTwoDimEllipsoid(mu, sigma2, r=1, PlotAxes=False, PlotTangBox=False, color='k', linewidth=2, PlotEll=True,\n n_points=1000, fig=None, ax=None):\n '''This def creates and plots the two dimensional ellipsoid:\n (x - mu).T * (sigma2**(-1)) * (x - mu) = r**2\n INPUTS\n mu : [vector] (2 x 1)\n sigma2 : [matrix] (2 x 2) symmetric and positive definite matrix\n r : [scalar] radius of the ellipsoid\n PlotAxes : [boolean] if true then the principal axes are plotted\n PlotTangBox : [boolean] if true then the tangent box is plotted.\n color : [char] color of the line defining the ellipsoid\n linewidth : [scalar] width of the line defining the ellipsoid\n PlotEll : [boolean] if true then the ellipsoid is plotted\n n_points : [scalar] number of points of the ellipsoid\n OUTPUTS\n ell_handle : [figure handle] ellipsoid\n ell_points : [matrix] (2 x n_points) points of the ellipsoid\n ax1, ax2 : [figure handle] principal axes\n'''\n\n # For details on the exercise, see here .\n ## Code\n theta = linspace(0, 2 * pi, n_points)\n\n # compute the initial sphere\n y = [r * cos(theta), r * sin(theta)]\n\n # principal axes\n y_axes1 = array([[-r, r], [0, 0]])\n y_axes2 = array([[0, 0], [-r, r]])\n\n # spectral decomposition of sigma2\n Diag_lambda2, e = eig(sigma2)\n lambda2, order = sort(Diag_lambda2), argsort(Diag_lambda2)\n e = e[:, order]\n Diag_lambda = 
diagflat(sqrt(maximum(lambda2,0)))\n\n # compute the ellipsoid as affine transformation of the sphere\n u = e@Diag_lambda@y\n u_axes1 = e@Diag_lambda@y_axes1\n u_axes2 = e@Diag_lambda@y_axes2\n ell_points = tile(mu, (1, n_points)) + u\n\n # if fig is None and ax is None:\n # fig = figure()\n # ax = fig.add_subplot()\n # elif ax is None:\n # ax = gca()\n # plot the ellipsoid\n if PlotEll:\n ell_handle = plot(ell_points[0], ell_points[1], lw=linewidth, color=color)\n grid(True)\n else:\n ell_handle = None\n\n # plot the tangent box\n if PlotTangBox:\n sigvec = sqrt(diag(sigma2))\n\n tangBox_low = [[mu[0] - r * sigvec[0], mu[0] + r * sigvec[0]], [mu[1] - r * sigvec[1], mu[1] - r * sigvec[1]]]\n tangBox_up = [[mu[0] - r * sigvec[0], mu[0] + r * sigvec[0]], [mu[1] + r * sigvec[1], mu[1] + r * sigvec[1]]]\n tangBox_left = [[mu[0] - r * sigvec[0], mu[0] - r * sigvec[0]], [mu[1] - r * sigvec[1], mu[1] + r * sigvec[1]]]\n tangBox_right = [[mu[0] + r * sigvec[0], mu[0] + r * sigvec[0]], [mu[1] - r * sigvec[1], mu[1] + r * sigvec[1]]]\n\n h1 = plot(tangBox_low[0], tangBox_low[1], color=color, lw=linewidth)\n h2 = plot(tangBox_up[0], tangBox_up[1], color=color, lw=linewidth)\n h3 = plot(tangBox_left[0], tangBox_left[1], color=color, lw=linewidth)\n h4 = plot(tangBox_right[0], tangBox_right[1], color=color, lw=linewidth)\n\n # plot the principal axes\n if PlotAxes:\n ax1 = plot(u_axes1[0] + mu[0], u_axes1[1] + mu[1], color=color, lw=linewidth)\n ax2 = plot(u_axes2[0] + mu[0], u_axes2[1] + mu[1], color=color, lw=linewidth)\n axis('equal')\n else:\n ax1 = None\n ax2 = None\n\n return ell_handle, ell_points, ax1, ax2\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # S_LognormEuclidBasis [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: inline;\">](https://www.arpm.co/lab/redirect.php?code=S_LognormEuclidBasis&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EBRandGeomLogN).\n\n# ## Prepare the environment\n\n# +\nimport os\nimport os.path as path\nimport sys\n\nsys.path.append(path.abspath('../../functions-legacy'))\n\nimport numpy as np\nfrom numpy import array, ones, diag, eye, abs, exp, sqrt\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot, legend, xlim, ylim, scatter, subplots, ylabel, \\\n xlabel, quiver\n\nplt.style.use('seaborn')\n\nfrom ARPM_utils import save_plot\nfrom PlotTwoDimEllipsoid import PlotTwoDimEllipsoid\nfrom NormalScenarios import NormalScenarios\nfrom Riccati import Riccati\n\n# input parameters\nj_ = 5*10 ** 4 # number of simulations\nm = array([[0.17], [0.06]]) # (normal) expectation\nsvec = array([[0.24], [0.14]]) # (normal) standard deviation\nrho = 0.15 # (normal) correlation\n# -\n\n# ## Compute lognormal expectation and covariance\n\nc2_ = array([[1, rho], [rho, 1]]) # (normal) correlation matrix\ns2 = np.diagflat(svec)@[email protected](svec) # (normal) covariance matrix\nmu = exp(m + 0.5*diag(s2).reshape(-1,1)) # expectation\nsig2 = np.diagflat(mu)@(exp(s2) - ones((2, 1)))@np.diagflat(mu) # covariance matrix\n\n# ## Generate bivariate lognormal draws\n\nX = exp(NormalScenarios(m, s2, j_, 'Riccati')[0])\n\n# ## Compute a the Riccati root of the correlation matrix and the vectors\n\n# +\nsigvec = sqrt(diag(sig2)) # standard 
deviation\nc2 = np.diagflat(1 / sigvec)@[email protected](1 / sigvec) # correlation matrix\n\nc = Riccati(eye(2), c2)\nx = [email protected](sigvec)\n# -\n\n# ## Compute Euclidean measures\n\ninn_prods = x.T@x\nlens = sqrt(diag(inn_prods))\nangle = np.arccos(inn_prods[0, 1] / np.prod(lens))\ndistance = sqrt(inn_prods[0, 0] + inn_prods[1, 1] - 2*inn_prods[0, 1])\n\n# ## Display the scatter plot and the ellipsoid\n\n# +\nx1 = max(abs((x[0])))\nx2 = max(abs((x[1])))\n\nf, ax = subplots(1,2)\n\nplt.sca(ax[0])\nscatter(X[0], X[1], 0.5, [.8, .8, .8], '*')\nPlotTwoDimEllipsoid(mu, sig2, 1, [], 1, 'r', 2)\nxlabel('$X_1$')\nylabel('$X_2$')\nxlim([mu[0] - 1.5*x1, mu[0] + 1.5*x1])\nylim([mu[1] - 1.5*x2, mu[1] + 1.5*x2])\n# -\n\n# ## Display the vectors\n\nplt.sca(ax[1])\nquiver(0, 0, x[0, 0], x[1, 0], color = 'm', lw= 2, angles='xy',scale_units='xy',scale=1)\nquiver(0, 0, x[0, 1], x[1, 1], color = 'b', lw= 2, angles='xy',scale_units='xy',scale=1)\nquiv1 = plot(0, 0, color='m', lw= 2, marker=None)\nquiv2 = plot(0, 0, color='b', lw= 2, marker=None)\nplot(0, 0, 'o',markeredgecolor='k',markerfacecolor='w')\nplt.grid(True)\nxlim([- 1.5*x1, 1.5*x1])\nylim([- 1.5*x2, 1.5*x2])\nlegend(['$X_1$','$X_2$'])\nplt.tight_layout();\n# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])\n", "import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import arange, isnan, ones, zeros, tile, r_, diagflat\nfrom numpy import sum as npsum\nfrom numpy.linalg import norm, inv\n\nplt.style.use('seaborn')\n\nfrom FPmeancov import FPmeancov\n\n\ndef EMalgorithmFP(epsi,FP,nu,tol):\n #Expectation-Maximization with Flexible Probabilities for Missing Values\n #under Student t assumption (nu degrees of freedom)\n # INPUT\n # epsi : [matrix] (i_ x t_end) observations - with zeros's for missing values\n # FP : [vector] (1 x t_end) flexible probabilities\n # nu : [scalar] multivariate Student's t degrees of freedom\n # tol : [scalar] or [vector] (2 x 1) tolerance, needed to check convergence of mu and sigma2 estimates\n # OP\n # mu : [vector] (i_ x 1) EMFP estimate of the location parameter\n # sigma2 : [matrix] (i_ x i_) EMFP estimate of the scatter matrix\n\n # For details on the exercise, see here .\n\n #tolerance: needed to check convergence\n if isinstance(tol, float) or len(tol)==1:\n tol=[tol, tol]\n\n i_,t_=epsi.shape\n\n #step0: initialize\n I = isnan(epsi)\n\n Data = epsi[:,npsum(I,axis=0)==0]\n\n FPa = FP[[0],npsum(I,axis=0)==0].reshape(1,-1)\n FPa=FPa/npsum(FPa)\n\n # HFP mu and sigma2 on available data\n m, s2=FPmeancov(Data,FPa)\n # m = m[...,np.newaxis]\n s2 = s2[...,np.newaxis]\n\n w=ones((1,t_))\n\n Error=ones(len(tol))*10**6\n j=0\n # start main loop\n gamma = {}\n while any(Error>tol):\n j=j+1\n eps = zeros((epsi.shape[0],t_))\n for t in range(t_):\n gamma[t]=zeros((i_,i_))\n\n na=[]\n for i in range(i_):\n if isnan(epsi[i,t]):\n na=r_[na, i] #non-available\n\n a= arange(i_)\n if isinstance(na,np.ndarray):\n if na.size > 0:\n mask = np.ones(a.shape, dtype=bool) # np.ones_like(a,dtype=bool)\n na = list(map(int,na))\n mask[na] = False\n a = a[mask] #available\n\n A=i_-len(na) #|available|\n\n eps[a,t]=epsi[a,t]\n eps[na,t]=epsi[na,t]\n\n #step1:\n\n #update weights\n invs2 = inv(s2[np.ix_(a,a,[j-1])].squeeze())\n w[0,t]=(nu+A)/(nu+(eps[a,[t]]-m[a,[j-1]]).T@invs2@(eps[a,[t]]-m[a,[j-1]]))\n\n if na:\n #fill entries\n eps[na,t]=(m[na,[j-1]]+s2[np.ix_(na,a,[j-1])].squeeze()@invs2@(eps[a,[t]]-m[a,[j-1]])).flatten()\n\n #fill buffer\n 
gamma[t][np.ix_(na,na)]=s2[np.ix_(na,na,[j-1])].squeeze()-s2[np.ix_(na,a,[j-1])].squeeze()@invs2@s2[np.ix_(a,na,[j-1])].squeeze()\n\n #step[1:] update output\n new_m=(eps@(FP*w).T)/npsum(FP*w)\n m = r_['-1',m,new_m]\n gamma_p = zeros(gamma[0].shape+(t_,))\n for t in range(t_):\n gamma_p[:, :, t]= gamma[t]*FP[0,t]\n new_s2= (eps-tile(m[:,[j]],(1,t_)))@(diagflat(FP*w))@(eps-tile(m[:,[j]],(1,t_))).T+npsum(gamma_p,2)\n s2 = r_['-1',s2,new_s2[...,np.newaxis]]\n\n # step3: check convergence\n Error[0] = norm(m[:,j]-m[:,j-1])/norm(m[:,j-1])\n Error[1] = norm(s2[:,:,j]-s2[:,:,j-1],ord='fro')/norm(s2[:,:,j-1],ord='fro')\n\n mu=m[:,-1]\n sigma2=s2[:,:,-1]\n return mu, sigma2\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n# import seaborn as sns\n\nfrom arpym.statistics.cdf_sp import cdf_sp\nfrom arpym.tools.histogram_sp import histogram_sp\n\n\ndef invariance_test_ks(epsi, *, conf_lev=0.95,\n title='Kolmogorov-Smirnov test',\n plot_test=True):\n \"\"\"For details, see here.\n\n Parameters\n ----------\n epsi : array, shape (t_, )\n conf_lev : scalar, optional\n title : string, optional\n plot_test : boolean, optional\n\n Returns\n -------\n conf_int : array, shape(2)\n\n \"\"\"\n\n # Step 1: Generate two random mutually exclusive partitions of observations\n\n t_ = epsi.shape[0]\n half_t_ = int(np.round(t_ / 2))\n\n # random permutation of the given vector\n epsi_perm = np.random.permutation(epsi)\n\n epsi_a = epsi_perm[: half_t_]\n epsi_b = epsi_perm[half_t_:]\n a_ = epsi_a.shape[0]\n b_ = epsi_b.shape[0]\n\n # Step 2: Compute the hfp cdfs of the two partitions and the KS statistic\n\n # compute hfp cdf's\n epsi_sort = np.unique(np.sort(epsi))\n cdf_a = cdf_sp(epsi_sort, epsi_a)\n cdf_b = cdf_sp(epsi_sort, epsi_b)\n\n # compute statistic\n z_ks = np.max(abs(cdf_a - cdf_b))\n\n # Step 3: Compute the confidence interval\n\n alpha = 1 - conf_lev\n z = np.sqrt(-np.log(alpha) * (a_ + b_) / (2 * a_ * b_))\n\n # Step 4: Generate figure\n\n if plot_test:\n # build the band for Kolmogorov-Smirnov test\n band_mid = 0.5 * (cdf_a + cdf_b)\n band_up = band_mid + 0.5 * z\n band_low = band_mid - 0.5 * z\n\n # colors\n blue = [0.2, 0.2, 0.7]\n l_blue = [0.2, 0.6, 0.8]\n orange = [.9, 0.6, 0]\n d_orange = [0.9, 0.3, 0]\n\n # max and min value of the first reference axis settings,\n xlim_1 = np.percentile(epsi, 1.5)\n xlim_2 = np.percentile(epsi, 98.5)\n\n ax1 = plt.subplot2grid((2, 2), (0, 0))\n ax2 = plt.subplot2grid((2, 2), (0, 1))\n ax3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)\n # # plot histogram of Sample 1, y1\n # sns.distplot(epsi_a, bins=int(round(10 * np.log(len(epsi_a.flatten())))),\n # kde=False, color=orange,\n # hist_kws={\"alpha\": 1, \"edgecolor\": \"k\"}, ax=ax1)\n nx1, cx1 = histogram_sp(epsi_a, k_=int(round(10 * np.log(len(epsi_a.flatten())))))\n ax1.bar(cx1, nx1, cx1[1] - cx1[0], facecolor=orange, edgecolor='k')\n ax1.set_xlabel('Sample1')\n ax1.set_xlim((xlim_1, xlim_2))\n ax1.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))\n ax1.grid(False)\n\n # sns.distplot(epsi_b, bins=int(round(10 * np.log(len(epsi_b.flatten())))),\n # kde=False, color=l_blue,\n # hist_kws={\"alpha\": 1, \"edgecolor\": \"k\"}, ax=ax2)\n nx2, cx2 = histogram_sp(epsi_b, k_=int(round(10 * np.log(len(epsi_a.flatten())))))\n ax2.bar(cx2, nx2, cx2[1] - cx2[0], facecolor=l_blue, edgecolor='k')\n ax2.grid(False)\n ax2.set_xlabel('Sample2')\n ax2.set_xlim((xlim_1, xlim_2))\n ax2.ticklabel_format(axis='x', 
style='sci', scilimits=(-2, 2))\n\n ylim = np.max(np.r_[nx1, nx2])\n ax1.set_ylim([0, ylim])\n ax2.set_ylim([0, ylim])\n # plot the cdf[s]\n # plot data on the first reference axis\n\n ax3.scatter(epsi_a, cdf_sp(epsi_a, epsi_a), color=d_orange, s=2)\n ax3.scatter(epsi_b, cdf_sp(epsi_b, epsi_b), color=blue, s=2)\n\n # shows partitions epsi_a and epsi_b\n ax3.scatter(epsi_a, 0.002 * np.ones(a_), color=d_orange, s=0.5)\n ax3.scatter(epsi_b, 0.002 * np.ones(b_), color=blue, s=0.5)\n\n # plot the (upper and lower) band\n ax3.plot(epsi_sort, band_up, '-', color='k', lw=0.5)\n ax3.plot(epsi_sort, band_low, '-', color='k', lw=0.5)\n ax3.set_xlabel('data')\n ax3.set_ylabel('cdf')\n\n ax3.set_xlim([xlim_1, xlim_2])\n ax3.set_ylim([-0.05, 1.05])\n ax3.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))\n plt.suptitle(title)\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n return z_ks, z\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.5\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # s_checklist_scenariobased_step10 [<img src=\"https://www.arpm.co/lab/icons/icon_permalink.png\" width=30 height=30 style=\"display: inline;\">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step10&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-10).\n\n# +\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\nfrom arpym.statistics import meancov_sp\nfrom arpym.tools import add_logo\n# -\n\n# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step10-parameters)\n\n# +\n# indicates which projection to continue from\n# True: use copula-marginal projections\n# False: use historical projections\ncopula_marginal = True\n\nq_now = 0 # initial volume time\nq_end = 1 # final volume time\nk_ = 300 # number of elements in the q grid\nl_ = 500 # number of elements in the beta grid\nalpha = 1 # parameter of the power slippage component\ngamma = 3.14e-5 # permanent impact parameter\neta = 1.42e-6 # temporary impact parameter\nc = 0.95 # confidence level for quantile satisfaction measure\n\nn_plot = 2 # index of instrument to plot\n# -\n\n# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step10-implementation-step00): Load data\n\n# +\npath = '../../../databases/temporary-databases/'\n\n# Risk drivers identification\ndb_riskdrivers_tools = pd.read_csv(path+'db_riskdrivers_tools.csv')\nn_stocks = int(db_riskdrivers_tools['n_stocks'][0])\nn_bonds = int(db_riskdrivers_tools.n_bonds[0])\nn_ = n_stocks+n_bonds+3\n\n# Aggregation\ndb_holdings = pd.read_csv(path+'db_holdings.csv')\nh = np.squeeze(db_holdings.values) # initial holdings\n\nif copula_marginal:\n # Projection\n db_scenprob = pd.read_csv(path+'db_scenario_probs.csv')\n p = db_scenprob.p.values\n\n # Pricing\n # import daily P&Ls computed in step 5 with m_=1\n db_pi_oneday = pd.read_csv(path+'db_oneday_pl.csv')\n pi_oneday = db_pi_oneday.values\n\n # Construction\n db_final_portfolio = pd.read_csv(path+'db_final_portfolio.csv')\n # the final portfolio is the one obtained in the construction step,\n # that optimizes the cVaR satisfaction measure\n h_qsi = np.squeeze(db_final_portfolio.values)\n\nelse:\n # Projection\n db_scenprob = pd.read_csv(path+'db_scenario_probs_bootstrap.csv')\n 
p = db_scenprob.p.values\n\n # Pricing\n # import daily P&Ls computed in step 5 with m_=1\n db_pi_oneday = pd.read_csv(path+'db_oneday_pl_historical.csv')\n pi_oneday = db_pi_oneday.values\n\n # Construction\n db_final_portfolio = pd.read_csv(path+'db_final_portfolio_historical.csv')\n # the final portfolio is the one obtained in the construction step,\n # that optimizes the cVaR satisfaction measure\n h_qsi = np.squeeze(db_final_portfolio.values)\n\n# start portfolio\nh_qnow = h\n# final portfolio\nh_qend = h_qsi\n# -\n\n# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step10-implementation-step01): Find trajectory\n\n# +\nsigma2 = np.zeros(n_)\nvariance_pihat = np.zeros((n_, l_))\nmean_pihat = np.zeros((n_, l_))\nxi = np.zeros(l_)\ntraj = np.zeros((n_, l_, k_))\n\n# size of parent order\ndelta_h_parent = (h_qend - h_qnow).astype('int')\n# beta grid\nbeta = np.linspace(alpha/(1+alpha), 1, l_+1, endpoint=True)\nbeta = beta[1:]\n# q grid\nq_grid = np.linspace(q_now, q_end, k_)\n\nfor n in range(n_):\n if delta_h_parent[n] == 0:\n # no change in holdings\n traj[n, :, :] = np.tile(h_qend[n], (l_, k_))\n else:\n _, sigma2[n] = meancov_sp(pi_oneday[:, n], p)\n for l in range(l_):\n # expected P&L\n xi[l] = beta[l]**(alpha+1)/(beta[l]+beta[l]*alpha-alpha)\n mean_pihat[n, l] = gamma/2*(h_qend[n]**2 - h_qnow[n]**2) - \\\n eta*xi[l]*np.abs(delta_h_parent[n])**(1+alpha) * \\\n (q_end-q_now)**(-alpha)\n # P&L variance\n variance_pihat[n, l] = sigma2[n] * (q_end-q_now) * \\\n (h_qnow[n]**2 + 2*h_qnow[n]*delta_h_parent[n]/(beta[l]+1) +\n (delta_h_parent[n]**2)/(2*beta[l]+1))\n # trajectory\n traj[n, l, :] = h_qnow[n] + \\\n ((q_grid-q_now)/(q_end-q_now))**beta[l]*delta_h_parent[n]\n# -\n\n# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step10-implementation-step02): Maximization\n\nq_satis = np.zeros((n_, l_))\nbeta_star = np.zeros(n_)\nl_star = np.zeros(n_)\nfor n in range(n_):\n if delta_h_parent[n] == 0:\n # no change in holdings\n beta_star[n] = beta[-1]\n else:\n # quantile satisfaction measure\n for l in range(l_):\n q_satis[n, l] = mean_pihat[n, l] + \\\n np.sqrt(variance_pihat[n, l])*norm.ppf(1-c)\n # beta corresponding to the optimal liquidation trajectory\n l_star[n] = \\\n np.where(q_satis[n, :] == np.max(q_satis[n, :]))[0]\n beta_star[n] = beta[np.int(l_star[n])]\n\n# ## Plots\n\n# plot execution trajectories\nplt.style.use('arpm')\nfig = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi = 72.0)\nfor i in range(0, l_, 50):\n plt.plot(q_grid, traj[n_plot-1, i, :]*1e-6, color='grey')\nplt.plot(q_grid, traj[n_plot-1, np.int(l_star[n_plot-1]), :]*1e-6,\n color='red')\nplt.title('Optimal trading trajectory - ' + db_pi_oneday.columns[n_plot-1],\n fontsize=20, fontweight='bold')\nplt.xlabel('Volume time', fontsize=17)\nplt.ylabel('Holdings (million units)', fontsize=17)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlim(0,1)\nadd_logo(fig, location=1, set_fig_size=False)\n" ]
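The code array above closes with the ARPM checklist scripts, whose central step (s_checklist_scenariobased_step07) turns scenario P&Ls and scenario probabilities into satisfaction measures. Below is a minimal numpy-only sketch of that computation, assuming a hypothetical scenario P&L vector and equal scenario probabilities in place of the original databases, and a plain scenario-probability quantile in place of arpym's kernel-smoothed quantile_sp.

import numpy as np

# Parameters as used in the dumped script.
lam = 3e-7          # exponential-utility parameter
c_quantile = 0.95   # confidence level for the quantile satisfaction measure

# Hypothetical placeholders for the scenario P&Ls y_h and probabilities p
# (the real values come from the temporary databases, which are not shown).
j_ = 10_000
rng = np.random.default_rng(0)
y_h = rng.normal(loc=5e4, scale=2e5, size=j_)
p = np.full(j_, 1.0 / j_)

# Certainty-equivalent satisfaction measure, mirroring the script:
# cert_eq = -(1/lam) * log( -E_p[ -exp(-lam * y_h) ] )
expected_utility = p @ (-np.exp(-lam * y_h))
cert_eq_yh = -(1.0 / lam) * np.log(-expected_utility)

# Quantile satisfaction measure: plain cumulative-probability quantile,
# without the kernel smoothing used by arpym's quantile_sp.
order = np.argsort(y_h)
cum_p = np.cumsum(p[order])
q_yh = y_h[order][np.searchsorted(cum_p, 1.0 - c_quantile)]

print(cert_eq_yh, q_yh)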
[ [ "numpy.log", "pandas.read_csv", "pandas.Series", "numpy.squeeze", "numpy.exp" ], [ "numpy.ones", "numpy.cov", "numpy.array", "numpy.zeros", "matplotlib.pyplot.style.use" ], [ "matplotlib.pyplot.legend", "numpy.log", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.title", "numpy.min", "numpy.arange", "numpy.ones", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.ylabel", "numpy.where", "matplotlib.pyplot.xlabel", "numpy.exp", "numpy.zeros", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ], [ "numpy.log", "pandas.read_csv", "pandas.to_datetime", "pandas.Series", "numpy.arange", "pandas.DataFrame", "scipy.stats.t.ppf", "numpy.argmax", "numpy.array", "numpy.zeros" ], [ "numpy.diff", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ], [ "numpy.minimum", "numpy.random.randn", "numpy.random.rand", "numpy.zeros" ], [ "numpy.random.lognormal", "numpy.diag", "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.tight_layout", "numpy.linalg.eig", "numpy.arange", "numpy.eye", "matplotlib.pyplot.text", "numpy.zeros", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "scipy.stats.chi2.ppf", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.ones", "matplotlib.pyplot.xlim", "matplotlib.cm.get_cmap", "matplotlib.pyplot.xlabel", "numpy.float" ], [ "numpy.diag", "numpy.maximum", "numpy.linspace", "numpy.linalg.eig", "numpy.tile", "numpy.sort", "numpy.cos", "matplotlib.pyplot.plot", "numpy.seterr", "numpy.sin", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "numpy.argsort", "numpy.array" ], [ "numpy.diag", "matplotlib.pyplot.legend", "numpy.sqrt", "matplotlib.pyplot.plot", "matplotlib.pyplot.quiver", "numpy.exp", "matplotlib.pyplot.tight_layout", "numpy.eye", "matplotlib.pyplot.style.use", "numpy.diagflat", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.pyplot.subplots", "matplotlib.pyplot.sca", "numpy.ones", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "numpy.prod", "matplotlib.pyplot.xlabel" ], [ "numpy.ix_", "matplotlib.pyplot.style.use", "numpy.isnan", "numpy.arange", "numpy.diagflat", "numpy.linalg.norm", "numpy.tile", "numpy.ones", "numpy.zeros", "numpy.sum" ], [ "numpy.log", "matplotlib.pyplot.tight_layout", "numpy.percentile", "numpy.sort", "numpy.round", "numpy.max", "numpy.ones", "numpy.random.permutation", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.subplot2grid" ], [ "scipy.stats.norm.ppf", "numpy.sqrt", "numpy.linspace", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.int", "numpy.max", "pandas.read_csv", "numpy.zeros", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.tile", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
SayanGhoshBDA/code-backup
[ "8b6135facc0e598e9686b2e8eb2d69dd68198b80", "8b6135facc0e598e9686b2e8eb2d69dd68198b80", "8b6135facc0e598e9686b2e8eb2d69dd68198b80" ]
[ "python/data_sutram/scraper/appl_map.py", "python/python_backup/wisp_old/archives/s_wisp_v3.py", "python/pyopenGL/ogl2/ogl_14_plot_2.py" ]
[ "\"\"\"\nhttps://sat-cdn1.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=84135&y=202065&v=4002&accessKey=1549129912_6641142575737855346_%2F_9%2F4MX0U5yhJDc3LDXazhcQj3xjCJU%2BYsiKcviN%2FnWxE%3D&emphasis=standard&tint=dark\n\nhttps://sat-cdn4.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=84135&y=202061&v=4002&accessKey=1549129912_6641142575737855346_%2F_9%2F4MX0U5yhJDc3LDXazhcQj3xjCJU%2BYsiKcviN%2FnWxE%3D&emphasis=standard&tint=dark\n\nhttps://sat-cdn4.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=84135&y=202064&v=4002&accessKey=1549129912_6641142575737855346_%2F_9%2F4MX0U5yhJDc3LDXazhcQj3xjCJU%2BYsiKcviN%2FnWxE%3D&emphasis=standard&tint=dark\n\nhttps://sat-cdn3.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=84122&y=202064&v=4002&accessKey=1549131702_8398681963599501052_%2F_oQrt5vqvzaVZBN%2FLBc5baLQgg5kEhfpKMYQDRlmZ36Q%3D&emphasis=standard&tint=dark\n\nhttps://sat-cdn3.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=84127&y=202067&v=4002&accessKey=1549131702_8398681963599501052_%2F_oQrt5vqvzaVZBN%2FLBc5baLQgg5kEhfpKMYQDRlmZ36Q%3D&emphasis=standard&tint=dark\nx\n38.089967/84127\n0.0004527674468363308\ny\n-122.236446/202067\n-0.0006049302756016569\n\nx_conv = 2208.8242126933765\n\ny_conv = -1653.078168433501\n\nhttps://sat-cdn1.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=371353&y=184495&v=4002&accessKey=1549132809_7090421196284837684_%2F_gT3s2ghdt72RemReCoMIf13JXH%2BE0rbJKjODBV6pfQc%3D&emphasis=standard&tint=dark\nhttps://sat-cdn1.apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=323660&y=198484&v=4002&accessKey=1549132809_7090421196284837684_/_gT3s2ghdt72RemReCoMIf13JXH+E0rbJKjODBV6pfQc=&emphasis=standard&tint=dark\n\n\n\"\"\"\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\nimport os\nimport wget\nimport imghdr\nimport numpy as np\nimport requests\nimport time\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\nimport math\nPI = math.pi\ncdn = [1,2,3,4]\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0'#,\n #'From': '[email protected]' # This is another valid field\n}\n\ndef ret_xy_tiles(lat_deg,lon_deg,zoom):\n n = 2**zoom\n #lon_deg = xtile / n * 360.0 - 180.0\n \n #lat_deg = lat_rad * 180.0 / π\n xtile = n * ((lon_deg + 180) / 360)\n #lat_rad = math.atan(math.asinh(PI * (1 - 2 * ytile / n)))\n lat_rad = lat_deg * PI / 180.0\n ytile = n * (1 - (math.log(math.tan(lat_rad) + 1/math.cos(lat_rad)) / PI)) / 2\n return math.floor(xtile),math.floor(ytile)\n\ndef make_url(lat_deg,lon_deg,zoom):\n x_tyle,y_tyle = ret_xy_tiles(lat_deg,lon_deg,zoom)\n for i in cdn:\n now_time = time.time()\n #url_str = \"https://sat-cdn\"+str(i)+\".apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=\"+str(x_tyle)+\"&y=\"+str(y_tyle)+\"&v=4002&accessKey=\"+str(math.floor(now_time)-10)+\"_8618194673608956327_%2F_AaK0kkbh9QXsy5VX35tXlchP3bjj1%2FkjYGmHywouu0E%3D&emphasis=standard&tint=dark\"\n url_str = \"https://sat-cdn\"+str(i)+\".apple-mapkit.com/tile?style=7&size=1&scale=1&z=19&x=\"+str(x_tyle)+\"&y=\"+str(y_tyle)+\"&v=4002&accessKey=1549197942_115647566105935154_%2F_b3rw2rvOuQvkjh0rILQSaaW3GxphOI%2BXJo48fORdf0Y%3D&emphasis=standard&tint=dark\"\n print(\"Trying :: \",url_str)\n try:\n file_name = str(x_tyle)+\"_\"+str(y_tyle)+\".jpeg\"\n #file_name = wget.download(url_str)\n #file_name.replace(\".tmp\", \"{x_tyle}_{y_tyle}.jpg\")\n r = requests.get(url_str, 
allow_redirects=True,headers=headers)\n open(file_name, 'wb').write(r.content)\n \n if imghdr.what(file_name) is 'jpeg':\n print(\"JPEG\")\n else:\n os.remove(file_name)\n print(\"NOT JPEG\")\n except:\n print(\"Ops Blown Off!\")\n\nif __name__ == \"__main__\":\n #make_url(38.085668,-122.235644,19)\n for i in np.arange(10,45,0.1):\n for j in np.arange(10,100,0.1):\n print(\"i=\",i,\"j=\",j)\n make_url(i,j,19)", "\"\"\"\n# This is the application (probably basic) to find the location (almost any) in any Country \n# according to the choices of your preference. Uses Foursquare API to get the data (geojson),\n# also uses shitty tkinter GUI for accepting data. Please provide the Access key for the API\n# if bychance not given! This then creates a custom-made http server to visualise the locations\n# in a web browser, because Folium (leaflet.js) doesn't work in GUI or Terminal LOL!\n# \n# Caution: Please don't blame me if this doesn't works, cause the data may not be present for \n# some location, since everyone will use free services of foursquare API.\n#\n# @Copyright :: Don't share this software without the permission of the author\n#\n# e-mail : [email protected]\n# \n# Created for the purpose of final year project! :=> Almost data visualization project!\n#\n# Dated : 10-02-2019\n\"\"\"\n__version__ = \"0.0.1-beta\"\n__author__ = \"Jimut Bahan Pal <[email protected]>\"\n\nfrom tkinter import Tk, Label, Button, Entry, StringVar, DISABLED, NORMAL, END, W, E, N, S\n# tranforming json file into a pandas dataframe library\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom pandas.io.json import json_normalize\nfrom folium.plugins import MarkerCluster\nfrom tempfile import NamedTemporaryFile\nfrom geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values\nfrom IPython.core.display import HTML \nfrom IPython.display import Image\nfrom tkinter import *\nimport tkinter as tk\nimport pandas as pd # library for data analsysis\nimport numpy as np # library to handle data in a vectorized manner\nimport subprocess\nimport webbrowser\nimport requests # library to handle requests\nimport random # library for random number generation\nimport folium # plotting library\n\nprint('Folium installed')\nprint('Libraries imported.')\ndef_sec_dummy = 0\nclass guiProj:\n def __init__(self, master):\n # the constructor for creating the GUI of the app using tkinter!\n # this is creating the padding for the input/label text etc.\n for i in range(100):\n master.columnconfigure(i, pad=3) \n master.rowconfigure(i, pad=3)\n self.master = master\n master.title(\"WISP\")\n # probably the do-able geometry\n master.geometry(\"350x600\")\n\n # Shortened version of the code!\n msg_s = [\"CLIENT ID\",\"FOURSQUARE SECRET\",\"LOC/CITY\",\"RADIUS (in meters)\",\"NO. 
OF PREFERENCE\"]\n i_var = 0\n\n # just initializing!\n self.msg_list = [None]*int(5)\n self.text_list = [None]*int(5)\n self.label_list = [None]*int(5)\n self.entry_list = [None]*int(5)\n\n # creating the basic template of the application!\n for msg in msg_s:\n self.msg_list[i_var] = msg\n self.text_list[i_var] = StringVar()\n self.text_list[i_var].set(\"{}\".format(self.msg_list[i_var]))\n self.label_list[i_var] = Label(master, textvariable=self.text_list[i_var])\n self.label_list[i_var].grid(row=i_var, column=0, columnspan=1, sticky=W+N)\n\n self.entry_list[i_var] = Entry(master)\n self.entry_list[i_var].grid(row=i_var, column=1, columnspan=1, sticky=W+N)\n \n i_var += 1\n \n def submit_pref():\n # this function gets called when they submit the preference!\n get_pref_no = self.entry_list[4].get() #to get the preference\n # dummy preference for conditional check later\n if get_pref_no == \"\":\n get_pref_no = 0\n # initialising etc.\n self.entry_pref = [None]*int(get_pref_no)\n self.text_pref = [None]*int(get_pref_no)\n self.pf_text = [None]*int(get_pref_no)\n self.label_pref = [None]*int(get_pref_no)\n\n\n def onFrameConfigure(canvas):\n '''Reset the scroll region to encompass the inner frame'''\n canvas.configure(scrollregion=canvas.bbox(\"all\"))\n \n def populate(frame):\n '''Put in some fake data'''\n # automating the boring and tedious stuffs through list\n # basically, takes the input for the number of preferences!\n for iter_ in range(int(get_pref_no)):\n \n text_str = \"{}-{} :\".format(\"preference\",iter_+1)\n \n self.pf_text[iter_] = text_str\n # creating the label\n self.text_pref[iter_] = StringVar()\n self.text_pref[iter_].set(self.pf_text[iter_])\n self.label_pref[iter_] = Label(frame, textvariable=self.text_pref[iter_])\n self.label_pref[iter_].grid(row=5+iter_+1, column=0, columnspan=1, sticky=W+N)\n\n # entry widget\n self.entry_pref[iter_] = Entry(frame)\n self.entry_pref[iter_].grid(row=5+iter_+1, column=1, columnspan=1, sticky=W+N)\n \"\"\"\n for row in range(100):\n tk.Label(frame, text=\"%s\" % row, width=3, borderwidth=\"1\", \n relief=\"solid\").grid(row=row, column=0)\n t=\"this is the second column for row %s\" %row\n tk.Label(frame, text=t).grid(row=row, column=1)\n tk.Entry(frame).grid(row=row, column=2)\n \"\"\"\n \n canvas = tk.Canvas(master, borderwidth=0, background=\"#ffffff\")\n frame = tk.Frame(canvas, background=\"#ffffff\")\n vsb = tk.Scrollbar(master, orient=\"vertical\", command=canvas.yview)\n canvas.configure(yscrollcommand=vsb.set)\n # for the scrollbar\n vsb.grid(row=8, column=2,rowspan=int(get_pref_no), sticky=\"nsw\")\n # for the grid\n canvas.grid(row=8,column=0,rowspan=1,sticky=\"nsew\")\n canvas.create_window((4,4), window=frame, anchor=\"nw\")\n frame.bind(\"<Configure>\", lambda event, canvas=canvas: onFrameConfigure(canvas))\n populate(frame)\n \"\"\"\n \n \n \n \"\"\"\n # disabling the button! for one-time use!\n self.submit_pref_buttton.configure(state=DISABLED)\n\n self.show_map_button = Button(master, text=\"show map\",command=self.show_map)\n self.show_map_button.grid(row=int(get_pref_no)+8,column=1,columnspan=1, sticky=W+E)\n\n def def_sec():\n # this function sets the default secrets!\n\n global def_sec_dummy\n def_sec_dummy = 1\n print(\"USING DEFAULT SECRETS FOR CLIENT_ID and CLIENT_SECRET \")\n self.entry_list[0].insert(END, 'using default client ID')\n self.entry_list[1].insert(END, 'using default client secret')\n # disabling things! 
lol\n self.use_default_sec.configure(state=DISABLED)\n # place holder gets called when we use this! (default secret thingie)\n self.use_default_sec = Button(master, text=\"use default secrets\",command=def_sec)\n self.use_default_sec.grid(row=5,column=0,columnspan=1, sticky=W+E)\n # again button thing\n self.submit_pref_buttton = Button(master, text=\"submit\",command=submit_pref)\n self.submit_pref_buttton.grid(row=5,column=1,columnspan=1, sticky=W+E)\n \n def show_map(self):\n # To get all the values and show the map!\n all_values = [] # has all the values that is got from the GUI\n for item in self.entry_list:\n all_values.append(item.get())\n pref_list = [] # gets the preference one by one!\n for item in self.entry_pref:\n pref_list.append(item.get())\n #print(all_values)\n string_gen = \"0123456789abcdef\" # this actually generated the random hex code for colors!\n def get_random_col():\n # unnecessary stuffs to make the visualization cool\n ret_str = \"#\"\n for i in range(6):\n ret_str += random.choice(string_gen)\n return ret_str\n #get_random_col()\n global def_sec_dummy\n if def_sec_dummy == 1:\n # default things! lol make sure to clear them!!!!\n CLIENT_ID = \"QGITIC5BRJSPQFIEDOINIWZUBWBUX3JUGOAJO2H1KC1HPT1T\"\n CLIENT_SECRET = \"2FJXKPKYOMK4DISKLHL4DI111FPZYCTZIZW2IF55VCLK0WJF\"\n elif def_sec_dummy == 0:\n CLIENT_ID = all_values[0] #input(\"Enter the client ID : \") # your Foursquare ID\n # QGITIC5BRJSPQFIEDOINIWZUBWBUX3JUGOAJO2H1KC1HPT1T\n CLIENT_SECRET = all_values[1] #input(\"Enter the Foursquare secret : \") # your Foursquare Secret\n # 2FJXKPKYOMK4DISKLHL4DI111FPZYCTZIZW2IF55VCLK0WJF\n VERSION = '20190122'\n LIMIT = 1000\n address = all_values[2] #input(\"Enter the location/ city :\")\n\n print('Your credentails:')\n print('CLIENT_ID: ' + CLIENT_ID)\n print('CLIENT_SECRET:' + CLIENT_SECRET)\n print('Location of your choice : ', address)\n\n geolocator = Nominatim()\n try:\n # get's the lat and long for a place, it is kept under try catch for safety purpose\n location = geolocator.geocode(address)\n latitude = location.latitude\n longitude = location.longitude\n print(latitude, longitude)\n except:\n\n print(\"CHECK INTERNET CONNECTION!\")\n # directly closes the application\n exit(4)\n\n \n RADIUS = int(all_values[3])\n\n print(\"Total preference list : \",pref_list)\n\n map_address = folium.Map(location=[latitude, longitude], zoom_start=11)\n marker_cluster = MarkerCluster().add_to(map_address)\n list_df = []\n for item_pref in pref_list:\n url = 'https://api.foursquare.com/v2/venues/search?client_id={}&client_secret={}&ll={},{}&v={}&query={}&radius={}&limit={}'.format(CLIENT_ID, CLIENT_SECRET, latitude, longitude, VERSION, item_pref, RADIUS, LIMIT)\n try :\n print(\"url : \",url)\n results = requests.get(url).json()\n # assign relevant part of JSON to venues\n venues = results['response']['venues']\n\n # tranform venues into a dataframe\n dataframe = json_normalize(venues)\n print(dataframe.head())\n try:\n # keep only columns that include venue name, and anything that is associated with location\n filtered_columns = ['name', 'categories'] + [col for col in dataframe.columns if col.startswith('location.')] + ['id']\n dataframe_filtered = dataframe.loc[:, filtered_columns]\n except:\n print(\"Something went wrong!\")\n continue\n # function that extracts the category of the venue\n def get_category_type(row):\n try:\n categories_list = row['categories']\n except:\n categories_list = row['venue.categories']\n\n if len(categories_list) == 0:\n return None\n else:\n return 
categories_list[0]['name']\n\n # filter the category for each row\n try:\n dataframe_filtered['categories'] = dataframe_filtered.apply(get_category_type, axis=1)\n except:\n print(\"Something went wrong!\")\n continue\n\n # clean column names by keeping only last term\n dataframe_filtered.columns = [column.split('.')[-1] for column in dataframe_filtered.columns]\n\n #dataframe_filtered.head()\n\n # copying it to stationary shop dataframe\n\n data_frame = dataframe_filtered.copy()\n list_df.append(data_frame)\n except:\n print(\"Preference : \",item_pref,\" doesn't exists!!!\")\n print(list_df)\n # create map latitude and longitude values\n MAP_FINAL = folium.Map(location=[latitude, longitude], zoom_start=11)\n # configuration for the dafault map to be created!\n marker_cluster = MarkerCluster().add_to(MAP_FINAL)\n for list_item in list_df:\n FILL_COL = str(get_random_col())\n OVER_COL = str(get_random_col())\n for lat, lng, cat, postcode in zip(list_item['lat'], list_item['lng'], list_item['categories'], list_item['postalCode']):\n f_format = str(\"POSTCODE : \"+str(postcode))\n s_format = str(\"CATEGORY : \"+str(cat))\n label = '{}, {}'.format(f_format,s_format )\n label = folium.Popup(label, parse_html=True)\n # the circle marker is done through this way! with custom pop-ups\n folium.CircleMarker(\n [lat, lng],\n radius=5,\n popup=label,\n color=OVER_COL,\n fill=True,\n fill_color=FILL_COL,\n fill_opacity=0.7).add_to(marker_cluster) \n \n # setting port addr, localhost for the custom http server\n PORT = 7000\n HOST = '127.0.0.1'\n SERVER_ADDRESS = '{host}:{port}'.format(host=HOST, port=PORT)\n FULL_SERVER_ADDRESS = 'http://' + SERVER_ADDRESS\n # ------------------------------------------------------------------------------------------------\n # so let's write a custom temporary-HTML renderer\n def TemproraryHttpServer(page_content_type, raw_data):\n \"\"\"\n A simpe, temprorary http web server on the pure Python 3.\n It has features for processing pages with a XML or HTML content.\n \"\"\"\n\n class HTTPServerRequestHandler(BaseHTTPRequestHandler):\n \"\"\"\n An handler of request for the server, hosting XML-pages.\n \"\"\"\n\n def do_GET(self):\n \"\"\"Handle GET requests\"\"\"\n\n # response from page\n self.send_response(200)\n\n # set up headers for pages\n content_type = 'text/{0}'.format(page_content_type)\n self.send_header('Content-type', content_type)\n self.end_headers()\n\n # writing data on a page\n self.wfile.write(bytes(raw_data, encoding='utf'))\n\n return\n\n if page_content_type not in ['html', 'xml']:\n raise ValueError('This server can serve only HTML or XML pages.')\n\n page_content_type = page_content_type\n\n # kill a process, hosted on a localhost:PORT\n subprocess.call(['fuser', '-k', '{0}/tcp'.format(PORT)])\n\n # Started creating a temprorary http server.\n httpd = HTTPServer((HOST, PORT), HTTPServerRequestHandler)\n\n # run a temprorary http server\n httpd.serve_forever()\n\n\n def run_html_server(html_data=None):\n\n if html_data is None:\n html_data = \"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>Page Title</title>\n </head>\n <body>\n <h1>This is a Heading</h1>\n <p>This is a paragraph.</p>\n </body>\n </html>\n \"\"\"\n\n # open in a browser URL and see a result\n webbrowser.open(FULL_SERVER_ADDRESS)\n\n # run server\n TemproraryHttpServer('html', html_data)\n\n # ------------------------------------------------------------------------------------------------\n # now let's save the visualization into the temp file and render it\n tmp = 
NamedTemporaryFile()\n MAP_FINAL.save(tmp.name)\n with open(tmp.name) as f:\n folium_map_html = f.read()\n\n run_html_server(folium_map_html)\n\ndef main():\n root = Tk()\n # initialising the app\n my_gui = guiProj(root)\n # goes on and on loop for tkinter!\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()\n", "#// last done till 43 pg no do the graph inequalities the next day.\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport sys\n#from numpy import *\nimport numpy as np\nimport math\n\ndef init():\n glClearColor(1.0,1.0,1.0,1.0)\n gluOrtho2D(-5.0,5.0,-5.0,5.0)\n\ndef plotfunc():\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.0,0.0,0.0) # color\n glPointSize(1.0)\n for a in np.arange(1.0,3.0,0.1):\n for t in np.arange(-4.4,4.4,0.01):\n x = 0.3*a*(t*t-3)\n y = 0.1*a*t*(t*t-3)\n glBegin(GL_POINTS)\n glVertex2f(x,y)\n glEnd()\n glFlush()\n\ndef main():\n glutInit(sys.argv) # tells the python we are going to be displaying GLUT style graphics\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutCreateWindow(\"Plot Points\")\n glutInitWindowSize(400,400)\n glutInitWindowPosition(50,50)\n glutDisplayFunc(plotfunc)\n init()\n glutMainLoop()\n\nmain()\n" ]
[ [ "numpy.arange" ], [ "pandas.io.json.json_normalize" ], [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "0.25" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
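The WISP record above flattens the Foursquare `venues` response with `json_normalize` before filtering columns. A minimal sketch of that flattening step, assuming an invented venue dict and the modern `pandas.json_normalize` entry point (the `pandas.io.json.json_normalize` import listed in the apis field is the older, since-deprecated location):

import pandas as pd

# Invented venue shaped roughly like one Foursquare API search result.
venues = [{
    "name": "Corner Cafe",
    "categories": [{"name": "Coffee Shop"}],
    "location": {"lat": 22.57, "lng": 88.36, "postalCode": "700001"},
}]

# Nested dicts become dotted columns: location.lat, location.lng, location.postalCode.
df = pd.json_normalize(venues)
filtered = df.loc[:, ["name", "categories"] + [c for c in df.columns if c.startswith("location.")]]
print(filtered.columns.tolist())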
gerardrbentley/peak-weather
[ "2880184c99c2d30075665f76a9e8b815906a55e5" ]
[ "streamlit_app/streamlit_app.py" ]
[ "from copy import deepcopy\nimport asyncio\nimport json\n\nimport pandas as pd\nimport streamlit as st\nfrom structlog import get_logger\n\nfrom helpers import (\n fromtimestamp,\n show_weather,\n WeatherItem,\n gather_one_call_weather_data,\n clean_time,\n)\n\nlog = get_logger()\nst.set_page_config(\n layout=\"wide\",\n page_title=\"Peak Weather: 4,000 Footers\",\n page_icon=\":mountain:\",\n)\n\n\[email protected](ttl=60 * 60)\ndef load_data(lat_lon_pairs: list) -> list:\n \"\"\"Function to fetch Open Weather data and cache results\n\n Args:\n lat_lon_pairs (list): Destinations to get data for\n\n Returns:\n list: List of dictionaries which are json responses from open weather\n \"\"\"\n log.info(\"Start Load Data\")\n data = asyncio.run(gather_one_call_weather_data(lat_lon_pairs))\n log.info(\"Returning Load Data\")\n return data\n\n\[email protected]()\ndef load_metadata() -> pd.DataFrame:\n \"\"\"Function to read mountain lat, lon, and other metadata and cache results\n\n Returns:\n pd.DataFrame: df containing information for 48 mountains\n \"\"\"\n df = pd.read_csv(\"./data/mountains.csv\")\n df = df.sort_values(\"name\")\n return df\n\n\ndef get_mtn_anchor(mountain: str) -> str:\n anchor = mountain.lower().replace(\" \", \"-\")\n return f\"[{mountain}](#{anchor})\"\n\n\ndef main():\n \"\"\"Main Streamlit App Entrypoint\"\"\"\n st.title(\n \":sunny::mountain::rainbow: Peak Weather of the 4,000 Footers :rainbow::mountain::sunny:\"\n )\n st.header(\":umbrella: You can't stop the rain, but you can spot it! :umbrella:\")\n with st.expander(\"Expand for Basic App Information:\"):\n st.markdown(\n \"\"\"\\\n# Peak Weather: New Hampshire's 4,000 Footers\n\nBuilt to give you a dashboard view of the next few hours' forecast for New Hampshire's 48 4,000 ft mountains.\nGonna rain on the Kinsmans?\nIs it snowing on Washington?\nShould I hike Owl's Head?\n\nPowered by [Streamlit](https://docs.streamlit.io/) + [Open Weather API](https://openweathermap.org/api).\nSpecifically, Streamlit runs the web interactinos and OpenWeather provides the data.\n\nBuilt with :heart: from [Gar's Bar](https://tech.gerardbentley.com) by Gerard Bentley\n\"\"\"\n )\n with st.spinner(\"Loading Mountain List\"):\n base_mountains = load_metadata()\n\n with st.expander(\"Expand for Basic Mountain Information: \"):\n st.dataframe(base_mountains)\n\n with st.spinner(\"Fetching Weather Data\"):\n lat_lon_pairs = zip(base_mountains.lat, base_mountains.lon)\n cached_responses = load_data(lat_lon_pairs)\n weather_responses = deepcopy(cached_responses)\n\n first_response = weather_responses[0]\n log.info(\"Weather Response\", first_response=first_response)\n if \"current\" not in first_response:\n st.error(\n \"\"\"\\\n### Oof...\n\nOpen Weather API can't be reached for data at the moment.\nApologies, feel free to check back soon.\"\"\"\n )\n st.write(\n f\"## Time: {fromtimestamp(first_response['current']['dt']).strftime('%I:%M:%S %p, %b %d %Y')}\"\n )\n table = []\n\n table.append(\"| Mountains | | |\")\n table.append(\"|---|---|---|\")\n for left, middle, right in zip(\n base_mountains.name[::3], base_mountains.name[1::3], base_mountains.name[2::3]\n ):\n table.append(\n f\"| {get_mtn_anchor(left)} | {get_mtn_anchor(middle)} | {get_mtn_anchor(right)} |\"\n )\n st.markdown(\"\\n\".join(table))\n\n for mountain, response in zip(base_mountains.name, weather_responses):\n st.write(\"-\" * 88)\n st.write(f\"#### {mountain}\")\n st.write(f\"({response['lat']}, {response['lon']})\")\n st.write(f\"Weather 
{clean_time(response['current']['dt'])}: \")\n current_temperature = round(response[\"current\"][\"temp\"], 1)\n st.metric(\"Temp (F)\", current_temperature, 0.0)\n for weather in response[\"current\"][\"weather\"]:\n weather_item = WeatherItem(**weather)\n show_weather(weather_item)\n\n with st.expander(\"Expand for future forecast:\"):\n for col, entry in zip(st.columns(5), response[\"hourly\"][1:]):\n col.write(f\"{clean_time(entry['dt'])}\")\n temperature = round(entry[\"temp\"], 1)\n col.metric(\n \"Temp (F)\", temperature, round(temperature - current_temperature, 1)\n )\n for weather in entry[\"weather\"]:\n weather_item = WeatherItem(**weather)\n show_weather(weather_item, col)\n current_temperature = temperature\n alerts = response.get(\"alerts\")\n if alerts is not None:\n for alert in alerts:\n body = (\n f\"### Alert From {alert['sender_name']}: {alert['event']}\",\n f\"Duration: {fromtimestamp(alert['start'])} - {fromtimestamp(alert['end'])}\",\n alert[\"description\"],\n f\"Tags: {'; '.join(alert['tags'])}\",\n )\n\n st.warning(\"\\n\".join(body))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
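The dashboard above lays its mountain index out three names per markdown row by zipping strided slices of the name list. A toy illustration of that grouping, assuming a short invented list and no Streamlit:

names = ["Adams", "Bond", "Cabot", "Carrigain", "Eisenhower", "Field"]

table = ["| Mountains | | |", "|---|---|---|"]
for left, middle, right in zip(names[::3], names[1::3], names[2::3]):
    table.append(f"| {left} | {middle} | {right} |")  # one markdown row per three names
print("\n".join(table))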
ErwindeGelder/ScenarioRiskQuantification
[ "ee351b7bd3629af0c1d1d800dcc5ac5426b9b804" ]
[ "simulation/acc_idmplus.py" ]
[ "\"\"\" Model of ACC with FCW and IDM+ to take over from Xiao et al. (2017).\n\nCreation date: 2020 08 12\nAuthor(s): Erwin de Gelder\n\nModifications:\n\"\"\"\n\nimport numpy as np\nfrom .acc import ACC, ACCParameters, ACCState\nfrom .idm import IDMParameters\nfrom .idmplus import IDMPlus\n\n\nclass ACCIDMPlusParameters(ACCParameters):\n \"\"\" Parameters for the ACCIDMPlus. \"\"\"\n fcw_threshold: float = 0.75 # [0-1] If probability is above, give warning.\n fcw_delay: float = 1.0 # [s] After this delay, driver takes over.\n driver_takeover_speed: float = 15.0 # [m/s] Speed difference at which driver takes control.\n driver_takeover_view: float = 150 # [m] Only take over if car is within this distance.\n driver_model: IDMPlus = None # The model of the driver.\n driver_parms: IDMParameters = None # Parameters of the driver model.\n\n def __init__(self, **kwargs):\n self.driver_model = IDMPlus()\n ACCParameters.__init__(self, **kwargs)\n\n\nclass ACCIDMPlusState(ACCState):\n \"\"\" State of the ACCIDMPlus. \"\"\"\n fcw: bool = False\n samples_since_fcw: int = 0\n samples_in_view: int = 0\n driver_takeover: bool = False\n\n\nclass ACCIDMPlus(ACC):\n \"\"\" Class for simulation of the human (IDMplus) + ACC. \"\"\"\n def __init__(self):\n ACC.__init__(self)\n self.parms = ACCIDMPlusParameters()\n self.state = ACCIDMPlusState()\n\n self.nstep = 0\n\n def init_simulation(self, parms: ACCIDMPlusParameters) -> None:\n \"\"\" Initialize the simulation.\n\n See the ACC for the default parameters.\n The following additional parameters can also be set:\n fcw_threshold: float = 0.75 # [0-1] If probability is above, give warning.\n fcw_delay: float = 1.0 # [s] After this delay, driver takes over.\n driver_takeover_speed: float = 15.0 # [m/s] Speed difference at which driver takes control.\n driver_takeover_view: float = 150 # [m] Only take over if car is within this distance.\n driver_model: StandardModel = None # The model of the driver.\n driver_parms: StandardParameters = None # Parameters of the driver model.\n\n :param parms: The parameters listed above.\n \"\"\"\n # Set the parameters of the ACC model.\n ACC.init_simulation(self, parms)\n\n # Initialize the driver model.\n self.parms.driver_model = parms.driver_model\n self.parms.driver_parms = parms.driver_parms\n self.parms.driver_model.init_simulation(parms.driver_parms)\n\n # Initialize parameters of the Forward Collision Warning\n self.parms.fcw_delay, self.parms.fcw_threshold = parms.fcw_delay, parms.fcw_threshold\n\n # Reset the state regarding the takeover.\n self.state.fcw = False\n self.state.samples_since_fcw = 0\n self.state.samples_in_view = 0\n self.state.driver_takeover = False\n\n self.nstep = 0\n\n def step_simulation(self, leader) -> None:\n self.nstep += 1\n self.integration_step()\n\n # Update the driver model.\n self.parms.driver_model.step_simulation(leader)\n\n # If the FCW is active for longer than `fcw_delay`, the driver is active.\n # Note: additional requirement is that the car should be in the viewing range for at least\n # as long as the reactiontime of the driver.\n if leader.state.position-self.state.position < self.parms.driver_parms.max_view:\n self.state.samples_in_view += 1\n if self.state.fcw:\n self.state.samples_since_fcw += 1\n elif not self.state.driver_takeover:\n self.state.fcw = self.fcw_warning(leader)\n if not self.state.driver_takeover and self.state.fcw:\n if self.state.samples_since_fcw*self.parms.timestep >= self.parms.fcw_delay and \\\n self.state.samples_in_view > 
self.parms.driver_parms.n_reaction+1:\n self.state.driver_takeover = True\n\n # Following Xiao et al. (2017), the driver takes over if approaching speed > 15 m/s and\n # car is within 150 m. The speed (15 m/s) and view (150 m) are parameterized with\n # `driver_take_over_speed`, `driver_take_over_view`.\n # As a additional requirement, the driver should brake.\n if not self.state.driver_takeover:\n if self.state.speed-leader.state.speed > self.parms.driver_takeover_speed and \\\n (leader.state.position-self.state.position) < self.parms.driver_takeover_view and \\\n self.parms.driver_model.state.acceleration < 0:\n self.state.driver_takeover = True\n\n if self.state.driver_takeover:\n # Update our own states with that from the driver model.\n self.state.position = self.parms.driver_model.state.position\n self.state.speed = self.parms.driver_model.state.speed\n self.state.acceleration = self.parms.driver_model.state.acceleration\n else:\n # Update the states of the driver model with that from the ACC.\n self.update(leader.state.position-self.state.position,\n self.state.speed,\n self.state.speed-leader.state.speed)\n\n self.parms.driver_model.state.position = self.state.position\n self.parms.driver_model.state.speed = self.state.speed\n self.parms.driver_model.state.acceleration = self.state.acceleration\n\n def fcw_warning(self, leader) -> bool:\n \"\"\" Issue a FCW based on the model of Kiefer et al.\n\n :param leader: The leading vehicle that contains position and speed.\n :return: Whether an FCW will be issued.\n \"\"\"\n inv_ttc = ((self.state.speed - leader.state.speed) /\n (leader.state.position - self.state.position))\n if leader.state.speed > 0 > leader.state.acceleration:\n tmp = -6.092 + 18.816*inv_ttc + 0.119*self.state.speed\n elif leader.state.speed > 0:\n tmp = -6.092 + 12.584*inv_ttc + 0.119*self.state.speed\n else:\n tmp = -9.073 + 24.225*inv_ttc + 0.119*self.state.speed\n\n probability = 1 / (1 + np.exp(-tmp))\n if probability > self.parms.fcw_threshold:\n return True\n return False\n" ]
[ [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
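The `fcw_warning` method in the file above scores a forward-collision warning with a logistic model of inverse time-to-collision and ego speed. A hedged standalone evaluation of that probability, reusing the coefficients of the "lead moving and braking" branch with invented kinematics:

import numpy as np

ego_speed, lead_speed, gap = 20.0, 12.0, 30.0    # invented example: closing at 8 m/s over 30 m
inv_ttc = (ego_speed - lead_speed) / gap

logit = -6.092 + 18.816 * inv_ttc + 0.119 * ego_speed
probability = 1.0 / (1.0 + np.exp(-logit))       # sigmoid of the linear score

fcw_threshold = 0.75                             # default in ACCIDMPlusParameters
print(round(float(probability), 3), probability > fcw_threshold)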
yqshao/PiNN
[ "464034dda44ce053cf1255a3e7d367c636d6b9f3" ]
[ "pinn/models/base.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Basic functions for PiNN models\"\"\"\nimport tensorflow as tf\nfrom pinn.utils import pi_named\n\ndef export_model(model_fn):\n # default parameters for all models\n from pinn.optimizers import default_adam\n default_params = {'optimizer': default_adam}\n def pinn_model(params, **kwargs):\n model_dir = params['model_dir']\n params_tmp = default_params.copy()\n params_tmp.update(params)\n params = params_tmp\n model = tf.estimator.Estimator(\n model_fn=model_fn, params=params, model_dir=model_dir, **kwargs)\n return model\n return pinn_model\n\nclass MetricsCollector():\n def __init__(self, mode):\n self.mode = mode\n self.LOSS = []\n self.ERROR = []\n self.METRICS = {}\n\n def add_error(self, tag, data, pred, mask=None, weight=1.0,\n use_error=True, log_error=True, log_hist=True):\n \"\"\"Add the error\n\n Args:\n tag (str): name of the error.\n data (tensor): data label tensor.\n pred (tensor): prediction tensor.\n mask (tensor): default to None (no mask, not implemented yet).\n weight (tensor): default to 1.0.\n mode: ModeKeys.TRAIN or ModeKeys.EVAL.\n return_error (bool): return error vector (for usage with Kalman Filter).\n log_loss (bool): log the error and loss function.\n log_hist (bool): add data and predicition histogram to log.\n log_mae (bool): add the mean absolute error to log.\n log_rmse (bool): add the root mean squared error to log.\n \"\"\"\n error = data - pred\n weight = tf.cast(weight, data.dtype)\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n if log_hist:\n tf.compat.v1.summary.histogram(f'{tag}_DATA', data)\n tf.compat.v1.summary.histogram(f'{tag}_PRED', pred)\n tf.compat.v1.summary.histogram(f'{tag}_ERROR', error)\n if log_error:\n mae = tf.reduce_mean(tf.abs(error))\n rmse = tf.sqrt(tf.reduce_mean(error**2))\n tf.compat.v1.summary.scalar(f'{tag}_MAE', mae)\n tf.compat.v1.summary.scalar(f'{tag}_RMSE', rmse)\n if mask is not None:\n error = tf.boolean_mask(error, mask)\n if use_error:\n loss = tf.reduce_mean(error**2 * weight)\n tf.compat.v1.summary.scalar(f'{tag}_LOSS', loss)\n self.ERROR.append(error*tf.math.sqrt(weight))\n self.LOSS.append(loss)\n if self.mode == tf.estimator.ModeKeys.EVAL:\n if log_error:\n self.METRICS[f'METRICS/{tag}_MAE'] = tf.compat.v1.metrics.mean_absolute_error(data, pred)\n self.METRICS[f'METRICS/{tag}_RMSE'] = tf.compat.v1.metrics.root_mean_squared_error(data, pred)\n if mask is not None:\n error = tf.boolean_mask(error, mask)\n if use_error:\n loss = tf.reduce_mean(error**2 * weight)\n self.METRICS[f'METRICS/{tag}_LOSS'] = tf.compat.v1.metrics.mean(loss)\n self.LOSS.append(loss)\n\n\n@pi_named('TRAIN_OP')\ndef get_train_op(optimizer, metrics, tvars, separate_errors=False):\n \"\"\"\n Args:\n optimizer: a PiNN optimizer config.\n params: optimizer parameters.\n loss: scalar loss function.\n error: a list of error vectors (reserved for EKF).\n network: a PiNN network instance.\n sperate_errors (bool): separately update elements in the metrics\n \"\"\"\n from pinn.optimizers import get, EKF, gEKF\n import numpy as np\n\n optimizer = get(optimizer)\n optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()\n nvars = np.sum([np.prod(var.shape) for var in tvars])\n print(f'{nvars} trainable vaiabless, training with {tvars[0].dtype.name} precision.')\n\n if not (isinstance(optimizer, EKF) or isinstance(optimizer, gEKF)):\n loss_list = metrics.LOSS\n if separate_errors:\n selection = tf.random.uniform([], maxval= len(loss_list), dtype=tf.int32)\n loss = tf.stack(loss_list)[selection]\n else:\n 
loss = tf.reduce_sum(loss_list)\n grads = tf.gradients(loss, tvars)\n return optimizer.apply_gradients(zip(grads, tvars))\n else:\n error_list = metrics.ERROR\n # EKF error vectors are scaled\n if isinstance(optimizer, EKF):\n error = tf.concat([tf.reshape(e, [-1])/tf.math.sqrt(tf.cast(tf.size(e), e.dtype))\n for e in error_list], 0)\n # gEKF should handle this automatically\n if isinstance(optimizer, gEKF):\n error = tf.concat([tf.reshape(e, [-1]) for e in error_list], 0)\n if separate_errors:\n selection = tf.random.uniform([], maxval= len(error_list), dtype=tf.int32)\n mask = tf.concat([tf.fill([tf.size(e)], tf.equal(selection,i))\n for i,e in enumerate(error_list)], 0)\n error = tf.boolean_mask(error, mask)\n return optimizer.get_train_op(error, tvars)\n" ]
[ [ "tensorflow.compat.v1.metrics.mean", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.boolean_mask", "tensorflow.compat.v1.metrics.mean_absolute_error", "tensorflow.gradients", "tensorflow.compat.v1.metrics.root_mean_squared_error", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.estimator.Estimator", "tensorflow.compat.v1.summary.scalar", "tensorflow.size", "tensorflow.math.sqrt", "tensorflow.reduce_mean", "tensorflow.reshape", "numpy.prod", "tensorflow.compat.v1.summary.histogram", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
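`MetricsCollector.add_error` above optionally masks the error vector before forming the weighted squared loss. A minimal sketch of that masking pattern, assuming eager TensorFlow 2 and toy tensors rather than PiNN's estimator wiring:

import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
pred = tf.constant([1.5, 2.0, 2.0, 4.5])
mask = tf.constant([True, True, False, True])
weight = 2.0

error = data - pred
error = tf.boolean_mask(error, mask)          # drop entries excluded by the mask
loss = tf.reduce_mean(error ** 2 * weight)    # weighted mean squared error
print(float(loss))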
danielabler/glimslib
[ "3d345bf3ed2d364e83a00ad9297dd5f81d7193db" ]
[ "glimslib/simulation_helpers/test_unit_timeSeriesMultiData.py" ]
[ "from unittest import TestCase\nimport os\nimport numpy as np\n\nimport glimslib.utils.file_utils as fu\nfrom glimslib import fenics_local as fenics, config\nfrom glimslib.simulation_helpers.helper_classes import FunctionSpace, TimeSeriesMultiData\n\n\nclass TestTimeSeriesMultiData(TestCase):\n\n def setUp(self):\n # Domain\n nx = ny = nz = 10\n mesh = fenics.RectangleMesh(fenics.Point(-2, -2), fenics.Point(2, 2), nx, ny)\n # function spaces\n displacement_element = fenics.VectorElement(\"Lagrange\", mesh.ufl_cell(), 1)\n concentration_element = fenics.FiniteElement(\"Lagrange\", mesh.ufl_cell(), 1)\n element = fenics.MixedElement([displacement_element, concentration_element])\n subspace_names = {0: 'displacement', 1: 'concentration'}\n self.functionspace = FunctionSpace(mesh)\n self.functionspace.init_function_space(element, subspace_names)\n # build a 'solution' function\n u_0_conc_expr = fenics.Expression('sqrt(pow(x[0]-x0,2)+pow(x[1]-y0,2)) < 0.1 ? (1.0) : (0.0)', degree=1,\n x0=0.25,\n y0=0.5)\n u_0_disp_expr = fenics.Constant((1.0, 0.0))\n self.U = self.functionspace.project_over_space(function_expr={0: u_0_disp_expr, 1: u_0_conc_expr})\n\n def test_register_time_series(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n self.assertTrue(hasattr(tsmd, tsmd.time_series_prefix+'solution'))\n tsmd.register_time_series(name='solution2', functionspace=self.functionspace)\n self.assertTrue(hasattr(tsmd, tsmd.time_series_prefix+'solution2'))\n\n def test_get_time_series(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n self.assertEqual(tsmd.get_time_series('solution'), getattr(tsmd, tsmd.time_series_prefix+'solution'))\n\n def test_add_observation(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=1)\n self.assertEqual(tsmd.get_time_series('solution').get_observation(1).get_time_step(), 1)\n\n def test_get_observation(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=1)\n self.assertEqual(tsmd.get_time_series('solution').get_observation(1),\n tsmd.get_observation('solution', 1))\n self.assertEqual(tsmd.get_observation('solution3', 1), None)\n\n def test_get_solution_function(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=1)\n u = tsmd.get_solution_function('solution', subspace_id=None, recording_step=1)\n self.assertEqual(u.function_space(), self.U.function_space())\n self.assertNotEqual(u, self.U)\n\n def test_get_all_time_series(self):\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.register_time_series(name='solution2', functionspace=self.functionspace)\n ts_dict = tsmd.get_all_time_series()\n self.assertEqual(len(ts_dict), 2)\n self.assertTrue('solution' in ts_dict.keys())\n self.assertTrue('solution2' in ts_dict.keys())\n\n def test_save_to_hdf5(self):\n 
path_to_file = os.path.join(config.output_dir_testing, 'timeseries_to_hdf5.h5')\n fu.ensure_dir_exists(path_to_file)\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=2)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=3)\n tsmd.register_time_series(name='solution2', functionspace=self.functionspace)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=2)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=3)\n tsmd.save_to_hdf5(path_to_file, replace=True)\n # path_to_file2 = os.path.join(config.output_dir_testing, 'timeseries_to_hdf5_manual.h5')\n # hdf = fenics.HDF5File(self.functionspace._mesh.mpi_comm(), path_to_file2, \"w\")\n # function = tsmd.get_solution_function('solution', recording_step=1)\n # hdf.write(function, 'solution', 1)\n # hdf.write(function, 'solution', 2)\n # hdf.close()\n\n def test_load_from_hdf5(self):\n path_to_file = os.path.join(config.output_dir_testing, 'timeseries_to_hdf5_for_reading.h5')\n fu.ensure_dir_exists(path_to_file)\n # create file\n tsmd = TimeSeriesMultiData()\n tsmd.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=2)\n tsmd.add_observation('solution', field=self.U, time=1, time_step=1, recording_step=3)\n tsmd.register_time_series(name='solution2', functionspace=self.functionspace)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=1)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=2)\n tsmd.add_observation('solution2', field=self.U, time=1, time_step=1, recording_step=3)\n tsmd.save_to_hdf5(path_to_file, replace=True)\n # read file\n tsmd2 = TimeSeriesMultiData()\n tsmd2.register_time_series(name='solution', functionspace=self.functionspace)\n tsmd2.register_time_series(name='solution2', functionspace=self.functionspace)\n tsmd2.load_from_hdf5(path_to_file)\n self.assertEqual(len(tsmd2.get_all_time_series()),2)\n self.assertEqual(len(tsmd2.get_time_series('solution').get_all_recording_steps()),3)\n self.assertEqual(len(tsmd2.get_time_series('solution2').get_all_recording_steps()), 3)\n u_reloaded = tsmd2.get_solution_function(name='solution')\n # print(u_reloaded.vector().array())\n # print(self.U.vector().array())\n array_1 = u_reloaded.vector().get_local()\n array_2 = self.U.vector().get_local()\n self.assertTrue(np.allclose(array_1, array_2))\n\n\n\n" ]
[ [ "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
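The HDF5 test above checks a save/load round trip by comparing the reloaded vector against the original with `np.allclose`. As a library-agnostic stand-in for that check (plain `numpy.save`/`numpy.load` instead of the FEniCS HDF5 wrapper, with an invented vector):

import os
import tempfile
import numpy as np

original = np.random.default_rng(0).normal(size=128)      # stand-in for a DOF vector
path = os.path.join(tempfile.mkdtemp(), "roundtrip.npy")
np.save(path, original)
reloaded = np.load(path)
assert np.allclose(original, reloaded)                    # same comparison as the unit test above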
linthieda/DeepFEH
[ "273b5c695674e0d22bc7f701a5d64113034b04de" ]
[ "model/a2c.py" ]
[ "import numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom reinforce import Reinforce\nfrom model import CriticNet\n\nimport feh_simulator.simulator as gym\n\nclass A2C(Reinforce):\n # Implementation of N-step Advantage Actor Critic.\n # This class inherits the Reinforce class, so for example, you can reuse\n # generate_episode() here.\n\n def __init__(self, env, lr, critic_lr, gamma, n, policy_path, critic_path, load=False):\n # Initializes A2C.\n # Args:\n # - model: The actor model.\n # - lr: Learning rate for the actor model.\n # - critic_model: The critic model.\n # - critic_lr: Learning rate for the critic model.\n # - n: The value of N in N-step A2C.\n Reinforce.__init__(self, env, lr, gamma=gamma, save_path=policy_path, load=load)\n self.critic_path = critic_path\n s_len = self.env.observation_space_shape[0]\n self.critic = CriticNet(critic_lr, s_len=s_len)\n self.n = n\n if load:\n self.critic.load(self.critic_path)\n print(\"Hyperparameters:\\nPolicy LR = {} Critic LR = {} Gamma = {} N = {} \\nPolicy Path = {} \\nCritic Path = {} \\nLoad = {}\".format(\n lr, critic_lr, gamma, n, policy_path, critic_path, load\n ))\n return\n\n def train(self):\n # Trains the model on a single episode using A2C.\n K = 500\n print(\"pretrain test:\")\n print('episode 0 ', end='')\n self.test()\n print(\"training\")\n # generate an episode\n gamma_n_1 = self.gamma ** (self.n - 1)\n gamma_n = gamma_n_1 * self.gamma\n for i in range(10000000):\n s, ava, a, r = self.generate_episode()\n s = np.array(s)\n r = np.array(r)\n r /= 100.0\n T = len(r)\n if self.n >= T:\n n = T - 1\n else:\n n = self.n\n sum_r = np.zeros(shape=(T, ), dtype=np.float32)\n sum_r[T - 1] = r[T - 1]\n for p in range(2, n + 1, 1):\n sum_r[T - p] = sum_r[T - p + 1] * self.gamma + r[T - p]\n for q in range(n + 1, T + 1, 1):\n sum_r[T - q] = (sum_r[T - q + 1] - gamma_n_1 * r[T - q + n]) * self.gamma + r[T - q]\n\n V_end = np.zeros(shape=(T,), dtype=np.float32)\n\n for j in range(6):\n V = self.critic.predict(s)\n V_end[0:T-n] = V[n: T]\n R = gamma_n * V_end + sum_r\n G = R - V \n self.model.fit(s, ava, a, G)\n self.critic.fit(s, R)\n \n if (i + 1) % K == 0:\n print('episode {} '.format(i + 1), end='')\n self.test()\n self.model.save(self.save_path)\n self.critic.save(self.critic_path)\n self.model.save(self.save_path)\n return\n\n\ndef main():\n env = gym.make('FEH-v1')\n n = 50\n a2c = A2C(env=env, lr=0.0001, gamma=0.99, critic_lr=0.0001, n=n,\n policy_path=\"./saved_model/a2c_policy-v2-n{}.h5\".format(n),\n critic_path=\"./saved_model/a2c_critic_v2-n{}.h5\".format(n),\n load=False)\n a2c.train()\n return\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.use", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
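The A2C training loop above builds N-step targets by accumulating discounted rewards backwards and bootstrapping with `gamma ** n * V_end`. A hedged forward-looking restatement of that target on toy arrays (not the repo's vectorized recursion):

import numpy as np

def n_step_targets(rewards, values, gamma, n):
    # R_t = sum_{k<n} gamma^k * r_{t+k} + gamma^n * V(s_{t+n}), truncated at the episode end.
    T = len(rewards)
    targets = np.zeros(T)
    for t in range(T):
        end = min(t + n, T)
        targets[t] = sum(gamma ** (k - t) * rewards[k] for k in range(t, end))
        if end < T:                      # bootstrap only while the window stays inside the episode
            targets[t] += gamma ** n * values[end]
    return targets

rewards = np.array([0.0, 0.0, 1.0, 0.0, 5.0])
values = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
print(n_step_targets(rewards, values, gamma=0.99, n=2))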
janosh/mnf-bnn
[ "955b083027d8757716e1cdb6baaf908964546e6d" ]
[ "tf_mnf/flows/rnvp.py" ]
[ "import tensorflow as tf\n\n\nclass RNVP(tf.Module):\n \"\"\"Affine half (aka Real Non-Volume Preserving) flow (x = z * exp(s) + t),\n where a randomly selected half z1 of the dimensions in z are transformed as an\n affine function of the other half z2, i.e. scaled by s(z2) and shifted by t(z2).\n\n From \"Density estimation using Real NVP\", Dinh et al. (May 2016)\n https://arxiv.org/abs/1605.08803\n\n This implementation uses the numerically stable updates introduced by IAF:\n https://arxiv.org/abs/1606.04934\n \"\"\"\n\n def __init__(self, dim, h_sizes=[30], activation=\"tanh\", **kwargs):\n super().__init__(**kwargs)\n layers = [tf.keras.layers.Dense(hs, activation) for hs in h_sizes]\n self.net = tf.keras.Sequential(layers)\n self.t = tf.keras.layers.Dense(dim)\n self.s = tf.keras.layers.Dense(dim)\n\n def forward(self, z): # z -> x\n # Get random Bernoulli mask. This decides which channels will remain\n # unchanged and which will be transformed as functions of the unchanged.\n mask = tf.keras.backend.random_binomial(tf.shape(z), p=0.5)\n z1, z2 = (1 - mask) * z, mask * z\n y = self.net(z2)\n shift = self.t(y)\n scale = self.s(y)\n\n # sigmoid(x) = 1 / (1 + exp(-x)). For x in (-inf, inf) => sigmoid(x) in (0, 1).\n gate = tf.sigmoid(scale)\n log_dets = tf.reduce_sum((1 - mask) * tf.math.log(gate), axis=1)\n x = (z1 * gate + (1 - gate) * shift) + z2\n\n return x, log_dets\n" ]
[ [ "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.sigmoid", "tensorflow.keras.Sequential", "tensorflow.math.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
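The `forward` pass above applies the numerically stable IAF-style update: a sigmoid gate blends the selected half with the predicted shift, and the log-determinant is the sum of log-gates over the transformed dimensions. A NumPy mirror of just that arithmetic, with the mask, shift and scale supplied by hand instead of by the Keras layers:

import numpy as np

def gated_affine_half(z, mask, shift, scale):
    gate = 1.0 / (1.0 + np.exp(-scale))        # sigmoid, always in (0, 1)
    z1, z2 = (1 - mask) * z, mask * z          # transformed half vs. conditioning half
    x = z1 * gate + (1 - gate) * shift + z2
    log_det = np.sum((1 - mask) * np.log(gate), axis=1)
    return x, log_det

z = np.array([[0.5, -1.0, 2.0, 0.3]])
mask = np.array([[0.0, 1.0, 0.0, 1.0]])        # 1 = pass through unchanged, 0 = transform
shift = np.full_like(z, 0.2)
scale = np.full_like(z, 0.1)
print(gated_affine_half(z, mask, shift, scale))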
masasin/spirit
[ "c8366e649eb105a8a579fb7a47dcc5aaeae6a0d8" ]
[ "src/visualization/plot_thesis.py" ]
[ "from functools import partial\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn.apionly as sns\n\nfrom ..analysis.csv_analysis import analyze_data, load_surveys\nfrom ..data.survey_utils import ExperimentType\nfrom .latexify import latexify, figure, fig_size\nfrom .plot_tools import plot_detailed, plot_distribution, plot_overview\n\n\n# Colours\ndefault_cycler = plt.rcParamsDefault[\"axes.prop_cycle\"]\ncolorblind_cmaps = [\"Dark2\", \"Set2\"]\ncmap_main, cmap_complement = colorblind_cmaps\n# cmap_main, cmap_complement = cmap_complement, cmap_main\ncolorblind_cyclers = {cmap: plt.cycler(\"color\", plt.cm.get_cmap(cmap).colors)\n for cmap in colorblind_cmaps}\nplt.rcParams[\"axes.prop_cycle\"] = colorblind_cyclers[cmap_main]\n\n\nFIGURE_DIR = Path(__file__).parent.joinpath(\"../../reports/thesis/img/plots\")\nfigure = partial(figure, folder=FIGURE_DIR, exts=[\"pdf\", \"pgf\"])\n\n\ndef do_drone_dos():\n with figure(\"ardrone_dos\", size=fig_size(0.45)):\n distances = np.array([0, 2, 8, 18, 23, 29, 34, 40,\n 45, 51, 56, 62, 67, 72, 78, 80])\n powers = np.array([90, 90, 86, 60, 50, 62, 35, 26,\n 24, 12, 20, 22, 26, 22, 12, 5])\n\n fig, ax1 = plt.subplots()\n ax1.step(distances, powers, lw=0.5)\n ax1.set_xlabel(\"distance (m)\")\n ax1.set_ylabel(r\"signal (\\%)\")\n ax1.set_ylim(0, 100)\n\n x_range = np.arange(80)\n best_fit = 10 * np.log10(6 / (1e5 * x_range**2.7))\n\n ax2 = ax1.twinx()\n ax2.plot(x_range, best_fit, c=\"C1\", lw=0.5)\n ax2.set_ylim(-100, -50)\n ax2.yaxis.set_tick_params(which=\"both\", labelright=False, right=False)\n\n plt.legend([ax.get_children()[0] for ax in (ax1, ax2)], [\"data\", \"fit\"])\n\n\ndef do_paths():\n with figure(\"paths_overview\", size=fig_size(0.75, 0.8)):\n ax1 = plt.subplot(\"121\")\n plot_overview(results, ExperimentType.Onboard, color=\"C0\", size_point=2,\n drone_width=0.5)\n ax2 = plt.subplot(\"122\", sharex=ax1, sharey=ax1)\n plot_overview(results, ExperimentType.Spirit, color=\"C1\", size_point=2,\n ylabel=\"\", drone_width=0.5)\n plt.setp(ax2.get_yticklabels(), visible=False)\n\n with figure(\"paths_detailed\", size=fig_size(0.75, 0.7)):\n ax1 = plt.subplot(\"121\")\n plot_detailed(results, ExperimentType.Onboard, color=\"C0\",\n size_point=2, crosshair=True, drone_width=0.5)\n ax1.legend_.remove()\n ax2 = plt.subplot(\"122\", sharex=ax1, sharey=ax1)\n plot_detailed(results, ExperimentType.Spirit, color=\"C1\", ylabel=\"\",\n size_point=2, crosshair=True, drone_width=0.5)\n ax2.legend_.remove()\n plt.setp(ax2.get_yticklabels(), visible=False)\n\n\ndef do_distributions():\n with figure(\"distribution_onboard\", size=fig_size(0.44, 1)):\n plot_distribution(results, ExperimentType.Onboard, color=\"C0\",\n crosshair=True, drone_width=0.5)\n\n with figure(\"distribution_spirit\", size=fig_size(0.44, 1)):\n plot_distribution(results, ExperimentType.Spirit, color=\"C1\",\n crosshair=True, drone_width=0.5)\n\n\ndef do_durations():\n with figure(\"duration\", size=fig_size(0.44, 1)):\n sns.factorplot(x=\"experiment\", y=\"duration\", data=analyses, kind=\"box\")\n sns.swarmplot(x=\"experiment\", y=\"duration\", split=True, data=analyses,\n palette=cmap_complement)\n plt.ylim(0, plt.ylim()[1])\n plt.ylabel(\"duration (s)\")\n\n with figure(\"duration_runs\", size=fig_size(0.44, 1)):\n sns.factorplot(x=\"order\", y=\"duration\", hue=\"experiment\", data=analyses,\n capsize=0.2)\n plt.ylim(0, plt.ylim()[1])\n plt.ylabel(\"duration (s)\")\n plt.xlabel(\"run\")\n\n\ndef do_movement():\n with 
figure(\"movement\", size=fig_size(0.9, 0.4)):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"path_length\", \"move_x\", \"move_y\"])\n g = sns.factorplot(x=\"experiment\", y=\"value\", col=\"variable\",\n data=molten, kind=\"box\")\n g.fig.axes[0].set_title(\"Path length\")\n g.fig.axes[1].set_title(\"Movement in $x$\")\n g.fig.axes[2].set_title(\"Movement in $y$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n plt.ylim(0, plt.ylim()[1])\n\n with figure(\"movement_x\"):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"move_l\", \"move_r\", \"move_x\"])\n g = sns.factorplot(x=\"experiment\", y=\"value\", col=\"variable\",\n data=molten, kind=\"box\")\n g.fig.axes[0].set_title(\"Movement left\")\n g.fig.axes[1].set_title(\"Movement right\")\n g.fig.axes[2].set_title(\"Movement in $x$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n plt.ylim(0, plt.ylim()[1])\n\n with figure(\"movement_y\"):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"move_b\", \"move_f\", \"move_y\"])\n g = sns.factorplot(x=\"experiment\", y=\"value\", col=\"variable\",\n data=molten, kind=\"box\")\n g.fig.axes[0].set_title(\"Movement backwards\")\n g.fig.axes[1].set_title(\"Movement forwards\")\n g.fig.axes[2].set_title(\"Movement in $y$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n plt.ylim(0, plt.ylim()[1])\n\n with figure(\"movement_back\"):\n sns.factorplot(x=\"experiment\", y=\"move_b\", data=analyses, kind=\"box\")\n sns.swarmplot(x=\"experiment\", y=\"move_b\", split=True, data=analyses,\n palette=cmap_complement)\n plt.ylabel(\"distance (m)\")\n plt.title(\"Movement backwards\")\n\n with figure(\"movement_runs\", size=fig_size(0.9, 0.4)):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"path_length\", \"move_x\", \"move_y\"])\n g = sns.factorplot(x=\"order\", y=\"value\", col=\"variable\",\n data=molten, hue=\"experiment\", capsize=0.2)\n g.fig.axes[0].set_title(\"Path length\")\n g.fig.axes[1].set_title(\"Movement in $x$\")\n g.fig.axes[2].set_title(\"Movement in $y$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n g.fig.axes[0].set_xlabel(\"run\")\n g.fig.axes[1].set_xlabel(\"run\")\n g.fig.axes[2].set_xlabel(\"run\")\n plt.ylim(0, plt.ylim()[1])\n\n with figure(\"movement_x_runs\"):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"move_l\", \"move_r\", \"move_x\"])\n g = sns.factorplot(x=\"order\", y=\"value\", col=\"variable\",\n data=molten, hue=\"experiment\")\n g.fig.axes[0].set_title(\"Movement left\")\n g.fig.axes[1].set_title(\"Movement right\")\n g.fig.axes[2].set_title(\"Movement in $x$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n g.fig.axes[0].set_xlabel(\"run\")\n g.fig.axes[1].set_xlabel(\"run\")\n g.fig.axes[2].set_xlabel(\"run\")\n plt.ylim(0, plt.ylim()[1])\n\n with figure(\"movement_y_runs\"):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"move_b\", \"move_f\", \"move_y\"])\n g = sns.factorplot(x=\"order\", y=\"value\", col=\"variable\",\n data=molten, hue=\"experiment\")\n g.fig.axes[0].set_title(\"Movement backwards\")\n g.fig.axes[1].set_title(\"Movement forwards\")\n g.fig.axes[2].set_title(\"Movement in $y$\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n g.fig.axes[0].set_xlabel(\"run\")\n g.fig.axes[1].set_xlabel(\"run\")\n 
g.fig.axes[2].set_xlabel(\"run\")\n plt.ylim(0, plt.ylim()[1])\n\n\ndef do_errors():\n with figure(\"rms\", size=fig_size(0.9, 0.4)):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"rms\", \"rms_x\", \"rms_y\"])\n g = sns.factorplot(x=\"experiment\", y=\"value\", col=\"variable\",\n data=molten, kind=\"box\")\n g.fig.axes[0].set_title(\"RMS Error*\")\n g.fig.axes[1].set_title(\"RMS Error in $x$*\")\n g.fig.axes[2].set_title(\"RMS Error in $y$*\")\n g.fig.axes[0].set_ylabel(\"error (m)\")\n\n with figure(\"rms_runs\", size=fig_size(0.9, 0.4)):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[\"rms\", \"rms_x\", \"rms_y\"])\n g = sns.factorplot(x=\"order\", y=\"value\", col=\"variable\",\n hue=\"experiment\", data=molten, capsize=0.2)\n g.fig.axes[0].set_title(\"RMS Error\")\n g.fig.axes[1].set_title(\"RMS Error in $x$\")\n g.fig.axes[2].set_title(\"RMS Error in $y$\")\n g.fig.axes[0].set_ylabel(\"error (m)\")\n g.fig.axes[0].set_xlabel(\"run\")\n g.fig.axes[1].set_xlabel(\"run\")\n g.fig.axes[2].set_xlabel(\"run\")\n\n with figure(\"distance\", size=fig_size(0.9, 0.4)):\n molten = pd.melt(analyses,\n id_vars=[\"user\", \"experiment\", \"order\", \"group\"],\n value_vars=[r\"dist_err\", r\"x_err\", r\"y_err\"])\n g = sns.factorplot(x=\"experiment\", y=\"value\", col=\"variable\",\n data=molten, kind=\"box\")\n g.fig.axes[0].set_title(\"Distance from target*\")\n g.fig.axes[1].set_title(\"Distance from target in $x$\")\n g.fig.axes[2].set_title(\"Distance from target in $y$*\")\n g.fig.axes[0].set_ylabel(\"distance (m)\")\n g.axes[0][0].axhline(0, color=\"black\", linewidth=1, zorder=-1)\n g.axes[0][1].axhline(0, color=\"black\", linewidth=1, zorder=-1)\n g.axes[0][2].axhline(0, color=\"black\", linewidth=1, zorder=-1)\n\n\ndef do_surveys():\n with figure(\"tlx_results\", size=fig_size(0.44, 1)):\n sns.factorplot(x=\"experiment\", y=\"tlx\", data=tlx, kind=\"box\")\n sns.swarmplot(x=\"experiment\", y=r\"tlx\",\n data=tlx, palette=cmap_complement, split=True)\n plt.ylim(0, plt.ylim()[1])\n plt.ylabel(\"NASA-TLX weighted score*\")\n\n with figure(\"tlx_components\", size=fig_size(0.44, 1)):\n components = [\"mental\", \"physical\", \"temporal\", \"performance\",\n \"effort\", \"frustration\"]\n molten = pd.melt(tlx, id_vars=[\"user\", \"experiment\", \"order\"],\n value_vars=components,\n var_name=\"component\", value_name=\"score\")\n sns.barplot(x=r\"component\", y=\"score\", hue=\"experiment\", data=molten)\n\n plt.gca().set_xticklabels(\n [\"MD\", \"PD\", \"TD\", \"P\", \"E\", \"F\"])\n\n plt.xlabel(\"NASA-TLX component\")\n plt.ylabel(\"score\")\n\n with figure(\"survey_results\", size=fig_size(0.44, 1)):\n sns.factorplot(x=\"experiment\", y=\"total\", data=surveys, kind=\"box\")\n sns.swarmplot(x=\"experiment\", y=r\"total\", data=surveys,\n palette=cmap_complement, split=True)\n plt.ylim(0, plt.ylim()[1])\n plt.ylabel(\"survey score*\")\n\n with figure(\"survey_components\", size=fig_size(0.44, 1)):\n components = [r\"orientation_understanding\", r\"orientation_control\",\n r\"position_understanding\", r\"position_control\",\n r\"spacial_understanding\", r\"spacial_control\"]\n molten = pd.melt(surveys, id_vars=[\"user\", \"experiment\", \"order\"],\n value_vars=components,\n var_name=\"question\", value_name=\"rating\")\n sns.barplot(x=r\"question\", y=\"rating\", hue=\"experiment\", data=molten)\n\n plt.gca().set_xticklabels(\n [\"OA\", \"OC\", \"PA*\", \"PC*\", \"RA*\", 
\"RC*\"])\n\n plt.xlabel(\"question\")\n plt.ylabel(\"rating\")\n\n with figure(\"survey_overview\", size=fig_size(0.9, 0.5)):\n molten = pd.melt(surveys, id_vars=[\"user\", \"experiment\", \"order\"],\n value_vars=[r\"orientation_understanding\",\n r\"orientation_control\",\n r\"position_understanding\",\n r\"position_control\",\n r\"spacial_understanding\",\n r\"spacial_control\"],\n var_name=\"question\", value_name=\"rating\")\n g = sns.barplot(x=r\"rating\", y=r\"question\", hue=\"experiment\",\n data=molten)\n sns.stripplot(x=\"rating\", y=r\"question\", data=molten, hue=\"experiment\",\n split=True, palette=cmap_complement, jitter=0.6, size=3)\n\n plt.gca().set_yticklabels(\n [\"angle aware\", \"angle control\",\n \"position aware*\", \"position control*\",\n \"rel. pos. aware*\", \"rel. pos. control*\"])\n\n handles, labels = g.get_legend_handles_labels()\n plt.legend(handles[2:], labels[2:])\n plt.xlabel(\"rating\")\n plt.title(\"Survey results\")\n\n\nif __name__ == \"__main__\":\n latexify()\n\n do_drone_dos()\n\n results, analyses = analyze_data()\n do_paths()\n do_distributions()\n do_durations()\n do_movement()\n do_errors()\n\n users, tlx, surveys = load_surveys()\n do_surveys()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.cm.get_cmap", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplot", "numpy.log10", "matplotlib.pyplot.xlabel", "numpy.array", "pandas.melt", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
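Most of the grouped plots above first reshape the wide per-run measurements into long form with `pd.melt`. A minimal sketch of that reshaping on an invented frame (column names borrowed from the file, values made up):

import pandas as pd

analyses = pd.DataFrame({
    "user": [1, 1, 2, 2],
    "experiment": ["onboard", "spirit", "onboard", "spirit"],
    "rms_x": [0.21, 0.15, 0.30, 0.18],
    "rms_y": [0.42, 0.25, 0.38, 0.22],
})

# One row per (user, experiment, variable) triple, ready for seaborn's grouped plots.
molten = pd.melt(analyses, id_vars=["user", "experiment"],
                 value_vars=["rms_x", "rms_y"],
                 var_name="variable", value_name="value")
print(molten)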
sunandita/ICAPS_Summer_School_RAE_2020
[ "a496b62185bcfdd2c76eb7986ae99cfa85708d28", "a496b62185bcfdd2c76eb7986ae99cfa85708d28", "a496b62185bcfdd2c76eb7986ae99cfa85708d28", "a496b62185bcfdd2c76eb7986ae99cfa85708d28" ]
[ "problems/OF/auto/problem80_OF.py", "problems/OF/auto/problem52_OF.py", "problems/OF/auto/problem14_OF.py", "problems/OF/auto/problem31_OF.py" ]
[ "__author__ = 'mason'\n\nfrom domain_orderFulfillment import *\nfrom timer import DURATION\nfrom state import state\nimport numpy as np\n\n'''\nThis is a randomly generated problem\n'''\n\ndef GetCostOfMove(id, r, loc1, loc2, dist):\n return 1 + dist\n\ndef GetCostOfLookup(id, item):\n return max(1, np.random.beta(2, 2))\n\ndef GetCostOfWrap(id, orderName, m, item):\n return max(1, np.random.normal(5, .5))\n\ndef GetCostOfPickup(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfPutdown(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfLoad(id, orderName, r, m, item):\n return max(1, np.random.normal(3, .5))\n\nDURATION.TIME = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nDURATION.COUNTER = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nrv.LOCATIONS = [0, 1, 2, 3, 4, 5, 6, 7, 200]\nrv.FACTORY1 = frozenset({0, 1, 2, 3, 4, 5, 6, 7, 200})\nrv.FACTORY_UNION = rv.FACTORY1\nrv.SHIPPING_DOC = {rv.FACTORY1: 2}\n\nrv.GROUND_EDGES = {0: [5, 6, 3], 1: [5, 7], 2: [5, 200, 3, 6], 3: [0, 2, 5], 4: [5], 5: [4, 0, 1, 2, 3, 200], 6: [0, 2, 7, 200], 7: [1, 6], 200: [5, 6, 2]}\nrv.GROUND_WEIGHTS = {(0, 5): 7.9850546018862145, (0, 6): 6.9468710812757815, (0, 3): 12.099718445874334, (1, 5): 7.528748667927151, (1, 7): 6.418070736597217, (2, 5): 2.638611160543803, (2, 200): 7.8118804436721145, (2, 3): 3.9268270102333664, (2, 6): 10.6254313640586, (3, 5): 5.469893763260881, (4, 5): 5.862988480314365, (5, 200): 7.930686919202043, (6, 7): 11.9166581376803, (6, 200): 7.026333797476917}\n\nrv.ROBOTS = { 'r0': rv.FACTORY1, }\nrv.ROBOT_CAPACITY = {'r0': 7.295545079301372}\nrv.MACHINES = { 'm0': rv.FACTORY1, }\nrv.PALLETS = { 'p0', 'p1', }\n\n\ndef ResetState():\n state.OBJECTS = { 'o0': True, 'o1': True, 'o2': True, 'o3': True, 'o4': True, 'o5': True, 'o6': True, 'o7': True, }\n state.OBJ_WEIGHT = {'o0': 6.707214929077857, 'o1': 6.448795041001162, 'o2': 5.54703539784075, 'o3': 7.295545079301372, 'o4': 6.655084061734898, 'o5': 7.295545079301372, 'o6': 7.235373845668021, 'o7': 4.7440365198981445}\n state.OBJ_CLASS = {'type0': ['o0'], 'type1': ['o1', 'o2', 'o3', 'o4', 'o5', 'o6'], 'type2': ['o7']}\n\n state.loc = { 'r0': 4, 'm0': 6, 'p0': 0, 'p1': 3, 'o0': 4, 'o1': 7, 'o2': 5, 'o3': 200, 'o4': 200, 'o5': 4, 'o6': 200, 'o7': 5,}\n state.load = { 'r0': NIL,}\n state.busy = {'r0': False, 'm0': False}\n state.numUses = {'m0': 10}\n state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}\n state.shouldRedo = {}\n\ntasks = {\n 3: [['orderStart', ['type0']]],\n 8: [['orderStart', ['type1']]],\n}\neventsEnv = {\n}", "__author__ = 'mason'\n\nfrom domain_orderFulfillment import *\nfrom timer import DURATION\nfrom state import state\nimport numpy as np\n\n'''\nThis is a randomly generated problem\n'''\n\ndef GetCostOfMove(id, r, loc1, loc2, dist):\n return 1 + dist\n\ndef GetCostOfLookup(id, item):\n return max(1, np.random.beta(2, 2))\n\ndef GetCostOfWrap(id, orderName, m, item):\n return max(1, np.random.normal(5, .5))\n\ndef GetCostOfPickup(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfPutdown(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfLoad(id, 
orderName, r, m, item):\n return max(1, np.random.normal(3, .5))\n\nDURATION.TIME = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nDURATION.COUNTER = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nrv.LOCATIONS = [0, 1, 2, 200]\nrv.FACTORY1 = frozenset({0, 1, 2, 200})\nrv.FACTORY_UNION = rv.FACTORY1\nrv.SHIPPING_DOC = {rv.FACTORY1: 0}\n\nrv.GROUND_EDGES = {0: [1, 2, 200], 1: [0, 2, 200], 2: [1, 0, 200], 200: [1, 2, 0]}\nrv.GROUND_WEIGHTS = {(0, 1): 8.589176442828045, (0, 2): 6.934232690357609, (0, 200): 6.715426276444552, (1, 2): 1, (1, 200): 8.070342450610243, (2, 200): 8.770513318640017}\n\nrv.ROBOTS = { 'r0': rv.FACTORY1, }\nrv.ROBOT_CAPACITY = {'r0': 6.994062156388614}\nrv.MACHINES = { 'm0': rv.FACTORY1, 'm1': rv.FACTORY1, 'm2': rv.FACTORY1, }\nrv.PALLETS = { 'p0', }\n\n\ndef ResetState():\n state.OBJECTS = { 'o0': True, 'o1': True, 'o2': True, }\n state.OBJ_WEIGHT = {'o0': 6.994062156388614, 'o1': 5.913387765231502, 'o2': 6.188936069732975}\n state.OBJ_CLASS = {'type0': ['o0', 'o1', 'o2']}\n\n state.loc = { 'r0': 1, 'm0': 2, 'm1': 2, 'm2': 1, 'p0': 200, 'o0': 2, 'o1': 200, 'o2': 2,}\n state.load = { 'r0': NIL,}\n state.busy = {'r0': False, 'm0': False, 'm1': False, 'm2': False}\n state.numUses = {'m0': 8, 'm1': 9, 'm2': 10}\n state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}\n state.shouldRedo = {}\n\ntasks = {\n 1: [['orderStart', ['type0']]],\n}\neventsEnv = {\n}", "__author__ = 'mason'\n\nfrom domain_orderFulfillment import *\nfrom timer import DURATION\nfrom state import state\nimport numpy as np\n\n'''\nThis is a randomly generated problem\n'''\n\ndef GetCostOfMove(id, r, loc1, loc2, dist):\n return 1 + dist\n\ndef GetCostOfLookup(id, item):\n return max(1, np.random.beta(2, 2))\n\ndef GetCostOfWrap(id, orderName, m, item):\n return max(1, np.random.normal(5, .5))\n\ndef GetCostOfPickup(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfPutdown(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfLoad(id, orderName, r, m, item):\n return max(1, np.random.normal(3, .5))\n\nDURATION.TIME = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nDURATION.COUNTER = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nrv.LOCATIONS = [0, 1, 2, 3, 4, 5, 200]\nrv.FACTORY1 = frozenset({0, 1, 2, 3, 4, 5, 200})\nrv.FACTORY_UNION = rv.FACTORY1\nrv.SHIPPING_DOC = {rv.FACTORY1: 2}\n\nrv.GROUND_EDGES = {0: [1, 3, 4, 5, 200], 1: [0, 2, 5], 2: [1, 200], 3: [0, 5, 4], 4: [3, 0, 200], 5: [0, 1, 3, 200], 200: [0, 2, 4, 5]}\nrv.GROUND_WEIGHTS = {(0, 1): 15.26505604326184, (0, 3): 13.072190972915541, (0, 4): 2.535437866030862, (0, 5): 5.525679171120722, (0, 200): 8.517425095335629, (1, 2): 1.0754961711809683, (1, 5): 10.267652963485919, (2, 200): 5.995996755145361, (3, 5): 7.241187251282743, (3, 4): 1, (4, 200): 
11.199559770138954, (5, 200): 3.864924395629216}\n\nrv.ROBOTS = { 'r0': rv.FACTORY1, 'r1': rv.FACTORY1, }\nrv.ROBOT_CAPACITY = {'r0': 10.41926804259969, 'r1': 11.776662726525505}\nrv.MACHINES = { 'm0': rv.FACTORY1, 'm1': rv.FACTORY1, 'm2': rv.FACTORY1, 'm3': rv.FACTORY1, }\nrv.PALLETS = { 'p0', 'p1', }\n\n\ndef ResetState():\n state.OBJECTS = { 'o0': True, }\n state.OBJ_WEIGHT = {'o0': 9.477416531242499}\n state.OBJ_CLASS = {'type0': ['o0']}\n\n state.loc = { 'r0': 2, 'r1': 5, 'm0': 5, 'm1': 1, 'm2': 5, 'm3': 2, 'p0': 200, 'p1': 3, 'o0': 3,}\n state.load = { 'r0': NIL, 'r1': NIL,}\n state.busy = {'r0': False, 'r1': False, 'm0': False, 'm1': False, 'm2': False, 'm3': False}\n state.numUses = {'m0': 12, 'm1': 9, 'm2': 8, 'm3': 8}\n state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}\n state.shouldRedo = {}\n\ntasks = {\n 1: [['orderStart', ['type0']]],\n}\neventsEnv = {\n}", "__author__ = 'mason'\n\nfrom domain_orderFulfillment import *\nfrom timer import DURATION\nfrom state import state\nimport numpy as np\n\n'''\nThis is a randomly generated problem\n'''\n\ndef GetCostOfMove(id, r, loc1, loc2, dist):\n return 1 + dist\n\ndef GetCostOfLookup(id, item):\n return max(1, np.random.beta(2, 2))\n\ndef GetCostOfWrap(id, orderName, m, item):\n return max(1, np.random.normal(5, .5))\n\ndef GetCostOfPickup(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfPutdown(id, r, item):\n return max(1, np.random.normal(4, 1))\n\ndef GetCostOfLoad(id, orderName, r, m, item):\n return max(1, np.random.normal(3, .5))\n\nDURATION.TIME = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nDURATION.COUNTER = {\n 'lookupDB': GetCostOfLookup,\n 'wrap': GetCostOfWrap,\n 'pickup': GetCostOfPickup,\n 'putdown': GetCostOfPutdown,\n 'loadMachine': GetCostOfLoad,\n 'moveRobot': GetCostOfMove,\n 'acquireRobot': 1,\n 'freeRobot': 1,\n 'wait': 5\n}\n\nrv.LOCATIONS = [0, 1, 2, 3, 200]\nrv.FACTORY1 = frozenset({0, 1, 2, 3, 200})\nrv.FACTORY_UNION = rv.FACTORY1\nrv.SHIPPING_DOC = {rv.FACTORY1: 0}\n\nrv.GROUND_EDGES = {0: [1, 200], 1: [0, 2, 3, 200], 2: [1, 3, 200], 3: [1, 2], 200: [0, 1, 2]}\nrv.GROUND_WEIGHTS = {(0, 1): 12.60763448992882, (0, 200): 9.89863572980953, (1, 2): 8.346711200228176, (1, 3): 7.069838519742104, (1, 200): 4.70693278171893, (2, 3): 10.526279141411713, (2, 200): 9.54241377793227}\n\nrv.ROBOTS = { 'r0': rv.FACTORY1, }\nrv.ROBOT_CAPACITY = {'r0': 7.4116857535645275}\nrv.MACHINES = { 'm0': rv.FACTORY1, }\nrv.PALLETS = { 'p0', 'p1', }\n\n\ndef ResetState():\n state.OBJECTS = { 'o0': True, 'o1': True, 'o2': True, 'o3': True, 'o4': True, }\n state.OBJ_WEIGHT = {'o0': 7.4116857535645275, 'o1': 7.4116857535645275, 'o2': 3.944413000881914, 'o3': 7.4116857535645275, 'o4': 6.694104791621712}\n state.OBJ_CLASS = {'type0': ['o0'], 'type1': ['o1', 'o2', 'o3', 'o4']}\n\n state.loc = { 'r0': 200, 'm0': 0, 'p0': 3, 'p1': 0, 'o0': 3, 'o1': 1, 'o2': 2, 'o3': 200, 'o4': 0,}\n state.load = { 'r0': NIL,}\n state.busy = {'r0': False, 'm0': False}\n state.numUses = {'m0': 7}\n state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}\n state.shouldRedo = {}\n\ntasks = {\n 1: [['orderStart', ['type1']]],\n}\neventsEnv = {\n}" ]
[ [ "numpy.random.normal", "numpy.random.beta" ], [ "numpy.random.normal", "numpy.random.beta" ], [ "numpy.random.normal", "numpy.random.beta" ], [ "numpy.random.normal", "numpy.random.beta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ruanchaves/Dual-encoder-Entity-Retrieval-with-BERT
[ "ff8c7933afaf0b2c40a7df0250f4b82a5868dc2a" ]
[ "data_reader.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\nimport torch\nimport pdb\nfrom typing import Iterator\nfrom allennlp.data import Instance\nfrom allennlp.data.dataset_readers import DatasetReader\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer, PretrainedTransformerIndexer\nfrom allennlp.data.fields import SpanField, ListField, TextField, MetadataField, ArrayField, SequenceLabelField, LabelField\nfrom allennlp.data.tokenizers import Token\nfrom utils import OnlyFixedDatasetLoader, KBConstructor_fromKGemb, FixedNegativesEntityLoader\nfrom overrides import overrides\nimport random\nimport transformers\nfrom utils import from_original_sentence2left_mention_right_tokens_before_berttokenized\n\n# SEEDS are FIXED\ntorch.backends.cudnn.deterministic = True\nseed = 777\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\nclass FixedDatasetTokenizedReader(DatasetReader):\n def __init__(self,args, canonical_and_def_connecttoken, token_indexers=None):\n super().__init__(lazy=args.allen_lazyload)\n\n self.args = args\n self.max_context_len = args.max_context_len\n self.max_canonical_len = args.max_canonical_len\n self.max_def_len = args.max_def_len\n\n self.token_indexers = self.token_indexer_returner()\n self.berttokenizer = self.berttokenizer_returner()\n\n linking_dataset_loader = OnlyFixedDatasetLoader(args=args)\n self.id2line, self.train_mention_id, self.dev_mention_id, self.test_mention_id = linking_dataset_loader.id2line_trn_dev_test_loader()\n\n print('loading KB')\n self.kbclass = KBConstructor_fromKGemb(args=self.args)\n self.setting_original_KB()\n print('original KB loaded')\n self.ignored_mention_idxs = self.to_be_ignored_mention_idx_checker()\n self.mention_start_token, self.mention_end_token = '[unused1]', '[unused2]'\n self.canonical_and_def_connecttoken = canonical_and_def_connecttoken\n\n def setting_original_KB(self):\n self.cui2idx, self.idx2cui, self.cui2emb, self.cui2cano, self.cui2def = self.kbclass.return_original_KB()\n\n def currently_stored_KB_dataset_returner(self):\n return self.cui2idx, self.idx2cui, self.cui2emb, self.cui2cano, self.cui2def\n\n def huggingfacename_returner(self):\n 'Return huggingface modelname and do_lower_case parameter'\n if self.args.bert_name == 'bert-base-uncased':\n return 'bert-base-uncased', True\n elif self.args.bert_name == 'biobert':\n return './biobert_transformers/', False\n else:\n print('Currently',self.args.bert_name,'are not supported.')\n exit()\n\n def token_indexer_returner(self):\n huggingface_name, do_lower_case = self.huggingfacename_returner()\n return {'tokens': PretrainedTransformerIndexer(\n model_name=huggingface_name,\n do_lowercase=do_lower_case)\n }\n\n def berttokenizer_returner(self):\n if self.args.bert_name == 'bert-base-uncased':\n vocab_file = './vocab_file/bert-base-uncased-vocab.txt'\n do_lower_case = True\n elif self.args.bert_name == 'biobert':\n vocab_file = './vocab_file/biobert_v1.1_pubmed_vocab.txt'\n do_lower_case = False\n else:\n print('currently not supported:', self.args.bert_name)\n raise NotImplementedError\n return transformers.BertTokenizer(vocab_file=vocab_file,\n do_lower_case=do_lower_case,\n do_basic_tokenize=True,\n never_split=['<target>','</target>'])\n\n def tokenizer_custom(self, txt):\n target_anchors = ['<target>', '</target>']\n original_tokens = txt.split(' ')\n new_tokens = list()\n\n for token in original_tokens:\n if token in target_anchors:\n new_tokens.append(token)\n continue\n else:\n split_to_subwords = self.berttokenizer.tokenize(token) # token is 
oneword, split_tokens\n if ['[CLS]'] in split_to_subwords:\n split_to_subwords.remove('[CLS]')\n if ['[SEP]'] in split_to_subwords:\n split_to_subwords.remove('[SEP]')\n if split_to_subwords == []:\n new_tokens.append('[UNK]')\n else:\n new_tokens += split_to_subwords\n\n return new_tokens\n\n def mention_and_contexttokenizer_followblinkimplementation(self, txt):\n '''\n Args: sentence with space, including target anchor\n txt:\n\n Returns: [[CLS], split_sub0, ..., [mention_start], mention, [mention_end], ..., [SEP]]\n\n '''\n mention_start = '<target>'\n mention_end = '</target>'\n left, mention, right = from_original_sentence2left_mention_right_tokens_before_berttokenized(txt)\n\n new_tokens = list()\n new_tokens.append('[CLS]')\n\n if len(left) != 0:\n left_tokens = []\n for one_token in left:\n left_tokens += self.berttokenizer.tokenize(one_token)\n new_tokens += left_tokens[:self.args.max_left_context_len]\n\n new_tokens.append(self.mention_start_token)\n if len(mention) != 0:\n mention_tokens = []\n for one_token in mention:\n mention_tokens += self.berttokenizer.tokenize(one_token)\n new_tokens += mention_tokens[:self.args.max_mention_len]\n new_tokens.append(self.mention_end_token)\n\n if len(right) != 0:\n right_tokens = []\n for one_token in right:\n right_tokens += self.berttokenizer.tokenize(one_token)\n new_tokens += right_tokens[:self.args.max_right_context_len]\n new_tokens.append('[SEP]')\n return new_tokens\n\n def find_anchor(self,split_txt,tobefoundtoken):\n for i, word in enumerate(split_txt):\n if word == tobefoundtoken:\n return i\n return -1\n\n def left_right_mention_sentence_from_anchorincludedsentence_returner(self, split_txt):\n i = self.find_anchor(split_txt=split_txt, tobefoundtoken='<target>') # mention start\n j = self.find_anchor(split_txt=split_txt, tobefoundtoken='</target>') # mention end\n\n sfm_mention = split_txt[i+1:j]\n raw_sentence_noanchor = [token for token in split_txt if not token in ['<target>', '</target>']]\n\n left_context_include_mention = split_txt[:j]\n left_context_include_mention.remove('<target>')\n right_context_include_mention = split_txt[i+1:]\n right_context_include_mention.remove('</target>')\n\n return raw_sentence_noanchor, sfm_mention, left_context_include_mention, right_context_include_mention\n\n @overrides\n def _read(self, train_dev_testflag) -> Iterator[Instance]:\n mention_ids = list()\n if train_dev_testflag == 'train':\n mention_ids += self.train_mention_id\n # Because original data is sorted with pmid documents, we have to shuffle data points for in-batch training.\n random.shuffle(mention_ids)\n elif train_dev_testflag == 'dev':\n mention_ids += self.dev_mention_id\n elif train_dev_testflag == 'test':\n mention_ids += self.test_mention_id\n\n for idx, mention_uniq_id in tqdm(enumerate(mention_ids)):\n if mention_uniq_id in self.ignored_mention_idxs:\n continue\n if self.args.model_for_training == 'blink_implementation_inbatchencoder':\n data = self.linesparser_for_blink_implementation(line=self.id2line[mention_uniq_id], mention_uniq_id=mention_uniq_id)\n else:\n data = self.lineparser_for_local_mentions(line=self.id2line[mention_uniq_id], mention_uniq_id=mention_uniq_id)\n yield self.text_to_instance(data=data)\n\n def lineparser_for_local_mentions(self, line, mention_uniq_id):\n '''\n Now this function is going to be depreceated,\n since we gonna follow faithfully with \"Zero-shot entity linking with dense entity retrieval\"\n\n Args:\n line:\n train_dev_testflag:\n mention_uniq_id:\n\n Returns:\n\n '''\n gold_cui, 
gold_type, gold_surface_mention, targetanchor_included_sentence = line.split('\\t')\n tokenized_context_including_target_anchors = self.tokenizer_custom(txt=targetanchor_included_sentence)\n raw_sentence_noanchor, sfm_mention, left_context_include_mention, right_context_include_mention = self.left_right_mention_sentence_from_anchorincludedsentence_returner(\n split_txt=tokenized_context_including_target_anchors)\n\n data = {}\n\n data['mention_uniq_id'] = mention_uniq_id\n data['gold_ids'] = gold_cui # str\n data['gold_id_idx_with_cui2idx'] = int(self.cui2idx[gold_cui])\n data['mention_raw'] = gold_surface_mention\n data['raw_sentence_without_anchor_str'] = ' '.join(raw_sentence_noanchor)\n\n data['context'] = [Token(word) for word in raw_sentence_noanchor][:self.args.max_context_len]\n data['mention_preprocessed'] = [Token(word) for word in sfm_mention][:self.max_context_len]\n\n if len(left_context_include_mention) <= self.max_context_len:\n data['left_context_include_mention'] = [Token(word) for word in left_context_include_mention]\n else:\n data['left_context_include_mention'] = [Token(word) for word in left_context_include_mention][\n len(left_context_include_mention) - self.max_context_len:]\n\n data['right_context_include_mention'] = [Token(word) for word in right_context_include_mention][:self.max_context_len]\n\n data['context'].insert(0, Token('[CLS]'))\n data['context'].insert(len(data['context']), Token('[SEP]'))\n data['mention_preprocessed'].insert(0, Token('[CLS]'))\n data['mention_preprocessed'].insert(len(data['mention_preprocessed']), Token('[SEP]'))\n data['left_context_include_mention'].insert(0, Token('[CLS]'))\n data['left_context_include_mention'].insert(len(data['left_context_include_mention']), Token('[SEP]'))\n data['right_context_include_mention'].insert(0, Token('[CLS]'))\n data['right_context_include_mention'].insert(len(data['right_context_include_mention']), Token('[SEP]'))\n\n data['gold_cui_cano_and_def_concatenated'] = self.gold_canonical_and_def_concatenated_returner(gold_cui=gold_cui)\n\n return data\n\n def linesparser_for_blink_implementation(self, line, mention_uniq_id):\n gold_cui, gold_type, gold_surface_mention, targetanchor_included_sentence = line.split('\\t')\n gold_cui = gold_cui.replace('UMLS:', '')\n tokenized_context_including_target_anchors = self.mention_and_contexttokenizer_followblinkimplementation(txt=targetanchor_included_sentence)\n tokenized_context_including_target_anchors = [Token(split_token) for split_token in tokenized_context_including_target_anchors]\n data = {}\n data['context'] = tokenized_context_including_target_anchors\n data['gold_cui_cano_and_def_concatenated'] = self.gold_canonical_and_def_concatenated_returner(gold_cui=gold_cui)\n data['gold_cuidx'] = int(self.cui2idx[gold_cui])\n data['mention_uniq_id'] = int(mention_uniq_id)\n return data\n\n def gold_canonical_and_def_concatenated_returner(self, gold_cui):\n canonical = self.tokenizer_custom(txt=self.cui2cano[gold_cui])\n definition = self.tokenizer_custom(txt=self.cui2def[gold_cui])\n\n concatenated = ['[CLS]']\n concatenated += canonical[:self.max_canonical_len]\n concatenated.append(self.canonical_and_def_connecttoken)\n concatenated += definition[:self.max_def_len]\n concatenated.append('[SEP]')\n\n return [Token(tokenized_word) for tokenized_word in concatenated]\n\n def to_be_ignored_mention_idx_checker(self):\n to_be_ignored_mention_idxs = []\n all_mention_idxs = list()\n all_mention_idxs += self.train_mention_id\n all_mention_idxs += self.dev_mention_id\n 
all_mention_idxs += self.test_mention_id\n for mention_idx in all_mention_idxs:\n gold_cui_or_dui = self.id2line[mention_idx].split('\\t')[0].replace('UMLS:', '')\n if gold_cui_or_dui not in self.cui2idx:\n to_be_ignored_mention_idxs.append(mention_idx)\n return to_be_ignored_mention_idxs\n\n @overrides\n def text_to_instance(self, data=None) -> Instance:\n if self.args.model_for_training == 'blink_implementation_inbatchencoder':\n context_field = TextField(data['context'], self.token_indexers)\n fields = {\"context\": context_field}\n fields['gold_cui_cano_and_def_concatenated'] = TextField(data['gold_cui_cano_and_def_concatenated'], self.token_indexers)\n fields['gold_cuidx'] = ArrayField(np.array(data['gold_cuidx']))\n fields['mention_uniq_id'] = ArrayField(np.array(data['mention_uniq_id']))\n else:\n context_field = TextField(data['context'], self.token_indexers)\n fields = {\"context\": context_field}\n surface_mention_field = TextField(data['mention_preprocessed'], self.token_indexers)\n fields['left_context_include_mention'] = TextField(data['left_context_include_mention'], self.token_indexers)\n fields['right_context_include_mention'] = TextField(data['right_context_include_mention'], self.token_indexers)\n fields['mention_processed'] = surface_mention_field\n fields['gold_cui_cano_and_def_concatenated'] = TextField(data['gold_cui_cano_and_def_concatenated'], self.token_indexers)\n fields['gold_id_for_knn'] = ArrayField(np.array(data['gold_id_idx_with_cui2idx']))\n\n return Instance(fields)\n'''\nFor encoding all entities, we need another datasetreader\n'''\nclass AllEntityCanonical_and_Defs_loader(DatasetReader):\n def __init__(self, args, idx2cui, cui2cano, cui2def,\n textfield_embedder, pretrained_tokenizer, tokenindexer, canonical_and_def_connect_token):\n super().__init__(lazy=args.allen_lazyload)\n\n self.args = args\n self.idx2cui = idx2cui\n self.cui2cano = cui2cano\n self.cui2def = cui2def\n self.textfield_embedder = textfield_embedder\n self.pretrained_tokenizer = pretrained_tokenizer\n self.token_indexers = tokenindexer\n self.canonical_and_def_connect_token = canonical_and_def_connect_token\n\n @overrides\n def _read(self,file_path=None) -> Iterator[Instance]:\n for idx, cui in tqdm(self.idx2cui.items()):\n if self.args.debug_for_entity_encoder and idx==2100:\n break\n data = self.cui2data(cui=cui, idx=idx)\n yield self.text_to_instance(data=data)\n\n @overrides\n def text_to_instance(self, data=None) -> Instance:\n cano_and_def_concatenated = TextField(data['cano_and_def_concatenated'], self.token_indexers)\n fields = {\"cano_and_def_concatenated\": cano_and_def_concatenated, 'cui_idx':ArrayField(np.array(data['cui_idx'], dtype='int32'))}\n\n return Instance(fields)\n\n def tokenizer_custom(self, txt):\n original_tokens = txt.split(' ')\n new_tokens = list()\n\n for token in original_tokens:\n split_to_subwords = self.pretrained_tokenizer.tokenize(token) # token is oneword, split_tokens\n if ['[CLS]'] in split_to_subwords:\n split_to_subwords.remove('[CLS]')\n if ['[SEP]'] in split_to_subwords:\n split_to_subwords.remove('[SEP]')\n if split_to_subwords == []:\n new_tokens.append('[UNK]')\n else:\n new_tokens += split_to_subwords\n\n return new_tokens\n\n def cui2data(self, cui, idx):\n canonical_plus_definition = []\n canonical_plus_definition.append('[CLS]')\n\n canonical = self.cui2cano[cui]\n canonical_tokens = [split_word for split_word in self.tokenizer_custom(txt=canonical)]\n canonical_plus_definition += canonical_tokens[:self.args.max_canonical_len]\n\n 
canonical_plus_definition.append(self.canonical_and_def_connect_token)\n\n definition = self.cui2def[cui]\n definition_tokens = [split_word for split_word in self.tokenizer_custom(txt=definition)]\n canonical_plus_definition += definition_tokens[:self.args.max_def_len]\n\n canonical_plus_definition.append('[SEP]')\n\n return {'cano_and_def_concatenated':[ Token(split_word_) for split_word_ in canonical_plus_definition],\n 'cui_idx': idx}" ]
[ [ "torch.manual_seed", "numpy.array", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanjitjain2/soundnet_tf
[ "ba8d85246dbf14f2573ad5b46355ae512bb630de" ]
[ "util.py" ]
[ "import numpy as np\nimport librosa\n# import pdb\nimport wget\n\nlocal_config = {\n 'batch_size': 64, \n 'load_size': 22050*20,\n 'phase': 'extract'\n }\n\ndef get_audio(audio_link):\n file_name = audio_link.split('/')[-1]\n save_location = \"/Users/sanjitjain/projects/soundnet_tf/data/\"\n wget.download(audio_link, save_location + file_name + \".mp3\")\n return str(save_location+file_name)\n\n\ndef load_from_link(link, config=local_config):\n audio_path = get_audio(link)\n audio_path = \"/Users/sanjitjain/projects/soundnet_tf/data/tame_impala.mp3\"\n sound_sample, _ = load_audio(audio_path)\n audio = preprocess(sound_sample, config)\n\n return audio\n\ndef load_from_list(name_list, config=local_config):\n assert len(name_list) == config['batch_size'], \\\n \"The length of name_list({})[{}] is not the same as batch_size[{}]\".format(\n name_list[0], len(name_list), config['batch_size'])\n audios = np.zeros([config['batch_size'], config['load_size'], 1, 1])\n for idx, audio_path in enumerate(name_list):\n sound_sample, _ = load_audio(audio_path)\n audios[idx] = preprocess(sound_sample, config)\n \n return audios\n\n\ndef load_from_txt(txt_name, config=local_config):\n with open(txt_name, 'r') as handle:\n txt_list = handle.read().splitlines()\n\n audios = []\n for idx, audio_path in enumerate(txt_list):\n sound_sample, _ = load_audio(audio_path)\n audios.append(preprocess(sound_sample, config))\n \n return audios\n\n\n# NOTE: Load an audio as the same format in soundnet\n# 1. Keep original sample rate (which conflicts their own paper)\n# 2. Use first channel in multiple channels\n# 3. Keep range in [-256, 256]\n\ndef load_audio(audio_path, sr=None):\n # By default, librosa will resample the signal to 22050Hz(sr=None). And range in (-1., 1.)\n sound_sample, sr = librosa.load(audio_path, sr=sr, mono=False)\n\n return sound_sample, sr\n\n\ndef preprocess(raw_audio, config=local_config):\n # Select first channel (mono)\n if len(raw_audio.shape) > 1:\n raw_audio = raw_audio[0]\n\n # Make range [-256, 256]\n raw_audio *= 256.0\n\n # Make minimum length available\n length = config['load_size']\n if length > raw_audio.shape[0]:\n raw_audio = np.tile(raw_audio, length/raw_audio.shape[0] + 1)\n\n # Make equal training length\n if config['phase'] != 'extract':\n raw_audio = raw_audio[:length]\n\n # Check conditions\n assert len(raw_audio.shape) == 1, \"It seems this audio contains two channels, we only need the first channel\"\n assert np.max(raw_audio) <= 256, \"It seems this audio contains signal that exceeds 256\"\n assert np.min(raw_audio) >= -256, \"It seems this audio contains signal that exceeds -256\"\n\n # Shape to 1 x DIM x 1 x 1\n raw_audio = np.reshape(raw_audio, [1, -1, 1, 1])\n\n return raw_audio.copy()\n\n\n" ]
[ [ "numpy.min", "numpy.reshape", "numpy.tile", "numpy.max", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aaalgo/aardvark
[ "cdd42acdc20e85f4b3070dd1486f3dc9c9a9b905", "cdd42acdc20e85f4b3070dd1486f3dc9c9a9b905" ]
[ "cxray/predict-cls-vis.py", "zoo/slim/nets/resnet_utils.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nsys.path.append('..')\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.python.framework import meta_graph\nfrom mold import Scaling\nfrom gallery import Gallery\nfrom chest import *\n\nclass Model:\n def __init__ (self, X, path, name):\n mg = meta_graph.read_meta_graph_file(path + '.meta')\n is_training = tf.constant(False)\n self.probs, self.heatmap = \\\n tf.import_graph_def(mg.graph_def, name=name,\n input_map={'images:0': X, 'is_training:0': is_training},\n return_elements=['probs:0', 'heatmap:0'])\n self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)\n self.loader = lambda sess: self.saver.restore(sess, path)\n pass\n pass\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('model', None, '')\nflags.DEFINE_integer('stride', 16, '')\nflags.DEFINE_integer('channels', 1, '')\nflags.DEFINE_string('list', 'scratch/val-nz.list', '')\nflags.DEFINE_integer('max', 10, '')\nflags.DEFINE_integer('resize', 256, '')\n\ndef save_prediction_image (gal, image, label, probs, heatmap):\n pred = np.argmax(probs)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR).astype(np.float32) \n orig = np.copy(image)\n\n # ground truth\n cv2.putText(image, 'gt %d: %.3f %s' % (label, probs[label], CATEGORIES[label][1]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(image, 'inf %d: %.3f %s' % (pred, probs[pred], CATEGORIES[pred][1]), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\n image[:, :, 1] += heatmap[:, :, label] * 128\n image[:, :, 2] += heatmap[:, :, pred] * 128\n image = np.concatenate([image, orig], axis=1)\n cv2.imwrite(gal.next(), np.clip(image, 0, 255))\n pass\n\ndef main (_):\n X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name=\"images\")\n model = Model(X, FLAGS.model, 'xxx')\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n\n mold = Scaling(stride = FLAGS.stride)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n model.loader(sess)\n\n gal = Gallery('output', ext='.png')\n CC = 0\n if FLAGS.list:\n with open(FLAGS.list, 'r') as f:\n for line in f:\n if CC > FLAGS.max:\n break\n path, label = line.strip().split(',')\n label = int(label)\n\n print(path)\n if FLAGS.channels == 3:\n image = cv2.imread(path, cv2.IMREAD_COLOR)\n elif FLAGS.channels == 1:\n image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n else:\n assert False\n\n image = cv2.resize(image, (FLAGS.resize, FLAGS.resize))\n\n probs, heatmap = sess.run([model.probs, model.heatmap], feed_dict={X: mold.batch_image(image)})\n probs = probs[0]\n heatmap = mold.unbatch_prob(image, heatmap)\n '''END INFERENCE'''\n\n save_prediction_image(gal, image, label, probs, heatmap)\n CC += 1\n gal.flush()\n pass\n\nif __name__ == '__main__':\n tf.app.run()\n\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains building blocks for various versions of Residual Networks.\n\nResidual networks (ResNets) were proposed in:\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015\n\nMore variants were introduced in:\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016\n\nWe can obtain different ResNet variants by changing the network depth, width,\nand form of residual unit. This module implements the infrastructure for\nbuilding them. Concrete ResNet units and full ResNet networks are implemented in\nthe accompanying resnet_v1.py and resnet_v2.py modules.\n\nCompared to https://github.com/KaimingHe/deep-residual-networks, in the current\nimplementation we subsample the output activations in the last residual unit of\neach block, instead of subsampling the input activations in the first residual\nunit of each block. The two implementations give identical results but our\nimplementation is more memory efficient.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\nclass Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):\n \"\"\"A named tuple describing a ResNet block.\n\n Its parts are:\n scope: The scope of the `Block`.\n unit_fn: The ResNet unit function which takes as input a `Tensor` and\n returns another `Tensor` with the output of the ResNet unit.\n args: A list of length equal to the number of units in the `Block`. 
The list\n contains one (depth, depth_bottleneck, stride) tuple for each unit in the\n block to serve as argument to unit_fn.\n \"\"\"\n\n\ndef subsample(inputs, factor, scope=None):\n \"\"\"Subsamples the input along the spatial dimensions.\n\n Args:\n inputs: A `Tensor` of size [batch, height_in, width_in, channels].\n factor: The subsampling factor.\n scope: Optional variable_scope.\n\n Returns:\n output: A `Tensor` of size [batch, height_out, width_out, channels] with the\n input, either intact (if factor == 1) or subsampled (if factor > 1).\n \"\"\"\n if factor == 1:\n return inputs\n else:\n return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)\n\n\ndef conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):\n \"\"\"Strided 2-D convolution with 'SAME' padding.\n\n When stride > 1, then we do explicit zero-padding, followed by conv2d with\n 'VALID' padding.\n\n Note that\n\n net = conv2d_same(inputs, num_outputs, 3, stride=stride)\n\n is equivalent to\n\n net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')\n net = subsample(net, factor=stride)\n\n whereas\n\n net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')\n\n is different when the input's height or width is even, which is why we add the\n current function. For more details, see ResnetUtilsTest.testConv2DSameEven().\n\n Args:\n inputs: A 4-D tensor of size [batch, height_in, width_in, channels].\n num_outputs: An integer, the number of output filters.\n kernel_size: An int with the kernel_size of the filters.\n stride: An integer, the output stride.\n rate: An integer, rate for atrous convolution.\n scope: Scope.\n\n Returns:\n output: A 4-D tensor of size [batch, height_out, width_out, channels] with\n the convolution output.\n \"\"\"\n if stride == 1:\n return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,\n padding='SAME', scope=scope)\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n inputs = tf.pad(inputs,\n [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,\n rate=rate, padding='VALID', scope=scope)\n\n\[email protected]_arg_scope\ndef stack_blocks_dense(net, blocks, output_stride=None,\n store_non_strided_activations=False,\n outputs_collections=None):\n \"\"\"Stacks ResNet `Blocks` and controls output feature density.\n\n First, this function creates scopes for the ResNet in the form of\n 'block_name/unit_1', 'block_name/unit_2', etc.\n\n Second, this function allows the user to explicitly control the ResNet\n output_stride, which is the ratio of the input to output spatial resolution.\n This is useful for dense prediction tasks such as semantic segmentation or\n object detection.\n\n Most ResNets consist of 4 ResNet blocks and subsample the activations by a\n factor of 2 when transitioning between consecutive ResNet blocks. This results\n to a nominal ResNet output_stride equal to 8. If we set the output_stride to\n half the nominal network stride (e.g., output_stride=4), then we compute\n responses twice.\n\n Control of the output feature density is implemented by atrous convolution.\n\n Args:\n net: A `Tensor` of size [batch, height, width, channels].\n blocks: A list of length equal to the number of ResNet `Blocks`. 
Each\n element is a ResNet `Block` object describing the units in the `Block`.\n output_stride: If `None`, then the output will be computed at the nominal\n network stride. If output_stride is not `None`, it specifies the requested\n ratio of input to output spatial resolution, which needs to be equal to\n the product of unit strides from the start up to some level of the ResNet.\n For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,\n then valid values for the output_stride are 1, 2, 6, 24 or None (which\n is equivalent to output_stride=24).\n store_non_strided_activations: If True, we compute non-strided (undecimated)\n activations at the last unit of each block and store them in the\n `outputs_collections` before subsampling them. This gives us access to\n higher resolution intermediate activations which are useful in some\n dense prediction problems but increases 4x the computation and memory cost\n at the last unit of each block.\n outputs_collections: Collection to add the ResNet block outputs.\n\n Returns:\n net: Output tensor with stride equal to the specified output_stride.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n # The current_stride variable keeps track of the effective stride of the\n # activations. This allows us to invoke atrous convolution whenever applying\n # the next residual unit would result in the activations having stride larger\n # than the target output_stride.\n current_stride = 1\n\n # The atrous convolution rate parameter.\n rate = 1\n\n for block in blocks:\n with tf.variable_scope(block.scope, 'block', [net]) as sc:\n block_stride = 1\n for i, unit in enumerate(block.args):\n if store_non_strided_activations and i == len(block.args) - 1:\n # Move stride from the block's last unit to the end of the block.\n block_stride = unit.get('stride', 1)\n unit = dict(unit, stride=1)\n\n with tf.variable_scope('unit_%d' % (i + 1), values=[net]):\n # If we have reached the target output_stride, then we need to employ\n # atrous convolution with stride=1 and multiply the atrous rate by the\n # current unit's stride for use in subsequent layers.\n if output_stride is not None and current_stride == output_stride:\n net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))\n rate *= unit.get('stride', 1)\n\n else:\n net = block.unit_fn(net, rate=1, **unit)\n current_stride *= unit.get('stride', 1)\n if output_stride is not None and current_stride > output_stride:\n raise ValueError('The target output_stride cannot be reached.')\n\n # Collect activations at the block's end before performing subsampling.\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n\n # Subsampling of the block's output activations.\n if output_stride is not None and current_stride == output_stride:\n rate *= block_stride\n else:\n net = subsample(net, block_stride)\n current_stride *= block_stride\n if output_stride is not None and current_stride > output_stride:\n raise ValueError('The target output_stride cannot be reached.')\n\n if output_stride is not None and current_stride != output_stride:\n raise ValueError('The target output_stride cannot be reached.')\n\n return net\n\n\ndef resnet_arg_scope(weight_decay=0.0001,\n batch_norm_decay=0.997,\n batch_norm_epsilon=1e-5,\n batch_norm_scale=True,\n #batch_norm_decay=0.9,\n #batch_norm_epsilon=5e-4,\n #batch_norm_scale=False,\n activation_fn=tf.nn.relu,\n use_batch_norm=True):\n \"\"\"Defines the default ResNet arg scope.\n\n TODO(gpapan): The batch-normalization related 
default values above are\n appropriate for use in conjunction with the reference ResNet models\n released at https://github.com/KaimingHe/deep-residual-networks. When\n training ResNets from scratch, they might need to be tuned.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n batch_norm_decay: The moving average decay when estimating layer activation\n statistics in batch normalization.\n batch_norm_epsilon: Small constant to prevent division by zero when\n normalizing activations by their variance in batch normalization.\n batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the\n activations in the batch normalization layer.\n activation_fn: The activation function which is used in ResNet.\n use_batch_norm: Whether or not to use batch normalization.\n\n Returns:\n An `arg_scope` to use for the resnet models.\n \"\"\"\n batch_norm_params = {\n 'decay': batch_norm_decay,\n 'epsilon': batch_norm_epsilon,\n 'scale': batch_norm_scale,\n 'updates_collections': tf.GraphKeys.UPDATE_OPS,\n 'fused': None, # Use fused batch norm if possible.\n }\n\n with slim.arg_scope(\n [slim.conv2d],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n weights_initializer=slim.variance_scaling_initializer(),\n activation_fn=activation_fn,\n normalizer_fn=slim.batch_norm if use_batch_norm else None,\n normalizer_params=batch_norm_params):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n # The following implies padding='SAME' for pool1, which makes feature\n # alignment easier for dense prediction tasks. This is also used in\n # https://github.com/facebook/fb.resnet.torch. However the accompanying\n # code of 'Deep Residual Learning for Image Recognition' uses\n # padding='VALID' for pool1. You can switch to that choice by setting\n # slim.arg_scope([slim.max_pool2d], padding='VALID').\n with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:\n return arg_sc\n" ]
[ [ "tensorflow.import_graph_def", "tensorflow.constant", "tensorflow.local_variables_initializer", "numpy.clip", "tensorflow.placeholder", "tensorflow.python.framework.meta_graph.read_meta_graph_file", "numpy.concatenate", "tensorflow.ConfigProto", "numpy.copy", "numpy.argmax", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.app.run" ], [ "tensorflow.variable_scope", "tensorflow.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
shippingwang/models
[ "a92e212932b764e500a833527e0fb772ac9a491a", "a92e212932b764e500a833527e0fb772ac9a491a", "a92e212932b764e500a833527e0fb772ac9a491a" ]
[ "PaddleCV/PaddleDetection/tools/face_eval.py", "PaddleCV/Research/danet/utils/voc_data.py", "PaddleCV/PaddleDetection/inference/tools/vis.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport paddle.fluid as fluid\nimport numpy as np\nfrom PIL import Image\nfrom collections import OrderedDict\n\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.utils.cli import ArgsParser\nfrom ppdet.utils.check import check_gpu\nfrom ppdet.utils.widerface_eval_utils import get_shrink, bbox_vote, \\\n save_widerface_bboxes, save_fddb_bboxes, to_chw_bgr\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.modeling.model_input import create_feed\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef face_img_process(image,\n mean=[104., 117., 123.],\n std=[127.502231, 127.502231, 127.502231]):\n img = np.array(image)\n img = to_chw_bgr(img)\n img = img.astype('float32')\n img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')\n img /= np.array(std)[:, np.newaxis, np.newaxis].astype('float32')\n img = [img]\n img = np.array(img)\n return img\n\n\ndef face_eval_run(exe,\n compile_program,\n fetches,\n img_root_dir,\n gt_file,\n pred_dir='output/pred',\n eval_mode='widerface',\n multi_scale=False):\n # load ground truth files\n with open(gt_file, 'r') as f:\n gt_lines = f.readlines()\n imid2path = []\n pos_gt = 0\n while pos_gt < len(gt_lines):\n name_gt = gt_lines[pos_gt].strip('\\n\\t').split()[0]\n imid2path.append(name_gt)\n pos_gt += 1\n n_gt = int(gt_lines[pos_gt].strip('\\n\\t').split()[0])\n pos_gt += 1 + n_gt\n logger.info('The ground truth file load {} images'.format(len(imid2path)))\n\n dets_dist = OrderedDict()\n for iter_id, im_path in enumerate(imid2path):\n image_path = os.path.join(img_root_dir, im_path)\n if eval_mode == 'fddb':\n image_path += '.jpg'\n image = Image.open(image_path).convert('RGB')\n if multi_scale:\n shrink, max_shrink = get_shrink(image.size[1], image.size[0])\n det0 = detect_face(exe, compile_program, fetches, image, shrink)\n det1 = flip_test(exe, compile_program, fetches, image, shrink)\n [det2, det3] = multi_scale_test(exe, compile_program, fetches, image,\n max_shrink)\n det4 = multi_scale_test_pyramid(exe, compile_program, fetches, image,\n max_shrink)\n det = np.row_stack((det0, det1, det2, det3, det4))\n dets = bbox_vote(det)\n else:\n dets = detect_face(exe, compile_program, fetches, image, 1)\n if eval_mode == 'widerface':\n save_widerface_bboxes(image_path, dets, pred_dir)\n else:\n dets_dist[im_path] = dets\n if iter_id % 100 == 0:\n logger.info('Test iter {}'.format(iter_id))\n if eval_mode == 'fddb':\n save_fddb_bboxes(dets_dist, pred_dir)\n logger.info(\"Finish evaluation.\")\n\n\ndef detect_face(exe, compile_program, fetches, image, shrink):\n image_shape = [3, image.size[1], image.size[0]]\n if shrink != 1:\n h, w = int(image_shape[1] * 
shrink), int(image_shape[2] * shrink)\n image = image.resize((w, h), Image.ANTIALIAS)\n image_shape = [3, h, w]\n\n img = face_img_process(image)\n detection, = exe.run(compile_program,\n feed={'image': img},\n fetch_list=[fetches['bbox']],\n return_numpy=False)\n detection = np.array(detection)\n # layout: xmin, ymin, xmax. ymax, score\n if np.prod(detection.shape) == 1:\n logger.info(\"No face detected\")\n return np.array([[0, 0, 0, 0, 0]])\n det_conf = detection[:, 1]\n det_xmin = image_shape[2] * detection[:, 2] / shrink\n det_ymin = image_shape[1] * detection[:, 3] / shrink\n det_xmax = image_shape[2] * detection[:, 4] / shrink\n det_ymax = image_shape[1] * detection[:, 5] / shrink\n\n det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))\n return det\n\n\ndef flip_test(exe, compile_program, fetches, image, shrink):\n img = image.transpose(Image.FLIP_LEFT_RIGHT)\n det_f = detect_face(exe, compile_program, fetches, img, shrink)\n det_t = np.zeros(det_f.shape)\n # image.size: [width, height]\n det_t[:, 0] = image.size[0] - det_f[:, 2]\n det_t[:, 1] = det_f[:, 1]\n det_t[:, 2] = image.size[0] - det_f[:, 0]\n det_t[:, 3] = det_f[:, 3]\n det_t[:, 4] = det_f[:, 4]\n return det_t\n\n\ndef multi_scale_test(exe, compile_program, fetches, image, max_shrink):\n # Shrink detecting is only used to detect big faces\n st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink\n det_s = detect_face(exe, compile_program, fetches, image, st)\n index = np.where(\n np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)\n > 30)[0]\n det_s = det_s[index, :]\n # Enlarge one times\n bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2\n det_b = detect_face(exe, compile_program, fetches, image, bt)\n\n # Enlarge small image x times for small faces\n if max_shrink > 2:\n bt *= 2\n while bt < max_shrink:\n det_b = np.row_stack((det_b, detect_face(exe, compile_program,\n fetches, image, bt)))\n bt *= 2\n det_b = np.row_stack((det_b, detect_face(exe, compile_program, fetches,\n image, max_shrink)))\n\n # Enlarged images are only used to detect small faces.\n if bt > 1:\n index = np.where(\n np.minimum(det_b[:, 2] - det_b[:, 0] + 1,\n det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]\n det_b = det_b[index, :]\n # Shrinked images are only used to detect big faces.\n else:\n index = np.where(\n np.maximum(det_b[:, 2] - det_b[:, 0] + 1,\n det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]\n det_b = det_b[index, :]\n return det_s, det_b\n\n\ndef multi_scale_test_pyramid(exe, compile_program, fetches, image, max_shrink):\n # Use image pyramids to detect faces\n det_b = detect_face(exe, compile_program, fetches, image, 0.25)\n index = np.where(\n np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)\n > 30)[0]\n det_b = det_b[index, :]\n\n st = [0.75, 1.25, 1.5, 1.75]\n for i in range(len(st)):\n if st[i] <= max_shrink:\n det_temp = detect_face(exe, compile_program, fetches, image, st[i])\n # Enlarged images are only used to detect small faces.\n if st[i] > 1:\n index = np.where(\n np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,\n det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]\n det_temp = det_temp[index, :]\n # Shrinked images are only used to detect big faces.\n else:\n index = np.where(\n np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,\n det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]\n det_temp = det_temp[index, :]\n det_b = np.row_stack((det_b, det_temp))\n return det_b\n\n\ndef main():\n \"\"\"\n Main evaluate function\n \"\"\"\n cfg = load_config(FLAGS.config)\n if 
'architecture' in cfg:\n main_arch = cfg.architecture\n else:\n raise ValueError(\"'architecture' not specified in config file.\")\n\n merge_config(FLAGS.opt)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n check_gpu(cfg.use_gpu)\n\n if 'eval_feed' not in cfg:\n eval_feed = create(main_arch + 'EvalFeed')\n else:\n eval_feed = create(cfg.eval_feed)\n\n # define executor\n place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n # build program\n model = create(main_arch)\n startup_prog = fluid.Program()\n eval_prog = fluid.Program()\n with fluid.program_guard(eval_prog, startup_prog):\n with fluid.unique_name.guard():\n _, feed_vars = create_feed(eval_feed, use_pyreader=False)\n fetches = model.eval(feed_vars)\n\n eval_prog = eval_prog.clone(True)\n\n # load model\n exe.run(startup_prog)\n if 'weights' in cfg:\n checkpoint.load_params(exe, eval_prog, cfg.weights)\n\n assert cfg.metric in ['WIDERFACE'], \\\n \"unknown metric type {}\".format(cfg.metric)\n\n annotation_file = getattr(eval_feed.dataset, 'annotation', None)\n dataset_dir = FLAGS.dataset_dir if FLAGS.dataset_dir else \\\n getattr(eval_feed.dataset, 'dataset_dir', None)\n img_root_dir = dataset_dir\n if FLAGS.eval_mode == \"widerface\":\n image_dir = getattr(eval_feed.dataset, 'image_dir', None)\n img_root_dir = os.path.join(dataset_dir, image_dir)\n gt_file = os.path.join(dataset_dir, annotation_file)\n pred_dir = FLAGS.output_eval if FLAGS.output_eval else 'output/pred'\n face_eval_run(\n exe,\n eval_prog,\n fetches,\n img_root_dir,\n gt_file,\n pred_dir=pred_dir,\n eval_mode=FLAGS.eval_mode,\n multi_scale=FLAGS.multi_scale)\n\n\nif __name__ == '__main__':\n parser = ArgsParser()\n parser.add_argument(\n \"-d\",\n \"--dataset_dir\",\n default=None,\n type=str,\n help=\"Dataset path, same as DataFeed.dataset.dataset_dir\")\n parser.add_argument(\n \"-f\",\n \"--output_eval\",\n default=None,\n type=str,\n help=\"Evaluation file directory, default is current directory.\")\n parser.add_argument(\n \"-e\",\n \"--eval_mode\",\n default=\"widerface\",\n type=str,\n help=\"Evaluation mode, include `widerface` and `fddb`, default is `widerface`.\"\n )\n parser.add_argument(\n \"--multi_scale\",\n action='store_true',\n default=False,\n help=\"If True it will select `multi_scale` evaluation. Default is `False`, it will select `single-scale` evaluation.\")\n FLAGS = parser.parse_args()\n main()\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport random\nimport paddle\nimport numpy as np\n\nfrom PIL import Image\n\nfrom utils.voc import VOC\n\n__all__ = ['voc_train', 'voc_val', 'voc_train_val', 'voc_test']\n\n# globals\ndata_mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)\ndata_std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)\n\n\ndef mapper_train(sample):\n image_path, label_path, voc = sample\n image = Image.open(image_path, mode='r').convert('RGB')\n label = Image.open(label_path, mode='r')\n\n image, label = voc.sync_transform(image, label) \n image_array = np.array(image) # HWC\n label_array = np.array(label) # HW\n\n image_array = image_array.transpose((2, 0, 1)) # CHW\n image_array = image_array / 255.0 \n image_array = (image_array - data_mean) / data_std \n image_array = image_array.astype('float32')\n label_array = label_array.astype('int64')\n return image_array, label_array\n\n\ndef mapper_val(sample):\n image_path, label_path, city = sample\n image = Image.open(image_path, mode='r').convert('RGB')\n label = Image.open(label_path, mode='r')\n\n image, label = city.sync_val_transform(image, label) \n image_array = np.array(image) \n label_array = np.array(label) \n\n image_array = image_array.transpose((2, 0, 1)) \n image_array = image_array / 255.0 \n image_array = (image_array - data_mean) / data_std \n image_array = image_array.astype('float32')\n label_array = label_array.astype('int64')\n return image_array, label_array\n\n\ndef mapper_test(sample):\n image_path, label_path = sample # label is path\n image = Image.open(image_path, mode='r').convert('RGB')\n image_array = image\n return image_array, label_path # label is path\n\n\n# 已完成, 引用时记得传入参数,root, base_size, crop_size等, gpu_num必须设置,否则syncBN会出现某些卡没有数据的情况\ndef voc_train(data_root='../dataset', base_size=768, crop_size=576, scale=True, xmap=True, batch_size=1, gpu_num=1):\n voc = VOC(root=data_root, split='train', base_size=base_size, crop_size=crop_size, scale=scale)\n image_path, label_path = voc.get_path_pairs()\n\n def reader():\n if len(image_path) % (batch_size * gpu_num) != 0:\n length = (len(image_path) // (batch_size * gpu_num)) * (batch_size * gpu_num)\n else:\n length = len(image_path)\n for i in range(length):\n if i == 0: \n cc = list(zip(image_path, label_path))\n random.shuffle(cc)\n image_path[:], label_path[:] = zip(*cc)\n yield image_path[i], label_path[i], voc\n if xmap:\n return paddle.reader.xmap_readers(mapper_train, reader, 4, 32)\n else:\n return paddle.reader.map_readers(mapper_train, reader)\n\n\ndef voc_val(data_root='../dataset', base_size=768, crop_size=576, scale=True, xmap=True):\n voc = VOC(root=data_root, split='val', base_size=base_size, crop_size=crop_size, scale=scale)\n image_path, label_path = voc.get_path_pairs()\n\n def reader():\n for i in range(len(image_path)):\n yield image_path[i], label_path[i], voc\n\n if xmap:\n return paddle.reader.xmap_readers(mapper_val, reader, 4, 32)\n else:\n return 
paddle.reader.map_readers(mapper_val, reader)\n\n\ndef voc_train_val(data_root='./dataset', base_size=768, crop_size=576, scale=True, xmap=True, batch_size=1, gpu_num=1):\n voc = VOC(root=data_root, split='train_val', base_size=base_size, crop_size=crop_size, scale=scale)\n image_path, label_path = voc.get_path_pairs()\n\n def reader():\n if len(image_path) % (batch_size * gpu_num) != 0:\n length = (len(image_path) // (batch_size * gpu_num)) * (batch_size * gpu_num)\n else:\n length = len(image_path)\n for i in range(length):\n if i == 0: \n cc = list(zip(image_path, label_path))\n random.shuffle(cc)\n image_path[:], label_path[:] = zip(*cc)\n yield image_path[i], label_path[i]\n\n if xmap:\n return paddle.reader.xmap_readers(mapper_train, reader, 4, 32)\n else:\n return paddle.reader.map_readers(mapper_train, reader)\n\n\ndef voc_test(split='test', base_size=2048, crop_size=1024, scale=True, xmap=True):\n # 实际未使用base_size, crop_size, scale\n voc = VOC(split=split, base_size=base_size, crop_size=crop_size, scale=scale)\n image_path = voc.get_path_pairs()\n\n def reader():\n for i in range(len(image_path[:1])):\n yield image_path[i], image_path[i]\n if xmap:\n return paddle.reader.xmap_readers(mapper_test, reader, 4, 32)\n else:\n return paddle.reader.map_readers(mapper_test, reader)\n", "# coding: utf-8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport detection_result_pb2\nimport cv2\nimport sys\nimport gflags\nimport numpy as np\nimport json\nfrom PIL import Image, ImageDraw, ImageFont\n\nFlags = gflags.FLAGS\ngflags.DEFINE_string('img_path', 'abc', 'image path')\ngflags.DEFINE_string('img_result_path', 'def', 'image result path')\ngflags.DEFINE_float('threshold', 0.0, 'threshold of score') \ngflags.DEFINE_string('c2l_path', 'ghk', 'class to label path')\n\ndef colormap(rgb=False):\n \"\"\"\n Get colormap\n \"\"\"\n color_list = np.array([\n 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,\n 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,\n 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,\n 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,\n 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,\n 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,\n 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,\n 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,\n 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,\n 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,\n 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,\n 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,\n 1.000, 0.000, 
1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,\n 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,\n 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,\n 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,\n 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,\n 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,\n 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,\n 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,\n 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000\n ]).astype(np.float32)\n color_list = color_list.reshape((-1, 3)) * 255\n if not rgb:\n color_list = color_list[:, ::-1]\n return color_list\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 5:\n print(\"Usage: python vis.py --img_path=/path/to/image --img_result_path=/path/to/image_result.pb --threshold=0.1 --c2l_path=/path/to/class2label.json\")\n else:\n Flags(sys.argv) \n color_list = colormap(rgb=True)\n text_thickness = 1\n text_scale = 0.3\n with open(Flags.img_result_path, \"rb\") as f:\n detection_result = detection_result_pb2.DetectionResult()\n detection_result.ParseFromString(f.read())\n img = cv2.imread(Flags.img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n class2LabelMap = dict()\n with open(Flags.c2l_path, \"r\", encoding=\"utf-8\") as json_f:\n class2LabelMap = json.load(json_f)\n for box in detection_result.detection_boxes:\n if box.score >= Flags.threshold:\n box_class = getattr(box, 'class')\n text_class_score_str = \"%s %.2f\" % (class2LabelMap.get(str(box_class)), box.score)\n text_point = (int(box.left_top_x), int(box.left_top_y))\n\n ptLeftTop = (int(box.left_top_x), int(box.left_top_y))\n ptRightBottom = (int(box.right_bottom_x), int(box.right_bottom_y))\n box_thickness = 1\n color = tuple([int(c) for c in color_list[box_class]])\n cv2.rectangle(img, ptLeftTop, ptRightBottom, color, box_thickness, 8)\n if text_point[1] < 0:\n text_point = (int(box.left_top_x), int(box.right_bottom_y))\n WHITE = (255, 255, 255)\n font = cv2.FONT_HERSHEY_SIMPLEX\n text_size = cv2.getTextSize(text_class_score_str, font, text_scale, text_thickness)\n \n text_box_left_top = (text_point[0], text_point[1] - text_size[0][1])\n text_box_right_bottom = (text_point[0] + text_size[0][0], text_point[1])\n\n cv2.rectangle(img, text_box_left_top, text_box_right_bottom, color, -1, 8)\n cv2.putText(img, text_class_score_str, text_point, font, text_scale, WHITE, text_thickness)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(Flags.img_path + \".png\", img)\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.row_stack", "numpy.prod", "numpy.column_stack", "numpy.array", "numpy.zeros" ], [ "numpy.array" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DequanZhu/facenet
[ "78f32c2fa43217489f5b340826991780b3276fe2" ]
[ "src/train.py" ]
[ "from __future__ import division\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.optimizers import schedules, Adam\r\nfrom tensorflow.python.keras.losses import SparseCategoricalCrossentropy\r\nfrom tensorflow.python.keras.metrics import SparseCategoricalAccuracy\r\nfrom facenet import FaceNet\r\nfrom options.train_options import TrainOptions\r\nfrom losses import *\r\nfrom datasets import create_datasets_from_tfrecord\r\nfrom utils import check_folder\r\nfrom progressbar import *\r\n\r\n\r\nclass Trainer(object):\r\n def __init__(self, args):\r\n self.args=args\r\n self.model = FaceNet(args).model\r\n self.train_datasets, self.nrof_train = create_datasets_from_tfrecord(tfrcd_dir=args.datasets,\r\n batch_size = args.batch_size,\r\n phase='train')\r\n\r\n self.val_datasets, self.nrof_val = create_datasets_from_tfrecord(tfrcd_dir=args.datasets,\r\n batch_size = args.batch_size,\r\n phase='val')\r\n self.lr_schedule = schedules.ExponentialDecay(args.learning_rate,\r\n decay_steps=10000,\r\n decay_rate=0.96,\r\n staircase=True)\r\n\r\n self.optimizer = Adam(learning_rate=self.lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=0.1)\r\n self.checkpoint = tf.train.Checkpoint(epoch=tf.Variable(0,dtype=tf.int64),\r\n n_iter=tf.Variable(0,dtype=tf.int64), \r\n best_pred=tf.Variable(0.0,dtype=tf.float32),\r\n optimizer=self.optimizer,\r\n model=self.model)\r\n self.manager = tf.train.CheckpointManager(self.checkpoint, args.checkpoint_dir, max_to_keep=3)\r\n check_folder(args.log_dir)\r\n self.train_summary_writer = tf.summary.create_file_writer(args.log_dir)\r\n\r\n # @tf.function()\r\n def train_one_step(self, train_acc_metric, loss_layer, batch_examples, trainable_variables):\r\n with tf.GradientTape() as tape:\r\n batch_images, batch_labels = batch_examples\r\n features = self.model(batch_images)\r\n embedding = tf.math.l2_normalize(features, axis=1, epsilon=1e-10)\r\n logits = loss_layer(embedding, batch_labels)\r\n loss = SparseCategoricalCrossentropy(from_logits=True)(batch_labels, logits)\r\n train_acc_metric(batch_labels, logits)\r\n gradients = tape.gradient(loss, trainable_variables)\r\n self.optimizer.apply_gradients(zip(gradients, trainable_variables))\r\n return loss\r\n\r\n def training(self, epoch):\r\n opt = self.args\r\n loss_layer = ArcFaceSoftmaxLinear(opt.nrof_classes, opt.embedding_size, opt.margin, opt.feature_scale)\r\n trainable_variables=[]\r\n trainable_variables.extend(loss_layer.trainable_variables)\r\n trainable_variables.extend(self.model.trainable_variables)\r\n train_acc_metric = SparseCategoricalAccuracy()\r\n widgets = ['train :', Percentage(), ' ', Bar('#'), ' ',Timer(), ' ', ETA(), ' ']\r\n pbar = ProgressBar(widgets=widgets, max_value=int(self.nrof_train//opt.batch_size)+1).start()\r\n for batch_id, batch_examples in pbar(enumerate(self.train_datasets)):\r\n loss = self.train_one_step(train_acc_metric, loss_layer, batch_examples, trainable_variables)\r\n with self.train_summary_writer.as_default():\r\n tf.summary.scalar('total_loss', loss, self.checkpoint.n_iter)\r\n self.checkpoint.n_iter.assign_add(1)\r\n pbar.finish() \r\n train_acc = train_acc_metric.result()\r\n print('\\nTraining acc over epoch {}: {:.4f}'.format(epoch, train_acc))\r\n with self.train_summary_writer.as_default():\r\n tf.summary.scalar('train/acc', train_acc_metric.result(), self.checkpoint.epoch)\r\n train_acc_metric.reset_states()\r\n save_path = self.manager.save()\r\n print('save checkpoint to {}'.format(save_path))\r\n\r\n\r\n def 
validate(self, epoch):\r\n widgets = ['validate :', Percentage(), ' ', Bar('#'), ' ', Timer(), ' ', ETA(), ' ']\r\n pbar = ProgressBar(widgets=widgets, max_value=int(self.nrof_val//self.args.batch_size)+1).start()\r\n val_acc_metric = SparseCategoricalAccuracy()\r\n for batch_id, (batch_images_validate, batch_labels_validate) in pbar(enumerate(self.val_datasets)):\r\n prediction = self.model(batch_images_validate)\r\n val_acc_metric(batch_labels_validate, prediction)\r\n pbar.finish()\r\n val_acc = val_acc_metric.result()\r\n print('\\nvalidate acc over epoch {}: {:.4f}'.format(epoch, val_acc))\r\n with self.train_summary_writer.as_default():\r\n tf.summary.scalar('val/acc', val_acc, self.checkpoint.epoch)\r\n self.checkpoint.epoch.assign_add(1)\r\n\r\n val_acc_metric.reset_states()\r\n\r\n if val_acc > self.checkpoint.best_pred:\r\n # Keep the best validation accuracy inside the checkpoint so it survives restarts.\r\n self.checkpoint.best_pred.assign(val_acc)\r\n with open(os.path.join(self.args.checkpoint_dir, 'best_pred.txt'), 'w') as f:\r\n f.write(str(float(val_acc)))\r\n self.model.save(os.path.join(self.args.checkpoint_dir, 'best_model.h5'))\r\n\r\n\r\ndef main(argv):\r\n opt = TrainOptions(argv).parse()\r\n trainer = Trainer(opt)\r\n start_epoch = 0\r\n if opt.restore:\r\n # restore() returns a status object, not an epoch; read the resume epoch from the restored counter.\r\n trainer.checkpoint.restore(trainer.manager.latest_checkpoint)\r\n start_epoch = int(trainer.checkpoint.epoch)\r\n for epoch in range(start_epoch, opt.max_epoch):\r\n trainer.training(epoch)\r\n if not opt.no_val and epoch % opt.eval_interval == (opt.eval_interval - 1):\r\n trainer.validate(epoch)\r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n" ]
[ [ "tensorflow.train.CheckpointManager", "tensorflow.python.keras.metrics.SparseCategoricalAccuracy", "tensorflow.Variable", "tensorflow.math.l2_normalize", "tensorflow.keras.optimizers.schedules.ExponentialDecay", "tensorflow.GradientTape", "tensorflow.keras.optimizers.Adam", "tensorflow.python.keras.losses.SparseCategoricalCrossentropy", "tensorflow.summary.scalar", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
huzq/scikit-learn
[ "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec", "f862129f36786acbae3d9f2d161bbb72d77b87ec" ]
[ "sklearn/decomposition/tests/test_incremental_pca.py", "examples/model_selection/plot_roc_crossval.py", "sklearn/tree/_classes.py", "sklearn/feature_extraction/tests/test_feature_hasher.py", "examples/neural_networks/plot_mnist_filters.py", "sklearn/decomposition/tests/test_fastica.py", "sklearn/utils/tests/test_encode.py", "examples/cluster/plot_segmentation_toy.py" ]
[ "\"\"\"Tests for Incremental PCA.\"\"\"\nimport numpy as np\nimport pytest\nimport warnings\n\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_allclose_dense_sparse\nfrom numpy.testing import assert_array_equal\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA, IncrementalPCA\n\nfrom scipy import sparse\n\niris = datasets.load_iris()\n\n\ndef test_incremental_pca():\n # Incremental PCA on dense arrays.\n X = iris.data\n batch_size = X.shape[0] // 3\n ipca = IncrementalPCA(n_components=2, batch_size=batch_size)\n pca = PCA(n_components=2)\n pca.fit_transform(X)\n\n X_transformed = ipca.fit_transform(X)\n\n assert X_transformed.shape == (X.shape[0], 2)\n np.testing.assert_allclose(\n ipca.explained_variance_ratio_.sum(),\n pca.explained_variance_ratio_.sum(),\n rtol=1e-3,\n )\n\n for n_components in [1, 2, X.shape[1]]:\n ipca = IncrementalPCA(n_components, batch_size=batch_size)\n ipca.fit(X)\n cov = ipca.get_covariance()\n precision = ipca.get_precision()\n np.testing.assert_allclose(\n np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13\n )\n\n\[email protected](\n \"matrix_class\", [sparse.csc_matrix, sparse.csr_matrix, sparse.lil_matrix]\n)\ndef test_incremental_pca_sparse(matrix_class):\n # Incremental PCA on sparse arrays.\n X = iris.data\n pca = PCA(n_components=2)\n pca.fit_transform(X)\n X_sparse = matrix_class(X)\n batch_size = X_sparse.shape[0] // 3\n ipca = IncrementalPCA(n_components=2, batch_size=batch_size)\n\n X_transformed = ipca.fit_transform(X_sparse)\n\n assert X_transformed.shape == (X_sparse.shape[0], 2)\n np.testing.assert_allclose(\n ipca.explained_variance_ratio_.sum(),\n pca.explained_variance_ratio_.sum(),\n rtol=1e-3,\n )\n\n for n_components in [1, 2, X.shape[1]]:\n ipca = IncrementalPCA(n_components, batch_size=batch_size)\n ipca.fit(X_sparse)\n cov = ipca.get_covariance()\n precision = ipca.get_precision()\n np.testing.assert_allclose(\n np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13\n )\n\n with pytest.raises(\n TypeError,\n match=(\n \"IncrementalPCA.partial_fit does not support \"\n \"sparse input. 
Either convert data to dense \"\n \"or use IncrementalPCA.fit to do so in batches.\"\n ),\n ):\n ipca.partial_fit(X_sparse)\n\n\ndef test_incremental_pca_check_projection():\n # Test that the projection of data is correct.\n rng = np.random.RandomState(1999)\n n, p = 100, 3\n X = rng.randn(n, p) * 0.1\n X[:10] += np.array([3, 4, 5])\n Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])\n\n # Get the reconstruction of the generated data X\n # Note that Xt has the same \"components\" as X, just separated\n # This is what we want to ensure is recreated correctly\n Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)\n\n # Normalize\n Yt /= np.sqrt((Yt**2).sum())\n\n # Make sure that the first element of Yt is ~1, this means\n # the reconstruction worked as expected\n assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)\n\n\ndef test_incremental_pca_inverse():\n # Test that the projection of data can be inverted.\n rng = np.random.RandomState(1999)\n n, p = 50, 3\n X = rng.randn(n, p) # spherical data\n X[:, 1] *= 0.00001 # make middle component relatively small\n X += [5, 4, 3] # make a large mean\n\n # same check that we can find the original data from the transformed\n # signal (since the data is almost of rank n_components)\n ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)\n Y = ipca.transform(X)\n Y_inverse = ipca.inverse_transform(Y)\n assert_almost_equal(X, Y_inverse, decimal=3)\n\n\ndef test_incremental_pca_validation():\n # Test that n_components is >=1 and <= n_features.\n X = np.array([[0, 1, 0], [1, 0, 0]])\n n_samples, n_features = X.shape\n for n_components in [-1, 0, 0.99, 4]:\n with pytest.raises(\n ValueError,\n match=(\n \"n_components={} invalid\"\n \" for n_features={}, need more rows than\"\n \" columns for IncrementalPCA\"\n \" processing\".format(n_components, n_features)\n ),\n ):\n IncrementalPCA(n_components, batch_size=10).fit(X)\n\n # Tests that n_components is also <= n_samples.\n n_components = 3\n with pytest.raises(\n ValueError,\n match=(\n \"n_components={} must be\"\n \" less or equal to the batch number of\"\n \" samples {}\".format(n_components, n_samples)\n ),\n ):\n IncrementalPCA(n_components=n_components).partial_fit(X)\n\n\ndef test_n_samples_equal_n_components():\n # Ensures no warning is raised when n_samples==n_components\n # Non-regression test for gh-19050\n ipca = IncrementalPCA(n_components=5)\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n ipca.partial_fit(np.random.randn(5, 7))\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n ipca.fit(np.random.randn(5, 7))\n\n\ndef test_n_components_none():\n # Ensures that n_components == None is handled correctly\n rng = np.random.RandomState(1999)\n for n_samples, n_features in [(50, 10), (10, 50)]:\n X = rng.rand(n_samples, n_features)\n ipca = IncrementalPCA(n_components=None)\n\n # First partial_fit call, ipca.n_components_ is inferred from\n # min(X.shape)\n ipca.partial_fit(X)\n assert ipca.n_components_ == min(X.shape)\n\n # Second partial_fit call, ipca.n_components_ is inferred from\n # ipca.components_ computed from the first partial_fit call\n ipca.partial_fit(X)\n assert ipca.n_components_ == ipca.components_.shape[0]\n\n\ndef test_incremental_pca_set_params():\n # Test that components_ sign is stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 20\n X = rng.randn(n_samples, n_features)\n X2 = rng.randn(n_samples, n_features)\n X3 = rng.randn(n_samples, n_features)\n ipca 
= IncrementalPCA(n_components=20)\n ipca.fit(X)\n # Decreasing number of components\n ipca.set_params(n_components=10)\n with pytest.raises(ValueError):\n ipca.partial_fit(X2)\n # Increasing number of components\n ipca.set_params(n_components=15)\n with pytest.raises(ValueError):\n ipca.partial_fit(X3)\n # Returning to original setting\n ipca.set_params(n_components=20)\n ipca.partial_fit(X)\n\n\ndef test_incremental_pca_num_features_change():\n # Test that changing n_components will raise an error.\n rng = np.random.RandomState(1999)\n n_samples = 100\n X = rng.randn(n_samples, 20)\n X2 = rng.randn(n_samples, 50)\n ipca = IncrementalPCA(n_components=None)\n ipca.fit(X)\n with pytest.raises(ValueError):\n ipca.partial_fit(X2)\n\n\ndef test_incremental_pca_batch_signs():\n # Test that components_ sign is stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features)\n all_components = []\n batch_sizes = np.arange(10, 20)\n for batch_size in batch_sizes:\n ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)\n all_components.append(ipca.components_)\n\n for i, j in zip(all_components[:-1], all_components[1:]):\n assert_almost_equal(np.sign(i), np.sign(j), decimal=6)\n\n\ndef test_incremental_pca_batch_values():\n # Test that components_ values are stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features)\n all_components = []\n batch_sizes = np.arange(20, 40, 3)\n for batch_size in batch_sizes:\n ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)\n all_components.append(ipca.components_)\n\n for i, j in zip(all_components[:-1], all_components[1:]):\n assert_almost_equal(i, j, decimal=1)\n\n\ndef test_incremental_pca_batch_rank():\n # Test sample size in each batch is always larger or equal to n_components\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 20\n X = rng.randn(n_samples, n_features)\n all_components = []\n batch_sizes = np.arange(20, 90, 3)\n for batch_size in batch_sizes:\n ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)\n all_components.append(ipca.components_)\n\n for components_i, components_j in zip(all_components[:-1], all_components[1:]):\n assert_allclose_dense_sparse(components_i, components_j)\n\n\ndef test_incremental_pca_partial_fit():\n # Test that fit and partial_fit get equivalent results.\n rng = np.random.RandomState(1999)\n n, p = 50, 3\n X = rng.randn(n, p) # spherical data\n X[:, 1] *= 0.00001 # make middle component relatively small\n X += [5, 4, 3] # make a large mean\n\n # same check that we can find the original data from the transformed\n # signal (since the data is almost of rank n_components)\n batch_size = 10\n ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)\n pipca = IncrementalPCA(n_components=2, batch_size=batch_size)\n # Add one to make sure endpoint is included\n batch_itr = np.arange(0, n + 1, batch_size)\n for i, j in zip(batch_itr[:-1], batch_itr[1:]):\n pipca.partial_fit(X[i:j, :])\n assert_almost_equal(ipca.components_, pipca.components_, decimal=3)\n\n\ndef test_incremental_pca_against_pca_iris():\n # Test that IncrementalPCA and PCA are approximate (to a sign flip).\n X = iris.data\n\n Y_pca = PCA(n_components=2).fit_transform(X)\n Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)\n\n assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)\n\n\ndef 
test_incremental_pca_against_pca_random_data():\n # Test that IncrementalPCA and PCA are approximate (to a sign flip).\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)\n\n Y_pca = PCA(n_components=3).fit_transform(X)\n Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)\n\n assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)\n\n\ndef test_explained_variances():\n # Test that PCA and IncrementalPCA calculations match\n X = datasets.make_low_rank_matrix(\n 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999\n )\n prec = 3\n n_samples, n_features = X.shape\n for nc in [None, 99]:\n pca = PCA(n_components=nc).fit(X)\n ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)\n assert_almost_equal(\n pca.explained_variance_, ipca.explained_variance_, decimal=prec\n )\n assert_almost_equal(\n pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec\n )\n assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)\n\n\ndef test_singular_values():\n # Check that the IncrementalPCA output has the correct singular values\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n n_features = 100\n\n X = datasets.make_low_rank_matrix(\n n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng\n )\n\n pca = PCA(n_components=10, svd_solver=\"full\", random_state=rng).fit(X)\n ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)\n assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)\n\n # Compare to the Frobenius norm\n X_pca = pca.transform(X)\n X_ipca = ipca.transform(X)\n assert_array_almost_equal(\n np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, \"fro\") ** 2.0, 12\n )\n assert_array_almost_equal(\n np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, \"fro\") ** 2.0, 2\n )\n\n # Compare to the 2-norms of the score vectors\n assert_array_almost_equal(\n pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12\n )\n assert_array_almost_equal(\n ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2\n )\n\n # Set the singular values and see what we get back\n rng = np.random.RandomState(0)\n n_samples = 100\n n_features = 110\n\n X = datasets.make_low_rank_matrix(\n n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng\n )\n\n pca = PCA(n_components=3, svd_solver=\"full\", random_state=rng)\n ipca = IncrementalPCA(n_components=3, batch_size=100)\n\n X_pca = pca.fit_transform(X)\n X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))\n X_pca[:, 0] *= 3.142\n X_pca[:, 1] *= 2.718\n\n X_hat = np.dot(X_pca, pca.components_)\n pca.fit(X_hat)\n ipca.fit(X_hat)\n assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)\n assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)\n\n\ndef test_whitening():\n # Test that PCA and IncrementalPCA transforms match to sign flip.\n X = datasets.make_low_rank_matrix(\n 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999\n )\n prec = 3\n n_samples, n_features = X.shape\n for nc in [None, 9]:\n pca = PCA(whiten=True, n_components=nc).fit(X)\n ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)\n\n Xt_pca = pca.transform(X)\n Xt_ipca = ipca.transform(X)\n assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)\n Xinv_ipca = ipca.inverse_transform(Xt_ipca)\n Xinv_pca = pca.inverse_transform(Xt_pca)\n assert_almost_equal(X, Xinv_ipca, 
decimal=prec)\n assert_almost_equal(X, Xinv_pca, decimal=prec)\n assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)\n\n\ndef test_incremental_pca_partial_fit_float_division():\n # Test to ensure float division is used in all versions of Python\n # (non-regression test for issue #9489)\n\n rng = np.random.RandomState(0)\n A = rng.randn(5, 3) + 2\n B = rng.randn(7, 3) + 5\n\n pca = IncrementalPCA(n_components=2)\n pca.partial_fit(A)\n # Set n_samples_seen_ to be a floating point number instead of an int\n pca.n_samples_seen_ = float(pca.n_samples_seen_)\n pca.partial_fit(B)\n singular_vals_float_samples_seen = pca.singular_values_\n\n pca2 = IncrementalPCA(n_components=2)\n pca2.partial_fit(A)\n pca2.partial_fit(B)\n singular_vals_int_samples_seen = pca2.singular_values_\n\n np.testing.assert_allclose(\n singular_vals_float_samples_seen, singular_vals_int_samples_seen\n )\n\n\ndef test_incremental_pca_fit_overflow_error():\n # Test for overflow error on Windows OS\n # (non-regression test for issue #17693)\n rng = np.random.RandomState(0)\n A = rng.rand(500000, 2)\n\n ipca = IncrementalPCA(n_components=2, batch_size=10000)\n ipca.fit(A)\n\n pca = PCA(n_components=2)\n pca.fit(A)\n\n np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)\n\n\ndef test_incremental_pca_feature_names_out():\n \"\"\"Check feature names out for IncrementalPCA.\"\"\"\n ipca = IncrementalPCA(n_components=2).fit(iris.data)\n\n names = ipca.get_feature_names_out()\n assert_array_equal([f\"incrementalpca{i}\" for i in range(2)], names)\n", "\"\"\"\n=============================================================\nReceiver Operating Characteristic (ROC) with cross validation\n=============================================================\n\nExample of Receiver Operating Characteristic (ROC) metric to evaluate\nclassifier output quality using cross-validation.\n\nROC curves typically feature true positive rate on the Y axis, and false\npositive rate on the X axis. This means that the top left corner of the plot is\nthe \"ideal\" point - a false positive rate of zero, and a true positive rate of\none. This is not very realistic, but it does mean that a larger area under the\ncurve (AUC) is usually better.\n\nThe \"steepness\" of ROC curves is also important, since it is ideal to maximize\nthe true positive rate while minimizing the false positive rate.\n\nThis example shows the ROC response of different datasets, created from K-fold\ncross-validation. Taking all of these curves, it is possible to calculate the\nmean area under curve, and see the variance of the curve when the\ntraining set is split into different subsets. This roughly shows how the\nclassifier output is affected by changes in the training data, and how\ndifferent the splits generated by K-fold cross-validation are from one another.\n\n.. 
note::\n\n See also :func:`sklearn.metrics.roc_auc_score`,\n :func:`sklearn.model_selection.cross_val_score`,\n :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,\n\n\"\"\"\n\n# %%\n# Data IO and generation\n# ----------------------\nimport numpy as np\n\nfrom sklearn import datasets\n\n# Import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nX, y = X[y != 2], y[y != 2]\nn_samples, n_features = X.shape\n\n# Add noisy features\nrandom_state = np.random.RandomState(0)\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# %%\n# Classification and ROC analysis\n# -------------------------------\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import RocCurveDisplay\nfrom sklearn.model_selection import StratifiedKFold\n\n# Run classifier with cross-validation and plot ROC curves\ncv = StratifiedKFold(n_splits=6)\nclassifier = svm.SVC(kernel=\"linear\", probability=True, random_state=random_state)\n\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots()\nfor i, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X[train], y[train])\n viz = RocCurveDisplay.from_estimator(\n classifier,\n X[test],\n y[test],\n name=\"ROC fold {}\".format(i),\n alpha=0.3,\n lw=1,\n ax=ax,\n )\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nax.plot([0, 1], [0, 1], linestyle=\"--\", lw=2, color=\"r\", label=\"Chance\", alpha=0.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(\n mean_fpr,\n mean_tpr,\n color=\"b\",\n label=r\"Mean ROC (AUC = %0.2f $\\pm$ %0.2f)\" % (mean_auc, std_auc),\n lw=2,\n alpha=0.8,\n)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(\n mean_fpr,\n tprs_lower,\n tprs_upper,\n color=\"grey\",\n alpha=0.2,\n label=r\"$\\pm$ 1 std. dev.\",\n)\n\nax.set(\n xlim=[-0.05, 1.05],\n ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\",\n)\nax.legend(loc=\"lower right\")\nplt.show()\n", "\"\"\"\nThis module gathers tree-based methods, including decision, regression and\nrandomized trees. 
Single and multi-output problems are both handled.\n\"\"\"\n\n# Authors: Gilles Louppe <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Brian Holt <[email protected]>\n# Noel Dawe <[email protected]>\n# Satrajit Gosh <[email protected]>\n# Joly Arnaud <[email protected]>\n# Fares Hedayati <[email protected]>\n# Nelson Liu <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numbers\nimport warnings\nimport copy\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom math import ceil\n\nimport numpy as np\nfrom scipy.sparse import issparse\n\nfrom ..base import BaseEstimator\nfrom ..base import ClassifierMixin\nfrom ..base import clone\nfrom ..base import RegressorMixin\nfrom ..base import is_classifier\nfrom ..base import MultiOutputMixin\nfrom ..utils import Bunch\nfrom ..utils import check_random_state\nfrom ..utils import check_scalar\nfrom ..utils.deprecation import deprecated\nfrom ..utils.validation import _check_sample_weight\nfrom ..utils import compute_sample_weight\nfrom ..utils.multiclass import check_classification_targets\nfrom ..utils.validation import check_is_fitted\n\nfrom ._criterion import Criterion\nfrom ._splitter import Splitter\nfrom ._tree import DepthFirstTreeBuilder\nfrom ._tree import BestFirstTreeBuilder\nfrom ._tree import Tree\nfrom ._tree import _build_pruned_tree_ccp\nfrom ._tree import ccp_pruning_path\nfrom . import _tree, _splitter, _criterion\n\n__all__ = [\n \"DecisionTreeClassifier\",\n \"DecisionTreeRegressor\",\n \"ExtraTreeClassifier\",\n \"ExtraTreeRegressor\",\n]\n\n\n# =============================================================================\n# Types and constants\n# =============================================================================\n\nDTYPE = _tree.DTYPE\nDOUBLE = _tree.DOUBLE\n\nCRITERIA_CLF = {\n \"gini\": _criterion.Gini,\n \"log_loss\": _criterion.Entropy,\n \"entropy\": _criterion.Entropy,\n}\n# TODO(1.2): Remove \"mse\" and \"mae\".\nCRITERIA_REG = {\n \"squared_error\": _criterion.MSE,\n \"mse\": _criterion.MSE,\n \"friedman_mse\": _criterion.FriedmanMSE,\n \"absolute_error\": _criterion.MAE,\n \"mae\": _criterion.MAE,\n \"poisson\": _criterion.Poisson,\n}\n\nDENSE_SPLITTERS = {\"best\": _splitter.BestSplitter, \"random\": _splitter.RandomSplitter}\n\nSPARSE_SPLITTERS = {\n \"best\": _splitter.BestSparseSplitter,\n \"random\": _splitter.RandomSparseSplitter,\n}\n\n# =============================================================================\n# Base decision tree\n# =============================================================================\n\n\nclass BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for decision trees.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n @abstractmethod\n def __init__(\n self,\n *,\n criterion,\n splitter,\n max_depth,\n min_samples_split,\n min_samples_leaf,\n min_weight_fraction_leaf,\n max_features,\n max_leaf_nodes,\n random_state,\n min_impurity_decrease,\n class_weight=None,\n ccp_alpha=0.0,\n ):\n self.criterion = criterion\n self.splitter = splitter\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.random_state = random_state\n self.min_impurity_decrease = min_impurity_decrease\n self.class_weight = class_weight\n self.ccp_alpha = ccp_alpha\n\n def get_depth(self):\n 
\"\"\"Return the depth of the decision tree.\n\n The depth of a tree is the maximum distance between the root\n and any leaf.\n\n Returns\n -------\n self.tree_.max_depth : int\n The maximum depth of the tree.\n \"\"\"\n check_is_fitted(self)\n return self.tree_.max_depth\n\n def get_n_leaves(self):\n \"\"\"Return the number of leaves of the decision tree.\n\n Returns\n -------\n self.tree_.n_leaves : int\n Number of leaves.\n \"\"\"\n check_is_fitted(self)\n return self.tree_.n_leaves\n\n def fit(self, X, y, sample_weight=None, check_input=True):\n\n random_state = check_random_state(self.random_state)\n\n check_scalar(\n self.ccp_alpha,\n name=\"ccp_alpha\",\n target_type=numbers.Real,\n min_val=0.0,\n )\n\n if check_input:\n # Need to validate separately here.\n # We can't pass multi_output=True because that would allow y to be\n # csr.\n check_X_params = dict(dtype=DTYPE, accept_sparse=\"csc\")\n check_y_params = dict(ensure_2d=False, dtype=None)\n X, y = self._validate_data(\n X, y, validate_separately=(check_X_params, check_y_params)\n )\n if issparse(X):\n X.sort_indices()\n\n if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:\n raise ValueError(\n \"No support for np.int64 index based sparse matrices\"\n )\n\n if self.criterion == \"poisson\":\n if np.any(y < 0):\n raise ValueError(\n \"Some value(s) of y are negative which is\"\n \" not allowed for Poisson regression.\"\n )\n if np.sum(y) <= 0:\n raise ValueError(\n \"Sum of y is not positive which is \"\n \"necessary for Poisson regression.\"\n )\n\n # Determine output settings\n n_samples, self.n_features_in_ = X.shape\n is_classification = is_classifier(self)\n\n y = np.atleast_1d(y)\n expanded_class_weight = None\n\n if y.ndim == 1:\n # reshape is necessary to preserve the data contiguity against vs\n # [:, np.newaxis] that does not.\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n if is_classification:\n check_classification_targets(y)\n y = np.copy(y)\n\n self.classes_ = []\n self.n_classes_ = []\n\n if self.class_weight is not None:\n y_original = np.copy(y)\n\n y_encoded = np.zeros(y.shape, dtype=int)\n for k in range(self.n_outputs_):\n classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes_k)\n self.n_classes_.append(classes_k.shape[0])\n y = y_encoded\n\n if self.class_weight is not None:\n expanded_class_weight = compute_sample_weight(\n self.class_weight, y_original\n )\n\n self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)\n\n if getattr(y, \"dtype\", None) != DOUBLE or not y.flags.contiguous:\n y = np.ascontiguousarray(y, dtype=DOUBLE)\n\n # Check parameters\n if self.max_depth is not None:\n check_scalar(\n self.max_depth,\n name=\"max_depth\",\n target_type=numbers.Integral,\n min_val=1,\n )\n max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth\n\n if isinstance(self.min_samples_leaf, numbers.Integral):\n check_scalar(\n self.min_samples_leaf,\n name=\"min_samples_leaf\",\n target_type=numbers.Integral,\n min_val=1,\n )\n min_samples_leaf = self.min_samples_leaf\n else: # float\n check_scalar(\n self.min_samples_leaf,\n name=\"min_samples_leaf\",\n target_type=numbers.Real,\n min_val=0.0,\n include_boundaries=\"neither\",\n )\n min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))\n\n if isinstance(self.min_samples_split, numbers.Integral):\n check_scalar(\n self.min_samples_split,\n name=\"min_samples_split\",\n target_type=numbers.Integral,\n min_val=2,\n )\n min_samples_split = 
self.min_samples_split\n else: # float\n check_scalar(\n self.min_samples_split,\n name=\"min_samples_split\",\n target_type=numbers.Real,\n min_val=0.0,\n max_val=1.0,\n include_boundaries=\"right\",\n )\n min_samples_split = int(ceil(self.min_samples_split * n_samples))\n min_samples_split = max(2, min_samples_split)\n\n min_samples_split = max(min_samples_split, 2 * min_samples_leaf)\n\n check_scalar(\n self.min_weight_fraction_leaf,\n name=\"min_weight_fraction_leaf\",\n target_type=numbers.Real,\n min_val=0.0,\n max_val=0.5,\n )\n\n if isinstance(self.max_features, str):\n if self.max_features == \"auto\":\n if is_classification:\n max_features = max(1, int(np.sqrt(self.n_features_in_)))\n warnings.warn(\n \"`max_features='auto'` has been deprecated in 1.1 \"\n \"and will be removed in 1.3. To keep the past behaviour, \"\n \"explicitly set `max_features='sqrt'`.\",\n FutureWarning,\n )\n else:\n max_features = self.n_features_in_\n warnings.warn(\n \"`max_features='auto'` has been deprecated in 1.1 \"\n \"and will be removed in 1.3. To keep the past behaviour, \"\n \"explicitly set `max_features=1.0'`.\",\n FutureWarning,\n )\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features_in_)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features_in_)))\n else:\n raise ValueError(\n \"Invalid value for max_features. \"\n \"Allowed string values are 'auto', \"\n \"'sqrt' or 'log2'.\"\n )\n elif self.max_features is None:\n max_features = self.n_features_in_\n elif isinstance(self.max_features, numbers.Integral):\n check_scalar(\n self.max_features,\n name=\"max_features\",\n target_type=numbers.Integral,\n min_val=1,\n include_boundaries=\"left\",\n )\n max_features = self.max_features\n else: # float\n check_scalar(\n self.max_features,\n name=\"max_features\",\n target_type=numbers.Real,\n min_val=0.0,\n max_val=1.0,\n include_boundaries=\"right\",\n )\n if self.max_features > 0.0:\n max_features = max(1, int(self.max_features * self.n_features_in_))\n else:\n max_features = 0\n\n self.max_features_ = max_features\n\n if self.max_leaf_nodes is not None:\n check_scalar(\n self.max_leaf_nodes,\n name=\"max_leaf_nodes\",\n target_type=numbers.Integral,\n min_val=2,\n )\n max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes\n\n check_scalar(\n self.min_impurity_decrease,\n name=\"min_impurity_decrease\",\n target_type=numbers.Real,\n min_val=0.0,\n )\n\n if len(y) != n_samples:\n raise ValueError(\n \"Number of labels=%d does not match number of samples=%d\"\n % (len(y), n_samples)\n )\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)\n\n if expanded_class_weight is not None:\n if sample_weight is not None:\n sample_weight = sample_weight * expanded_class_weight\n else:\n sample_weight = expanded_class_weight\n\n # Set min_weight_leaf from min_weight_fraction_leaf\n if sample_weight is None:\n min_weight_leaf = self.min_weight_fraction_leaf * n_samples\n else:\n min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight)\n\n # Build tree\n criterion = self.criterion\n if not isinstance(criterion, Criterion):\n if is_classification:\n criterion = CRITERIA_CLF[self.criterion](\n self.n_outputs_, self.n_classes_\n )\n else:\n criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples)\n # TODO(1.2): Remove \"mse\" and \"mae\"\n if self.criterion == \"mse\":\n warnings.warn(\n \"Criterion 'mse' was deprecated in v1.0 and will be \"\n 
\"removed in version 1.2. Use `criterion='squared_error'` \"\n \"which is equivalent.\",\n FutureWarning,\n )\n elif self.criterion == \"mae\":\n warnings.warn(\n \"Criterion 'mae' was deprecated in v1.0 and will be \"\n \"removed in version 1.2. Use `criterion='absolute_error'` \"\n \"which is equivalent.\",\n FutureWarning,\n )\n else:\n # Make a deepcopy in case the criterion has mutable attributes that\n # might be shared and modified concurrently during parallel fitting\n criterion = copy.deepcopy(criterion)\n\n SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS\n\n splitter = self.splitter\n if not isinstance(self.splitter, Splitter):\n splitter = SPLITTERS[self.splitter](\n criterion,\n self.max_features_,\n min_samples_leaf,\n min_weight_leaf,\n random_state,\n )\n\n if is_classifier(self):\n self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_)\n else:\n self.tree_ = Tree(\n self.n_features_in_,\n # TODO: tree shouldn't need this in this case\n np.array([1] * self.n_outputs_, dtype=np.intp),\n self.n_outputs_,\n )\n\n # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise\n if max_leaf_nodes < 0:\n builder = DepthFirstTreeBuilder(\n splitter,\n min_samples_split,\n min_samples_leaf,\n min_weight_leaf,\n max_depth,\n self.min_impurity_decrease,\n )\n else:\n builder = BestFirstTreeBuilder(\n splitter,\n min_samples_split,\n min_samples_leaf,\n min_weight_leaf,\n max_depth,\n max_leaf_nodes,\n self.min_impurity_decrease,\n )\n\n builder.build(self.tree_, X, y, sample_weight)\n\n if self.n_outputs_ == 1 and is_classifier(self):\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n\n self._prune_tree()\n\n return self\n\n def _validate_X_predict(self, X, check_input):\n \"\"\"Validate the training data on predict (probabilities).\"\"\"\n if check_input:\n X = self._validate_data(X, dtype=DTYPE, accept_sparse=\"csr\", reset=False)\n if issparse(X) and (\n X.indices.dtype != np.intc or X.indptr.dtype != np.intc\n ):\n raise ValueError(\"No support for np.int64 index based sparse matrices\")\n else:\n # The number of features is checked regardless of `check_input`\n self._check_n_features(X, reset=False)\n return X\n\n def predict(self, X, check_input=True):\n \"\"\"Predict class or regression value for X.\n\n For a classification model, the predicted class for each sample in X is\n returned. For a regression model, the predicted value based on X is\n returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The predicted classes, or the predict values.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n n_samples = X.shape[0]\n\n # Classification\n if is_classifier(self):\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n\n else:\n class_type = self.classes_[0].dtype\n predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(\n np.argmax(proba[:, k], axis=1), axis=0\n )\n\n return predictions\n\n # Regression\n else:\n if self.n_outputs_ == 1:\n return proba[:, 0]\n\n else:\n return proba[:, :, 0]\n\n def apply(self, X, check_input=True):\n \"\"\"Return the index of the leaf that each sample is predicted as.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n X_leaves : array-like of shape (n_samples,)\n For each datapoint x in X, return the index of the leaf x\n ends up in. Leaves are numbered within\n ``[0; self.tree_.node_count)``, possibly with gaps in the\n numbering.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n return self.tree_.apply(X)\n\n def decision_path(self, X, check_input=True):\n \"\"\"Return the decision path in the tree.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n indicator : sparse matrix of shape (n_samples, n_nodes)\n Return a node indicator CSR matrix where non zero elements\n indicates that the samples goes through the nodes.\n \"\"\"\n X = self._validate_X_predict(X, check_input)\n return self.tree_.decision_path(X)\n\n def _prune_tree(self):\n \"\"\"Prune tree using Minimal Cost-Complexity Pruning.\"\"\"\n check_is_fitted(self)\n\n if self.ccp_alpha == 0.0:\n return\n\n # build pruned tree\n if is_classifier(self):\n n_classes = np.atleast_1d(self.n_classes_)\n pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)\n else:\n pruned_tree = Tree(\n self.n_features_in_,\n # TODO: the tree shouldn't need this param\n np.array([1] * self.n_outputs_, dtype=np.intp),\n self.n_outputs_,\n )\n _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)\n\n self.tree_ = pruned_tree\n\n def cost_complexity_pruning_path(self, X, y, sample_weight=None):\n \"\"\"Compute the pruning path during Minimal Cost-Complexity Pruning.\n\n See :ref:`minimal_cost_complexity_pruning` for details on the pruning\n process.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n Returns\n -------\n ccp_path : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n ccp_alphas : ndarray\n Effective alphas of subtree during pruning.\n\n impurities : ndarray\n Sum of the impurities of the subtree leaves for the\n corresponding alpha value in ``ccp_alphas``.\n \"\"\"\n est = clone(self).set_params(ccp_alpha=0.0)\n est.fit(X, y, sample_weight=sample_weight)\n return Bunch(**ccp_pruning_path(est.tree_))\n\n @property\n def feature_importances_(self):\n \"\"\"Return the feature importances.\n\n The importance of a feature is computed as the (normalized) total\n reduction of the criterion brought by that feature.\n It is also known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). 
See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n Normalized total reduction of criteria by feature\n (Gini importance).\n \"\"\"\n check_is_fitted(self)\n\n return self.tree_.compute_feature_importances()\n\n\n# =============================================================================\n# Public estimators\n# =============================================================================\n\n\nclass DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):\n \"\"\"A decision tree classifier.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"\n The function to measure the quality of a split. Supported criteria are\n \"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\n Shannon information gain, see :ref:`tree_mathematical_formulation`.\n\n splitter : {\"best\", \"random\"}, default=\"best\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. 
deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the estimator. The features are always\n randomly permuted at each split, even if ``splitter`` is set to\n ``\"best\"``. When ``max_features < n_features``, the algorithm will\n select ``max_features`` at random at each split before finding the best\n split among them. But the best found split may vary across different\n runs, even if ``max_features=n_features``. That is the case, if the\n improvement of the criterion is identical for several splits and one\n split has to be selected at random. To obtain a deterministic behaviour\n during fitting, ``random_state`` has to be fixed to an integer.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n class_weight : dict, list of dict or \"balanced\", default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If None, all classes are supposed to have weight one. For\n multi-output problems, a list of dicts can be provided in the same\n order as the columns of y.\n\n Note that for multioutput (including multilabel) weights should be\n defined for each class of every column in its own dict. For example,\n for four-class multilabel classification weights should be\n [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n [{1:1}, {2:5}, {3:1}, {4:1}].\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n For multi-output, the weights of each column of y will be multiplied.\n\n Note that these weights will be multiplied with sample_weight (passed\n through the fit method) if sample_weight is specified.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. 
versionadded:: 0.22\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,) or list of ndarray\n The classes labels (single output problem),\n or a list of arrays of class labels (multi-output problem).\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance [4]_.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n max_features_ : int\n The inferred value of max_features.\n\n n_classes_ : int or list of int\n The number of classes (for single output problems),\n or a list containing the number of classes for each\n output (for multi-output problems).\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n `n_features_` is deprecated in 1.0 and will be removed in\n 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n DecisionTreeRegressor : A decision tree regressor.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n The :meth:`predict` method operates using the :func:`numpy.argmax`\n function on the outputs of :meth:`predict_proba`. This means that in\n case the highest predicted probabilities are tied, the classifier will\n predict the tied class with the lowest index in :term:`classes_`.\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. Breiman, and A. Cutler, \"Random Forests\",\n https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.model_selection import cross_val_score\n >>> from sklearn.tree import DecisionTreeClassifier\n >>> clf = DecisionTreeClassifier(random_state=0)\n >>> iris = load_iris()\n >>> cross_val_score(clf, iris.data, iris.target, cv=10)\n ... # doctest: +SKIP\n ...\n array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,\n 0.93..., 0.93..., 1. , 0.93..., 1. 
])\n \"\"\"\n\n def __init__(\n self,\n *,\n criterion=\"gini\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n class_weight=None,\n ccp_alpha=0.0,\n ):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n random_state=random_state,\n min_impurity_decrease=min_impurity_decrease,\n ccp_alpha=ccp_alpha,\n )\n\n def fit(self, X, y, sample_weight=None, check_input=True):\n \"\"\"Build a decision tree classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n self : DecisionTreeClassifier\n Fitted estimator.\n \"\"\"\n\n super().fit(\n X,\n y,\n sample_weight=sample_weight,\n check_input=check_input,\n )\n return self\n\n def predict_proba(self, X, check_input=True):\n \"\"\"Predict class probabilities of the input samples X.\n\n The predicted class probability is the fraction of samples of the same\n class in a leaf.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \\\n such arrays if n_outputs > 1\n The class probabilities of the input samples. 
The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n\n if self.n_outputs_ == 1:\n proba = proba[:, : self.n_classes_]\n normalizer = proba.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba /= normalizer\n\n return proba\n\n else:\n all_proba = []\n\n for k in range(self.n_outputs_):\n proba_k = proba[:, k, : self.n_classes_[k]]\n normalizer = proba_k.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba_k /= normalizer\n all_proba.append(proba_k)\n\n return all_proba\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities of the input samples X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \\\n such arrays if n_outputs > 1\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n proba = self.predict_proba(X)\n\n if self.n_outputs_ == 1:\n return np.log(proba)\n\n else:\n for k in range(self.n_outputs_):\n proba[k] = np.log(proba[k])\n\n return proba\n\n @deprecated( # type: ignore\n \"The attribute `n_features_` is deprecated in 1.0 and will be removed \"\n \"in 1.2. Use `n_features_in_` instead.\"\n )\n @property\n def n_features_(self):\n return self.n_features_in_\n\n def _more_tags(self):\n return {\"multilabel\": True}\n\n\nclass DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):\n \"\"\"A decision tree regressor.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"squared_error\", \"friedman_mse\", \"absolute_error\", \\\n \"poisson\"}, default=\"squared_error\"\n The function to measure the quality of a split. Supported criteria\n are \"squared_error\" for the mean squared error, which is equal to\n variance reduction as feature selection criterion and minimizes the L2\n loss using the mean of each terminal node, \"friedman_mse\", which uses\n mean squared error with Friedman's improvement score for potential\n splits, \"absolute_error\" for the mean absolute error, which minimizes\n the L1 loss using the median of each terminal node, and \"poisson\" which\n uses reduction in Poisson deviance to find splits.\n\n .. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n .. versionadded:: 0.24\n Poisson deviance criterion.\n\n .. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n .. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"absolute_error\"` which is equivalent.\n\n splitter : {\"best\", \"random\"}, default=\"best\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. 
If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the estimator. The features are always\n randomly permuted at each split, even if ``splitter`` is set to\n ``\"best\"``. When ``max_features < n_features``, the algorithm will\n select ``max_features`` at random at each split before finding the best\n split among them. But the best found split may vary across different\n runs, even if ``max_features=n_features``. That is the case, if the\n improvement of the criterion is identical for several splits and one\n split has to be selected at random. 
To obtain a deterministic behaviour\n during fitting, ``random_state`` has to be fixed to an integer.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n feature_importances_ : ndarray of shape (n_features,)\n The feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the\n (normalized) total reduction of the criterion brought\n by that feature. It is also known as the Gini importance [4]_.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n max_features_ : int\n The inferred value of max_features.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n `n_features_` is deprecated in 1.0 and will be removed in\n 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n DecisionTreeClassifier : A decision tree classifier.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. 
Breiman, and A. Cutler, \"Random Forests\",\n https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.model_selection import cross_val_score\n >>> from sklearn.tree import DecisionTreeRegressor\n >>> X, y = load_diabetes(return_X_y=True)\n >>> regressor = DecisionTreeRegressor(random_state=0)\n >>> cross_val_score(regressor, X, y, cv=10)\n ... # doctest: +SKIP\n ...\n array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,\n 0.16..., 0.11..., -0.73..., -0.30..., -0.00...])\n \"\"\"\n\n def __init__(\n self,\n *,\n criterion=\"squared_error\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n ccp_alpha=0.0,\n ):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n random_state=random_state,\n min_impurity_decrease=min_impurity_decrease,\n ccp_alpha=ccp_alpha,\n )\n\n def fit(self, X, y, sample_weight=None, check_input=True):\n \"\"\"Build a decision tree regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (real numbers). Use ``dtype=np.float64`` and\n ``order='C'`` for maximum efficiency.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n self : DecisionTreeRegressor\n Fitted estimator.\n \"\"\"\n\n super().fit(\n X,\n y,\n sample_weight=sample_weight,\n check_input=check_input,\n )\n return self\n\n def _compute_partial_dependence_recursion(self, grid, target_features):\n \"\"\"Fast partial dependence computation.\n\n Parameters\n ----------\n grid : ndarray of shape (n_samples, n_target_features)\n The grid points on which the partial dependence should be\n evaluated.\n target_features : ndarray of shape (n_target_features)\n The set of target features for which the partial dependence\n should be evaluated.\n\n Returns\n -------\n averaged_predictions : ndarray of shape (n_samples,)\n The value of the partial dependence function on each grid point.\n \"\"\"\n grid = np.asarray(grid, dtype=DTYPE, order=\"C\")\n averaged_predictions = np.zeros(\n shape=grid.shape[0], dtype=np.float64, order=\"C\"\n )\n\n self.tree_.compute_partial_dependence(\n grid, target_features, averaged_predictions\n )\n return averaged_predictions\n\n @deprecated( # type: ignore\n \"The attribute `n_features_` is deprecated in 1.0 and will be removed \"\n \"in 1.2. 
Use `n_features_in_` instead.\"\n )\n @property\n def n_features_(self):\n return self.n_features_in_\n\n\nclass ExtraTreeClassifier(DecisionTreeClassifier):\n \"\"\"An extremely randomized tree classifier.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. When\n `max_features` is set 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"\n The function to measure the quality of a split. Supported criteria are\n \"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\n Shannon information gain, see :ref:`tree_mathematical_formulation`.\n\n splitter : {\"random\", \"best\"}, default=\"random\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=\"sqrt\"\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `\"sqrt\"`.\n\n .. 
deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Used to pick randomly the `max_features` used at each split.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n class_weight : dict, list of dict or \"balanced\", default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If None, all classes are supposed to have weight one. For\n multi-output problems, a list of dicts can be provided in the same\n order as the columns of y.\n\n Note that for multioutput (including multilabel) weights should be\n defined for each class of every column in its own dict. For example,\n for four-class multilabel classification weights should be\n [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n [{1:1}, {2:5}, {3:1}, {4:1}].\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n For multi-output, the weights of each column of y will be multiplied.\n\n Note that these weights will be multiplied with sample_weight (passed\n through the fit method) if sample_weight is specified.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,) or list of ndarray\n The classes labels (single output problem),\n or a list of arrays of class labels (multi-output problem).\n\n max_features_ : int\n The inferred value of max_features.\n\n n_classes_ : int or list of int\n The number of classes (for single output problems),\n or a list containing the number of classes for each\n output (for multi-output problems).\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). 
See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n `n_features_` is deprecated in 1.0 and will be removed in\n 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n ExtraTreeRegressor : An extremely randomized tree regressor.\n sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.\n sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.\n sklearn.ensemble.RandomForestClassifier : A random forest classifier.\n sklearn.ensemble.RandomForestRegressor : A random forest regressor.\n sklearn.ensemble.RandomTreesEmbedding : An ensemble of\n totally random trees.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] P. Geurts, D. Ernst., and L. Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import BaggingClassifier\n >>> from sklearn.tree import ExtraTreeClassifier\n >>> X, y = load_iris(return_X_y=True)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> extra_tree = ExtraTreeClassifier(random_state=0)\n >>> cls = BaggingClassifier(extra_tree, random_state=0).fit(\n ... X_train, y_train)\n >>> cls.score(X_test, y_test)\n 0.8947...\n \"\"\"\n\n def __init__(\n self,\n *,\n criterion=\"gini\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=\"sqrt\",\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n class_weight=None,\n ccp_alpha=0.0,\n ):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n min_impurity_decrease=min_impurity_decrease,\n random_state=random_state,\n ccp_alpha=ccp_alpha,\n )\n\n\nclass ExtraTreeRegressor(DecisionTreeRegressor):\n \"\"\"An extremely randomized tree regressor.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. 
When\n `max_features` is set 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"squared_error\", \"friedman_mse\"}, default=\"squared_error\"\n The function to measure the quality of a split. Supported criteria\n are \"squared_error\" for the mean squared error, which is equal to\n variance reduction as feature selection criterion and \"mae\" for the\n mean absolute error.\n\n .. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n .. versionadded:: 0.24\n Poisson deviance criterion.\n\n .. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n .. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"absolute_error\"` which is equivalent.\n\n splitter : {\"random\", \"best\"}, default=\"random\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=1.0\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `1.0`.\n\n .. 
deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Used to pick randomly the `max_features` used at each split.\n See :term:`Glossary <random_state>` for details.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n max_features_ : int\n The inferred value of max_features.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n `n_features_` is deprecated in 1.0 and will be removed in\n 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n feature_importances_ : ndarray of shape (n_features,)\n Return impurity-based feature importances (the higher, the more\n important the feature).\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n ExtraTreeClassifier : An extremely randomized tree classifier.\n sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.\n sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] P. 
Geurts, D. Ernst., and L. Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import BaggingRegressor\n >>> from sklearn.tree import ExtraTreeRegressor\n >>> X, y = load_diabetes(return_X_y=True)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> extra_tree = ExtraTreeRegressor(random_state=0)\n >>> reg = BaggingRegressor(extra_tree, random_state=0).fit(\n ... X_train, y_train)\n >>> reg.score(X_test, y_test)\n 0.33...\n \"\"\"\n\n def __init__(\n self,\n *,\n criterion=\"squared_error\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=1.0,\n random_state=None,\n min_impurity_decrease=0.0,\n max_leaf_nodes=None,\n ccp_alpha=0.0,\n ):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n random_state=random_state,\n ccp_alpha=ccp_alpha,\n )\n", "import numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nfrom sklearn.feature_extraction import FeatureHasher\nfrom sklearn.feature_extraction._hashing_fast import transform as _hashing_transform\n\n\ndef test_feature_hasher_dicts():\n feature_hasher = FeatureHasher(n_features=16)\n assert \"dict\" == feature_hasher.input_type\n\n raw_X = [{\"foo\": \"bar\", \"dada\": 42, \"tzara\": 37}, {\"foo\": \"baz\", \"gaga\": \"string1\"}]\n X1 = FeatureHasher(n_features=16).transform(raw_X)\n gen = (iter(d.items()) for d in raw_X)\n X2 = FeatureHasher(n_features=16, input_type=\"pair\").transform(gen)\n assert_array_equal(X1.toarray(), X2.toarray())\n\n\ndef test_feature_hasher_strings():\n # mix byte and Unicode strings; note that \"foo\" is a duplicate in row 0\n raw_X = [\n [\"foo\", \"bar\", \"baz\", \"foo\".encode(\"ascii\")],\n [\"bar\".encode(\"ascii\"), \"baz\", \"quux\"],\n ]\n\n for lg_n_features in (7, 9, 11, 16, 22):\n n_features = 2**lg_n_features\n\n it = (x for x in raw_X) # iterable\n\n feature_hasher = FeatureHasher(\n n_features=n_features, input_type=\"string\", alternate_sign=False\n )\n X = feature_hasher.transform(it)\n\n assert X.shape[0] == len(raw_X)\n assert X.shape[1] == n_features\n\n assert X[0].sum() == 4\n assert X[1].sum() == 3\n\n assert X.nnz == 6\n\n\ndef test_hashing_transform_seed():\n # check the influence of the seed when computing the hashes\n raw_X = [\n [\"foo\", \"bar\", \"baz\", \"foo\".encode(\"ascii\")],\n [\"bar\".encode(\"ascii\"), \"baz\", \"quux\"],\n ]\n\n raw_X_ = (((f, 1) for f in x) for x in raw_X)\n indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False)\n\n raw_X_ = (((f, 1) for f in x) for x in raw_X)\n indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0)\n assert_array_equal(indices, indices_0)\n assert_array_equal(indptr, indptr_0)\n\n raw_X_ = (((f, 1) for f in x) for x in raw_X)\n indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1)\n with pytest.raises(AssertionError):\n assert_array_equal(indices, indices_1)\n\n\ndef test_feature_hasher_pairs():\n raw_X = (\n iter(d.items())\n for d in [{\"foo\": 1, \"bar\": 2}, {\"baz\": 3, \"quux\": 4, 
\"foo\": -1}]\n )\n feature_hasher = FeatureHasher(n_features=16, input_type=\"pair\")\n x1, x2 = feature_hasher.transform(raw_X).toarray()\n x1_nz = sorted(np.abs(x1[x1 != 0]))\n x2_nz = sorted(np.abs(x2[x2 != 0]))\n assert [1, 2] == x1_nz\n assert [1, 3, 4] == x2_nz\n\n\ndef test_feature_hasher_pairs_with_string_values():\n raw_X = (\n iter(d.items())\n for d in [{\"foo\": 1, \"bar\": \"a\"}, {\"baz\": \"abc\", \"quux\": 4, \"foo\": -1}]\n )\n feature_hasher = FeatureHasher(n_features=16, input_type=\"pair\")\n x1, x2 = feature_hasher.transform(raw_X).toarray()\n x1_nz = sorted(np.abs(x1[x1 != 0]))\n x2_nz = sorted(np.abs(x2[x2 != 0]))\n assert [1, 1] == x1_nz\n assert [1, 1, 4] == x2_nz\n\n raw_X = (iter(d.items()) for d in [{\"bax\": \"abc\"}, {\"bax\": \"abc\"}])\n x1, x2 = feature_hasher.transform(raw_X).toarray()\n x1_nz = np.abs(x1[x1 != 0])\n x2_nz = np.abs(x2[x2 != 0])\n assert [1] == x1_nz\n assert [1] == x2_nz\n assert_array_equal(x1, x2)\n\n\ndef test_hash_empty_input():\n n_features = 16\n raw_X = [[], (), iter(range(0))]\n\n feature_hasher = FeatureHasher(n_features=n_features, input_type=\"string\")\n X = feature_hasher.transform(raw_X)\n\n assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))\n\n\ndef test_hasher_invalid_input():\n raw_X = [[], (), iter(range(0))]\n\n feature_hasher = FeatureHasher(input_type=\"gobbledygook\")\n with pytest.raises(ValueError):\n feature_hasher.transform(raw_X)\n feature_hasher = FeatureHasher(n_features=-1)\n with pytest.raises(ValueError):\n feature_hasher.transform(raw_X)\n feature_hasher = FeatureHasher(n_features=0)\n with pytest.raises(ValueError):\n feature_hasher.transform(raw_X)\n feature_hasher = FeatureHasher(n_features=\"ham\")\n with pytest.raises(TypeError):\n feature_hasher.transform(raw_X)\n\n feature_hasher = FeatureHasher(n_features=np.uint16(2**6))\n with pytest.raises(ValueError):\n feature_hasher.transform([])\n with pytest.raises(Exception):\n feature_hasher.transform([[5.5]])\n with pytest.raises(Exception):\n feature_hasher.transform([[None]])\n\n\ndef test_hasher_set_params():\n # Test delayed input validation in fit (useful for grid search).\n hasher = FeatureHasher()\n hasher.set_params(n_features=np.inf)\n with pytest.raises(TypeError):\n hasher.fit()\n\n\ndef test_hasher_zeros():\n # Assert that no zeros are materialized in the output.\n X = FeatureHasher().transform([{\"foo\": 0}])\n assert X.data.shape == (0,)\n\n\ndef test_hasher_alternate_sign():\n X = [list(\"Thequickbrownfoxjumped\")]\n\n Xt = FeatureHasher(alternate_sign=True, input_type=\"string\").fit_transform(X)\n assert Xt.data.min() < 0 and Xt.data.max() > 0\n\n Xt = FeatureHasher(alternate_sign=False, input_type=\"string\").fit_transform(X)\n assert Xt.data.min() > 0\n\n\ndef test_hash_collisions():\n X = [list(\"Thequickbrownfoxjumped\")]\n\n Xt = FeatureHasher(\n alternate_sign=True, n_features=1, input_type=\"string\"\n ).fit_transform(X)\n # check that some of the hashed tokens are added\n # with an opposite sign and cancel out\n assert abs(Xt.data[0]) < len(X[0])\n\n Xt = FeatureHasher(\n alternate_sign=False, n_features=1, input_type=\"string\"\n ).fit_transform(X)\n assert Xt.data[0] == len(X[0])\n", "\"\"\"\n=====================================\nVisualization of MLP weights on MNIST\n=====================================\n\nSometimes looking at the learned coefficients of a neural network can provide\ninsight into the learning behavior. 
For example if weights look unstructured,\nmaybe some were not used at all, or if very large coefficients exist, maybe\nregularization was too low or the learning rate too high.\n\nThis example shows how to plot some of the first layer weights in a\nMLPClassifier trained on the MNIST dataset.\n\nThe input data consists of 28x28 pixel handwritten digits, leading to 784\nfeatures in the dataset. Therefore the first layer weight matrix has the shape\n(784, hidden_layer_sizes[0]). We can therefore visualize a single column of\nthe weight matrix as a 28x28 pixel image.\n\nTo make the example run faster, we use very few hidden units, and train only\nfor a very short time. Training longer would result in weights with a much\nsmoother spatial appearance. The example will throw a warning because it\ndoesn't converge, in this case this is what we want because of resource\nusage constraints on our Continuous Integration infrastructure that is used\nto build this documentation on a regular basis.\n\"\"\"\n\nimport warnings\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\n\n# Load data from https://www.openml.org/d/554\nX, y = fetch_openml(\n \"mnist_784\", version=1, return_X_y=True, as_frame=False, parser=\"pandas\"\n)\nX = X / 255.0\n\n# Split data into train partition and test partition\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.7)\n\nmlp = MLPClassifier(\n hidden_layer_sizes=(40,),\n max_iter=8,\n alpha=1e-4,\n solver=\"sgd\",\n verbose=10,\n random_state=1,\n learning_rate_init=0.2,\n)\n\n# this example won't converge because of resource usage constraints on\n# our Continuous Integration infrastructure, so we catch the warning and\n# ignore it here\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning, module=\"sklearn\")\n mlp.fit(X_train, y_train)\n\nprint(\"Training set score: %f\" % mlp.score(X_train, y_train))\nprint(\"Test set score: %f\" % mlp.score(X_test, y_test))\n\nfig, axes = plt.subplots(4, 4)\n# use global min / max to ensure all weights are shown on the same scale\nvmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\nfor coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):\n ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=0.5 * vmin, vmax=0.5 * vmax)\n ax.set_xticks(())\n ax.set_yticks(())\n\nplt.show()\n", "\"\"\"\nTest the fastica algorithm.\n\"\"\"\nimport itertools\nimport pytest\nimport warnings\n\nimport numpy as np\nfrom scipy import stats\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_allclose\n\nfrom sklearn.decomposition import FastICA, fastica, PCA\nfrom sklearn.decomposition._fastica import _gs_decorrelation\nfrom sklearn.exceptions import ConvergenceWarning\n\n\ndef center_and_norm(x, axis=-1):\n \"\"\"Centers and norms x **in place**\n\n Parameters\n -----------\n x: ndarray\n Array with an axis of observations (statistical units) measured on\n random variables.\n axis: int, optional\n Axis along which the mean and variance are calculated.\n \"\"\"\n x = np.rollaxis(x, axis)\n x -= x.mean(axis=0)\n x /= x.std(axis=0)\n\n\ndef test_gs():\n # Test gram schmidt orthonormalization\n # generate a random orthogonal matrix\n rng = np.random.RandomState(0)\n W, _, _ = np.linalg.svd(rng.randn(10, 10))\n w = rng.randn(10)\n _gs_decorrelation(w, W, 10)\n 
assert (w**2).sum() < 1.0e-10\n w = rng.randn(10)\n u = _gs_decorrelation(w, W, 5)\n tmp = np.dot(u, W.T)\n assert (tmp[:5] ** 2).sum() < 1.0e-10\n\n\ndef test_fastica_attributes_dtypes(global_dtype):\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)\n fica = FastICA(\n n_components=5, max_iter=1000, whiten=\"unit-variance\", random_state=0\n ).fit(X)\n assert fica.components_.dtype == global_dtype\n assert fica.mixing_.dtype == global_dtype\n assert fica.mean_.dtype == global_dtype\n assert fica.whitening_.dtype == global_dtype\n\n\ndef test_fastica_return_dtypes(global_dtype):\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)\n k_, mixing_, s_ = fastica(\n X, max_iter=1000, whiten=\"unit-variance\", random_state=rng\n )\n assert k_.dtype == global_dtype\n assert mixing_.dtype == global_dtype\n assert s_.dtype == global_dtype\n\n\n# FIXME remove filter in 1.3\[email protected](\n \"ignore:From version 1.3 whiten='unit-variance' will be used by default.\"\n)\[email protected](\"add_noise\", [True, False])\ndef test_fastica_simple(add_noise, global_random_seed, global_dtype):\n # Test the FastICA algorithm on very simple data.\n rng = np.random.RandomState(global_random_seed)\n n_samples = 1000\n # Generate two sources:\n s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1\n s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed)\n s = np.c_[s1, s2].T\n center_and_norm(s)\n s = s.astype(global_dtype)\n s1, s2 = s\n\n # Mixing angle\n phi = 0.6\n mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])\n mixing = mixing.astype(global_dtype)\n m = np.dot(mixing, s)\n\n if add_noise:\n m += 0.1 * rng.randn(2, 1000)\n\n center_and_norm(m)\n\n # function as fun arg\n def g_test(x):\n return x**3, (3 * x**2).mean(axis=-1)\n\n algos = [\"parallel\", \"deflation\"]\n nls = [\"logcosh\", \"exp\", \"cube\", g_test]\n whitening = [\"arbitrary-variance\", \"unit-variance\", False]\n for algo, nl, whiten in itertools.product(algos, nls, whitening):\n if whiten:\n k_, mixing_, s_ = fastica(\n m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng\n )\n with pytest.raises(ValueError):\n fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo)\n else:\n pca = PCA(n_components=2, whiten=True, random_state=rng)\n X = pca.fit_transform(m.T)\n k_, mixing_, s_ = fastica(\n X, fun=nl, algorithm=algo, whiten=False, random_state=rng\n )\n with pytest.raises(ValueError):\n fastica(X, fun=np.tanh, algorithm=algo)\n s_ = s_.T\n # Check that the mixing model described in the docstring holds:\n if whiten:\n # XXX: exact reconstruction to standard relative tolerance is not\n # possible. 
This is probably expected when add_noise is True but we\n # also need a non-trivial atol in float32 when add_noise is False.\n #\n # Note that the 2 sources are non-Gaussian in this test.\n atol = 1e-5 if global_dtype == np.float32 else 0\n assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol)\n\n center_and_norm(s_)\n s1_, s2_ = s_\n # Check to see if the sources have been estimated\n # in the wrong order\n if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):\n s2_, s1_ = s_\n s1_ *= np.sign(np.dot(s1_, s1))\n s2_ *= np.sign(np.dot(s2_, s2))\n\n # Check that we have estimated the original sources\n if not add_noise:\n assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2)\n assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2)\n else:\n assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1)\n assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1)\n\n # Test FastICA class\n _, _, sources_fun = fastica(\n m.T, fun=nl, algorithm=algo, random_state=global_random_seed\n )\n ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed)\n sources = ica.fit_transform(m.T)\n assert ica.components_.shape == (2, 2)\n assert sources.shape == (1000, 2)\n\n assert_allclose(sources_fun, sources)\n assert_allclose(sources, ica.transform(m.T))\n\n assert ica.mixing_.shape == (2, 2)\n\n for fn in [np.tanh, \"exp(-.5(x^2))\"]:\n ica = FastICA(fun=fn, algorithm=algo)\n with pytest.raises(ValueError):\n ica.fit(m.T)\n\n with pytest.raises(TypeError):\n FastICA(fun=range(10)).fit(m.T)\n\n\ndef test_fastica_nowhiten():\n m = [[0, 1], [1, 0]]\n\n # test for issue #697\n ica = FastICA(n_components=1, whiten=False, random_state=0)\n warn_msg = \"Ignoring n_components with whiten=False.\"\n with pytest.warns(UserWarning, match=warn_msg):\n ica.fit(m)\n assert hasattr(ica, \"mixing_\")\n\n\ndef test_fastica_convergence_fail():\n # Test the FastICA algorithm on very simple data\n # (see test_non_square_fastica).\n # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low.\n rng = np.random.RandomState(0)\n\n n_samples = 1000\n # Generate two sources:\n t = np.linspace(0, 100, n_samples)\n s1 = np.sin(t)\n s2 = np.ceil(np.sin(np.pi * t))\n s = np.c_[s1, s2].T\n center_and_norm(s)\n\n # Mixing matrix\n mixing = rng.randn(6, 2)\n m = np.dot(mixing, s)\n\n # Do fastICA with tolerance 0. to ensure failing convergence\n warn_msg = (\n \"FastICA did not converge. 
Consider increasing tolerance \"\n \"or the maximum number of iterations.\"\n )\n with pytest.warns(ConvergenceWarning, match=warn_msg):\n ica = FastICA(\n algorithm=\"parallel\", n_components=2, random_state=rng, max_iter=2, tol=0.0\n )\n ica.fit(m.T)\n\n\[email protected](\"add_noise\", [True, False])\ndef test_non_square_fastica(add_noise):\n # Test the FastICA algorithm on very simple data.\n rng = np.random.RandomState(0)\n\n n_samples = 1000\n # Generate two sources:\n t = np.linspace(0, 100, n_samples)\n s1 = np.sin(t)\n s2 = np.ceil(np.sin(np.pi * t))\n s = np.c_[s1, s2].T\n center_and_norm(s)\n s1, s2 = s\n\n # Mixing matrix\n mixing = rng.randn(6, 2)\n m = np.dot(mixing, s)\n\n if add_noise:\n m += 0.1 * rng.randn(6, n_samples)\n\n center_and_norm(m)\n\n k_, mixing_, s_ = fastica(\n m.T, n_components=2, whiten=\"unit-variance\", random_state=rng\n )\n s_ = s_.T\n\n # Check that the mixing model described in the docstring holds:\n assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))\n\n center_and_norm(s_)\n s1_, s2_ = s_\n # Check to see if the sources have been estimated\n # in the wrong order\n if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):\n s2_, s1_ = s_\n s1_ *= np.sign(np.dot(s1_, s1))\n s2_ *= np.sign(np.dot(s2_, s2))\n\n # Check that we have estimated the original sources\n if not add_noise:\n assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3)\n assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3)\n\n\ndef test_fit_transform(global_random_seed, global_dtype):\n \"\"\"Test unit variance of transformed data using FastICA algorithm.\n\n Check that `fit_transform` gives the same result as applying\n `fit` and then `transform`.\n\n Bug #13056\n \"\"\"\n # multivariate uniform data in [0, 1]\n rng = np.random.RandomState(global_random_seed)\n X = rng.random_sample((100, 10)).astype(global_dtype)\n max_iter = 300\n for whiten, n_components in [[\"unit-variance\", 5], [False, None]]:\n n_components_ = n_components if n_components is not None else X.shape[1]\n\n ica = FastICA(\n n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0\n )\n with warnings.catch_warnings():\n # make sure that numerical errors do not cause sqrt of negative\n # values\n warnings.simplefilter(\"error\", RuntimeWarning)\n # XXX: for some seeds, the model does not converge.\n # However this is not what we test here.\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n Xt = ica.fit_transform(X)\n assert ica.components_.shape == (n_components_, 10)\n assert Xt.shape == (X.shape[0], n_components_)\n\n ica2 = FastICA(\n n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0\n )\n with warnings.catch_warnings():\n # make sure that numerical errors do not cause sqrt of negative\n # values\n warnings.simplefilter(\"error\", RuntimeWarning)\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n ica2.fit(X)\n assert ica2.components_.shape == (n_components_, 10)\n Xt2 = ica2.transform(X)\n\n # XXX: we have to set atol for this test to pass for all seeds when\n # fitting with float32 data. 
Is this revealing a bug?\n if global_dtype:\n atol = np.abs(Xt2).mean() / 1e6\n else:\n atol = 0.0 # the default rtol is enough for float64 data\n assert_allclose(Xt, Xt2, atol=atol)\n\n\[email protected](\"ignore:Ignoring n_components with whiten=False.\")\[email protected](\n \"whiten, n_components, expected_mixing_shape\",\n [\n (\"arbitrary-variance\", 5, (10, 5)),\n (\"arbitrary-variance\", 10, (10, 10)),\n (\"unit-variance\", 5, (10, 5)),\n (\"unit-variance\", 10, (10, 10)),\n (False, 5, (10, 10)),\n (False, 10, (10, 10)),\n ],\n)\ndef test_inverse_transform(\n whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype\n):\n # Test FastICA.inverse_transform\n n_samples = 100\n rng = np.random.RandomState(global_random_seed)\n X = rng.random_sample((n_samples, 10)).astype(global_dtype)\n\n ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)\n with warnings.catch_warnings():\n # For some dataset (depending on the value of global_dtype) the model\n # can fail to converge but this should not impact the definition of\n # a valid inverse transform.\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n Xt = ica.fit_transform(X)\n assert ica.mixing_.shape == expected_mixing_shape\n X2 = ica.inverse_transform(Xt)\n assert X.shape == X2.shape\n\n # reversibility test in non-reduction case\n if n_components == X.shape[1]:\n # XXX: we have to set atol for this test to pass for all seeds when\n # fitting with float32 data. Is this revealing a bug?\n if global_dtype:\n # XXX: dividing by a smaller number makes\n # tests fail for some seeds.\n atol = np.abs(X2).mean() / 1e5\n else:\n atol = 0.0 # the default rtol is enough for float64 data\n assert_allclose(X, X2, atol=atol)\n\n\n# FIXME remove filter in 1.3\[email protected](\n \"ignore:From version 1.3 whiten='unit-variance' will be used by default.\"\n)\ndef test_fastica_errors():\n n_features = 3\n n_samples = 10\n rng = np.random.RandomState(0)\n X = rng.random_sample((n_samples, n_features))\n w_init = rng.randn(n_features + 1, n_features + 1)\n fastica_estimator = FastICA(max_iter=0)\n with pytest.raises(ValueError, match=\"max_iter should be greater than 1\"):\n fastica_estimator.fit(X)\n with pytest.raises(ValueError, match=r\"alpha must be in \\[1,2\\]\"):\n fastica(X, fun_args={\"alpha\": 0})\n with pytest.raises(\n ValueError, match=\"w_init has invalid shape.+\" r\"should be \\(3L?, 3L?\\)\"\n ):\n fastica(X, w_init=w_init)\n with pytest.raises(\n ValueError, match=\"Invalid algorithm.+must be.+parallel.+or.+deflation\"\n ):\n fastica(X, algorithm=\"pizza\")\n\n\ndef test_fastica_whiten_unit_variance():\n \"\"\"Test unit variance of transformed data using FastICA algorithm.\n\n Bug #13056\n \"\"\"\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10))\n n_components = X.shape[1]\n ica = FastICA(n_components=n_components, whiten=\"unit-variance\", random_state=0)\n Xt = ica.fit_transform(X)\n\n assert np.var(Xt) == pytest.approx(1.0)\n\n\[email protected](\"ica\", [FastICA(), FastICA(whiten=True)])\ndef test_fastica_whiten_default_value_deprecation(ica):\n \"\"\"Test FastICA whiten default value deprecation.\n\n Regression test for #19490\n \"\"\"\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10))\n with pytest.warns(FutureWarning, match=r\"From version 1.3 whiten=\"):\n ica.fit(X)\n assert ica._whiten == \"arbitrary-variance\"\n\n\ndef test_fastica_whiten_backwards_compatibility():\n \"\"\"Test previous behavior for FastICA whitening (whiten=True)\n\n Regression 
test for #19490\n \"\"\"\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10))\n n_components = X.shape[1]\n\n default_ica = FastICA(n_components=n_components, random_state=0)\n with pytest.warns(FutureWarning):\n Xt_on_default = default_ica.fit_transform(X)\n\n ica = FastICA(n_components=n_components, whiten=True, random_state=0)\n with pytest.warns(FutureWarning):\n Xt = ica.fit_transform(X)\n\n # No warning must be raised in this case.\n av_ica = FastICA(\n n_components=n_components, whiten=\"arbitrary-variance\", random_state=0\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", FutureWarning)\n Xt_av = av_ica.fit_transform(X)\n\n # The whitening strategy must be \"arbitrary-variance\" in all the cases.\n assert default_ica._whiten == \"arbitrary-variance\"\n assert ica._whiten == \"arbitrary-variance\"\n assert av_ica._whiten == \"arbitrary-variance\"\n\n assert_array_equal(Xt, Xt_on_default)\n assert_array_equal(Xt, Xt_av)\n\n assert np.var(Xt) == pytest.approx(1.0 / 100)\n\n\[email protected](\"whiten\", [\"arbitrary-variance\", \"unit-variance\", False])\[email protected](\"return_X_mean\", [True, False])\[email protected](\"return_n_iter\", [True, False])\ndef test_fastica_output_shape(whiten, return_X_mean, return_n_iter):\n n_features = 3\n n_samples = 10\n rng = np.random.RandomState(0)\n X = rng.random_sample((n_samples, n_features))\n\n expected_len = 3 + return_X_mean + return_n_iter\n\n out = fastica(\n X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean\n )\n\n assert len(out) == expected_len\n if not whiten:\n assert out[0] is None\n", "import pickle\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom sklearn.utils._encode import _unique\nfrom sklearn.utils._encode import _encode\nfrom sklearn.utils._encode import _check_unknown\nfrom sklearn.utils._encode import _get_counts\n\n\[email protected](\n \"values, expected\",\n [\n (np.array([2, 1, 3, 1, 3], dtype=\"int64\"), np.array([1, 2, 3], dtype=\"int64\")),\n (\n np.array([2, 1, np.nan, 1, np.nan], dtype=\"float32\"),\n np.array([1, 2, np.nan], dtype=\"float32\"),\n ),\n (\n np.array([\"b\", \"a\", \"c\", \"a\", \"c\"], dtype=object),\n np.array([\"a\", \"b\", \"c\"], dtype=object),\n ),\n (\n np.array([\"b\", \"a\", None, \"a\", None], dtype=object),\n np.array([\"a\", \"b\", None], dtype=object),\n ),\n (np.array([\"b\", \"a\", \"c\", \"a\", \"c\"]), np.array([\"a\", \"b\", \"c\"])),\n ],\n ids=[\"int64\", \"float32-nan\", \"object\", \"object-None\", \"str\"],\n)\ndef test_encode_util(values, expected):\n uniques = _unique(values)\n assert_array_equal(uniques, expected)\n\n result, encoded = _unique(values, return_inverse=True)\n assert_array_equal(result, expected)\n assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))\n\n encoded = _encode(values, uniques=uniques)\n assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))\n\n result, counts = _unique(values, return_counts=True)\n assert_array_equal(result, expected)\n assert_array_equal(counts, np.array([2, 1, 2]))\n\n result, encoded, counts = _unique(values, return_inverse=True, return_counts=True)\n assert_array_equal(result, expected)\n assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))\n assert_array_equal(counts, np.array([2, 1, 2]))\n\n\ndef test_encode_with_check_unknown():\n # test for the check_unknown parameter of _encode()\n uniques = np.array([1, 2, 3])\n values = np.array([1, 2, 3, 4])\n\n # Default is True, raise error\n with pytest.raises(ValueError, 
match=\"y contains previously unseen labels\"):\n _encode(values, uniques=uniques, check_unknown=True)\n\n # dont raise error if False\n _encode(values, uniques=uniques, check_unknown=False)\n\n # parameter is ignored for object dtype\n uniques = np.array([\"a\", \"b\", \"c\"], dtype=object)\n values = np.array([\"a\", \"b\", \"c\", \"d\"], dtype=object)\n with pytest.raises(ValueError, match=\"y contains previously unseen labels\"):\n _encode(values, uniques=uniques, check_unknown=False)\n\n\ndef _assert_check_unknown(values, uniques, expected_diff, expected_mask):\n diff = _check_unknown(values, uniques)\n assert_array_equal(diff, expected_diff)\n\n diff, valid_mask = _check_unknown(values, uniques, return_mask=True)\n assert_array_equal(diff, expected_diff)\n assert_array_equal(valid_mask, expected_mask)\n\n\[email protected](\n \"values, uniques, expected_diff, expected_mask\",\n [\n (np.array([1, 2, 3, 4]), np.array([1, 2, 3]), [4], [True, True, True, False]),\n (np.array([2, 1, 4, 5]), np.array([2, 5, 1]), [4], [True, True, False, True]),\n (np.array([2, 1, np.nan]), np.array([2, 5, 1]), [np.nan], [True, True, False]),\n (\n np.array([2, 1, 4, np.nan]),\n np.array([2, 5, 1, np.nan]),\n [4],\n [True, True, False, True],\n ),\n (\n np.array([2, 1, 4, np.nan]),\n np.array([2, 5, 1]),\n [4, np.nan],\n [True, True, False, False],\n ),\n (\n np.array([2, 1, 4, 5]),\n np.array([2, 5, 1, np.nan]),\n [4],\n [True, True, False, True],\n ),\n (\n np.array([\"a\", \"b\", \"c\", \"d\"], dtype=object),\n np.array([\"a\", \"b\", \"c\"], dtype=object),\n np.array([\"d\"], dtype=object),\n [True, True, True, False],\n ),\n (\n np.array([\"d\", \"c\", \"a\", \"b\"], dtype=object),\n np.array([\"a\", \"c\", \"b\"], dtype=object),\n np.array([\"d\"], dtype=object),\n [False, True, True, True],\n ),\n (\n np.array([\"a\", \"b\", \"c\", \"d\"]),\n np.array([\"a\", \"b\", \"c\"]),\n np.array([\"d\"]),\n [True, True, True, False],\n ),\n (\n np.array([\"d\", \"c\", \"a\", \"b\"]),\n np.array([\"a\", \"c\", \"b\"]),\n np.array([\"d\"]),\n [False, True, True, True],\n ),\n ],\n)\ndef test_check_unknown(values, uniques, expected_diff, expected_mask):\n _assert_check_unknown(values, uniques, expected_diff, expected_mask)\n\n\[email protected](\"missing_value\", [None, np.nan, float(\"nan\")])\[email protected](\"pickle_uniques\", [True, False])\ndef test_check_unknown_missing_values(missing_value, pickle_uniques):\n # check for check_unknown with missing values with object dtypes\n values = np.array([\"d\", \"c\", \"a\", \"b\", missing_value], dtype=object)\n uniques = np.array([\"c\", \"a\", \"b\", missing_value], dtype=object)\n if pickle_uniques:\n uniques = pickle.loads(pickle.dumps(uniques))\n\n expected_diff = [\"d\"]\n expected_mask = [False, True, True, True, True]\n _assert_check_unknown(values, uniques, expected_diff, expected_mask)\n\n values = np.array([\"d\", \"c\", \"a\", \"b\", missing_value], dtype=object)\n uniques = np.array([\"c\", \"a\", \"b\"], dtype=object)\n if pickle_uniques:\n uniques = pickle.loads(pickle.dumps(uniques))\n\n expected_diff = [\"d\", missing_value]\n\n expected_mask = [False, True, True, True, False]\n _assert_check_unknown(values, uniques, expected_diff, expected_mask)\n\n values = np.array([\"a\", missing_value], dtype=object)\n uniques = np.array([\"a\", \"b\", \"z\"], dtype=object)\n if pickle_uniques:\n uniques = pickle.loads(pickle.dumps(uniques))\n\n expected_diff = [missing_value]\n expected_mask = [True, False]\n _assert_check_unknown(values, uniques, 
expected_diff, expected_mask)\n\n\[email protected](\"missing_value\", [np.nan, None, float(\"nan\")])\[email protected](\"pickle_uniques\", [True, False])\ndef test_unique_util_missing_values_objects(missing_value, pickle_uniques):\n # check for _unique and _encode with missing values with object dtypes\n values = np.array([\"a\", \"c\", \"c\", missing_value, \"b\"], dtype=object)\n expected_uniques = np.array([\"a\", \"b\", \"c\", missing_value], dtype=object)\n\n uniques = _unique(values)\n\n if missing_value is None:\n assert_array_equal(uniques, expected_uniques)\n else: # missing_value == np.nan\n assert_array_equal(uniques[:-1], expected_uniques[:-1])\n assert np.isnan(uniques[-1])\n\n if pickle_uniques:\n uniques = pickle.loads(pickle.dumps(uniques))\n\n encoded = _encode(values, uniques=uniques)\n assert_array_equal(encoded, np.array([0, 2, 2, 3, 1]))\n\n\ndef test_unique_util_missing_values_numeric():\n # Check missing values in numerical values\n values = np.array([3, 1, np.nan, 5, 3, np.nan], dtype=float)\n expected_uniques = np.array([1, 3, 5, np.nan], dtype=float)\n expected_inverse = np.array([1, 0, 3, 2, 1, 3])\n\n uniques = _unique(values)\n assert_array_equal(uniques, expected_uniques)\n\n uniques, inverse = _unique(values, return_inverse=True)\n assert_array_equal(uniques, expected_uniques)\n assert_array_equal(inverse, expected_inverse)\n\n encoded = _encode(values, uniques=uniques)\n assert_array_equal(encoded, expected_inverse)\n\n\ndef test_unique_util_with_all_missing_values():\n # test for all types of missing values for object dtype\n values = np.array([np.nan, \"a\", \"c\", \"c\", None, float(\"nan\"), None], dtype=object)\n\n uniques = _unique(values)\n assert_array_equal(uniques[:-1], [\"a\", \"c\", None])\n # last value is nan\n assert np.isnan(uniques[-1])\n\n expected_inverse = [3, 0, 1, 1, 2, 3, 2]\n _, inverse = _unique(values, return_inverse=True)\n assert_array_equal(inverse, expected_inverse)\n\n\ndef test_check_unknown_with_both_missing_values():\n # test for both types of missing values for object dtype\n values = np.array([np.nan, \"a\", \"c\", \"c\", None, np.nan, None], dtype=object)\n\n diff = _check_unknown(values, known_values=np.array([\"a\", \"c\"], dtype=object))\n assert diff[0] is None\n assert np.isnan(diff[1])\n\n diff, valid_mask = _check_unknown(\n values, known_values=np.array([\"a\", \"c\"], dtype=object), return_mask=True\n )\n\n assert diff[0] is None\n assert np.isnan(diff[1])\n assert_array_equal(valid_mask, [False, True, True, True, False, False, False])\n\n\[email protected](\n \"values, uniques, expected_counts\",\n [\n (np.array([1] * 10 + [2] * 4 + [3] * 15), np.array([1, 2, 3]), [10, 4, 15]),\n (\n np.array([1] * 10 + [2] * 4 + [3] * 15),\n np.array([1, 2, 3, 5]),\n [10, 4, 15, 0],\n ),\n (\n np.array([np.nan] * 10 + [2] * 4 + [3] * 15),\n np.array([2, 3, np.nan]),\n [4, 15, 10],\n ),\n (\n np.array([\"b\"] * 4 + [\"a\"] * 16 + [\"c\"] * 20, dtype=object),\n [\"a\", \"b\", \"c\"],\n [16, 4, 20],\n ),\n (\n np.array([\"b\"] * 4 + [\"a\"] * 16 + [\"c\"] * 20, dtype=object),\n [\"c\", \"b\", \"a\"],\n [20, 4, 16],\n ),\n (\n np.array([np.nan] * 4 + [\"a\"] * 16 + [\"c\"] * 20, dtype=object),\n [\"c\", np.nan, \"a\"],\n [20, 4, 16],\n ),\n (\n np.array([\"b\"] * 4 + [\"a\"] * 16 + [\"c\"] * 20, dtype=object),\n [\"a\", \"b\", \"c\", \"e\"],\n [16, 4, 20, 0],\n ),\n ],\n)\ndef test_get_counts(values, uniques, expected_counts):\n counts = _get_counts(values, uniques)\n assert_array_equal(counts, expected_counts)\n", 
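The `_unique`, `_encode`, and `_check_unknown` helpers exercised by the tests above are the private utilities behind scikit-learn's label-encoding estimators. The following is a minimal sketch of how they fit together, with signatures taken directly from the tests; the city strings are illustrative only, and `sklearn.utils._encode` is private API whose behavior may change between releases.

import numpy as np

from sklearn.utils._encode import _check_unknown, _encode, _unique

# Training-time categories, including a repeated value (illustrative data).
train = np.array(["paris", "tokyo", "paris", "amsterdam"], dtype=object)

# _unique returns the sorted categories; return_inverse also gives the
# integer code of each training sample against those categories.
categories, codes = _unique(train, return_inverse=True)
# categories: ['amsterdam', 'paris', 'tokyo'], codes: [1, 2, 1, 0]

# At transform time, _check_unknown reports values never seen during fit
# (here 'rome') and a boolean mask marking the known entries.
new = np.array(["tokyo", "rome", "paris"], dtype=object)
diff, valid_mask = _check_unknown(new, categories, return_mask=True)

# _encode maps known values onto the learned integer codes; with the
# default check_unknown=True it raises ValueError on unseen labels,
# so only the valid entries are encoded here.
encoded = _encode(new[np.asarray(valid_mask, dtype=bool)], uniques=categories)
# encoded: [2, 1]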
"\"\"\"\n===========================================\nSpectral clustering for image segmentation\n===========================================\n\nIn this example, an image with connected circles is generated and\nspectral clustering is used to separate the circles.\n\nIn these settings, the :ref:`spectral_clustering` approach solves the problem\nknow as 'normalized graph cuts': the image is seen as a graph of\nconnected voxels, and the spectral clustering algorithm amounts to\nchoosing graph cuts defining regions while minimizing the ratio of the\ngradient along the cut, and the volume of the region.\n\nAs the algorithm tries to balance the volume (ie balance the region\nsizes), if we take circles with different sizes, the segmentation fails.\n\nIn addition, as there is no useful information in the intensity of the image,\nor its gradient, we choose to perform the spectral clustering on a graph\nthat is only weakly informed by the gradient. This is close to performing\na Voronoi partition of the graph.\n\nIn addition, we use the mask of the objects to restrict the graph to the\noutline of the objects. In this example, we are interested in\nseparating the objects one from the other, and not from the background.\n\n\"\"\"\n\n# Authors: Emmanuelle Gouillart <[email protected]>\n# Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\n# %%\n# Generate the data\n# -----------------\nimport numpy as np\n\nl = 100\nx, y = np.indices((l, l))\n\ncenter1 = (28, 24)\ncenter2 = (40, 50)\ncenter3 = (67, 58)\ncenter4 = (24, 70)\n\nradius1, radius2, radius3, radius4 = 16, 14, 15, 14\n\ncircle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1**2\ncircle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2**2\ncircle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3**2\ncircle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4**2\n\n# %%\n# Plotting four circles\n# ---------------------\nimg = circle1 + circle2 + circle3 + circle4\n\n# We use a mask that limits to the foreground: the problem that we are\n# interested in here is not separating the objects from the background,\n# but separating them one from the other.\nmask = img.astype(bool)\n\nimg = img.astype(float)\nimg += 1 + 0.2 * np.random.randn(*img.shape)\n\n# %%\n# Convert the image into a graph with the value of the gradient on the\n# edges.\nfrom sklearn.feature_extraction import image\n\ngraph = image.img_to_graph(img, mask=mask)\n\n# %%\n# Take a decreasing function of the gradient resulting in a segmentation\n# that is close to a Voronoi partition\ngraph.data = np.exp(-graph.data / graph.data.std())\n\n# %%\n# Here we perform spectral clustering using the arpack solver since amg is\n# numerically unstable on this example. We then plot the results.\nfrom sklearn.cluster import spectral_clustering\nimport matplotlib.pyplot as plt\n\nlabels = spectral_clustering(graph, n_clusters=4, eigen_solver=\"arpack\")\nlabel_im = np.full(mask.shape, -1.0)\nlabel_im[mask] = labels\n\nfig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))\naxs[0].matshow(img)\naxs[1].matshow(label_im)\n\nplt.show()\n\n# %%\n# Plotting two circles\n# --------------------\n# Here we repeat the above process but only consider the first two circles\n# we generated. 
Note that this results in a cleaner separation between the\n# circles as the region sizes are easier to balance in this case.\n\nimg = circle1 + circle2\nmask = img.astype(bool)\nimg = img.astype(float)\n\nimg += 1 + 0.2 * np.random.randn(*img.shape)\n\ngraph = image.img_to_graph(img, mask=mask)\ngraph.data = np.exp(-graph.data / graph.data.std())\n\nlabels = spectral_clustering(graph, n_clusters=2, eigen_solver=\"arpack\")\nlabel_im = np.full(mask.shape, -1.0)\nlabel_im[mask] = labels\n\nfig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))\naxs[0].matshow(img)\naxs[1].matshow(label_im)\n\nplt.show()\n" ]
[ [ "numpy.dot", "sklearn.utils._testing.assert_allclose_dense_sparse", "numpy.sum", "numpy.abs", "sklearn.decomposition.IncrementalPCA", "numpy.arange", "numpy.eye", "sklearn.datasets.load_iris", "numpy.linalg.norm", "sklearn.datasets.make_low_rank_matrix", "numpy.sign", "sklearn.utils._testing.assert_almost_equal", "sklearn.utils._testing.assert_array_almost_equal", "numpy.random.randn", "numpy.testing.assert_allclose", "numpy.array", "sklearn.decomposition.PCA", "numpy.random.RandomState" ], [ "numpy.minimum", "numpy.maximum", "numpy.linspace", "sklearn.datasets.load_iris", "matplotlib.pyplot.subplots", "sklearn.model_selection.StratifiedKFold", "numpy.std", "numpy.mean", "numpy.interp", "sklearn.svm.SVC", "sklearn.metrics.auc", "numpy.random.RandomState", "matplotlib.pyplot.show" ], [ "numpy.log", "numpy.log2", "scipy.sparse.issparse", "numpy.sqrt", "numpy.unique", "numpy.asarray", "numpy.reshape", "numpy.ascontiguousarray", "numpy.atleast_1d", "numpy.copy", "numpy.argmax", "numpy.any", "numpy.iinfo", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.abs", "sklearn.feature_extraction._hashing_fast.transform", "numpy.testing.assert_array_equal", "numpy.uint16", "sklearn.feature_extraction.FeatureHasher" ], [ "sklearn.neural_network.MLPClassifier", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "sklearn.datasets.fetch_openml" ], [ "numpy.rollaxis", "numpy.dot", "sklearn.utils._testing.assert_allclose", "numpy.abs", "numpy.linspace", "sklearn.decomposition.fastica", "sklearn.decomposition.FastICA", "sklearn.decomposition._fastica._gs_decorrelation", "numpy.cos", "numpy.sin", "numpy.var", "scipy.stats.t.rvs", "numpy.random.RandomState", "sklearn.utils._testing.assert_array_equal", "sklearn.decomposition.PCA" ], [ "numpy.isnan", "sklearn.utils._encode._get_counts", "numpy.testing.assert_array_equal", "sklearn.utils._encode._unique", "sklearn.utils._encode._check_unknown", "numpy.array", "sklearn.utils._encode._encode" ], [ "sklearn.feature_extraction.image.img_to_graph", "matplotlib.pyplot.subplots", "numpy.indices", "numpy.full", "numpy.random.randn", "matplotlib.pyplot.show", "sklearn.cluster.spectral_clustering" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EVS-ATMOS/precipitation-onset
[ "3c9cf010b5246d17fae2796d271f6a8be892efb1" ]
[ "radar/Amazon Weather Program.py" ]
[ "import boto\nfrom boto.s3.connection import S3Connection\nfrom datetime import timedelta, datetime\nimport os\nimport pyart\nfrom matplotlib import pyplot as plt\nimport tempfile\nimport numpy as np\nimport cartopy\n\n\ndef _nearestDate(dates, pivot):\n return min(dates, key=lambda x: abs(x - pivot))\n\n\ndef get_radar_from_aws(site, datetime_t):\n \"\"\"\n Get the closest volume of NEXRAD data to a particular datetime.\n Parameters\n ----------\n site : string\n four letter radar designation\n datetime_t : datetime\n desired date time\n Returns\n -------\n radar : Py-ART Radar Object\n Radar closest to the queried datetime\n \"\"\"\n\n #First create the query string for the bucket knowing\n #how NOAA and AWS store the data\n\n my_pref = datetime_t.strftime('%Y/%m/%d/') + site\n\n #Connect to the bucket\n\n conn = S3Connection(anon = True)\n bucket = conn.get_bucket('noaa-nexrad-level2')\n\n #Get a list of files\n\n bucket_list = list(bucket.list(prefix = my_pref))\n\n #we are going to create a list of keys and datetimes to allow easy searching\n\n keys = []\n datetimes = []\n\n #populate the list\n\n for i in range(len(bucket_list)):\n this_str = str(bucket_list[i].key)\n if 'gz' in this_str:\n endme = this_str[-22:-4]\n fmt = '%Y%m%d_%H%M%S_V0'\n dt = datetime.strptime(endme, fmt)\n datetimes.append(dt)\n keys.append(bucket_list[i])\n\n if this_str[-3::] == 'V06':\n endme = this_str[-19::]\n fmt = '%Y%m%d_%H%M%S_V06'\n dt = datetime.strptime(endme, fmt)\n datetimes.append(dt)\n keys.append(bucket_list[i])\n\n #find the closest available radar to your datetime\n\n closest_datetime = _nearestDate(datetimes, datetime_t)\n index = datetimes.index(closest_datetime)\n\n localfile = tempfile.NamedTemporaryFile()\n keys[index].get_contents_to_filename(localfile.name)\n radar = pyart.io.read(localfile.name)\n #print (radar.info())\n #my_radar.fields.keys()\n #my_radar.metadata['vcp_pattern']\n return radar\n\nbase_date = \"20150520_190000\"\nfmt = '%Y%m%d_%H%M%S' \nb_d = datetime.strptime(base_date, fmt)\n\nmy_radar = get_radar_from_aws('KHGX',b_d )\nnyq = my_radar.instrument_parameters['nyquist_velocity']['data'].max()\n#print (\"VCP: %s\"%my_radar.metadata['vcp_pattern'])\n#print (\"NYQ: %s\"%nyq)\n\n#Plot Bounds\n\ncenterx = -95.3632700\ncentery = 29.4718835\nzoom = 1.5\n\nxm = 25/18\nmin_lon = centerx - (zoom*xm)\nmin_lat = centery - zoom\nmax_lon = centerx + (zoom*xm)\nmax_lat = centery + zoom\n\nlal = np.arange(min_lat, max_lat, .5)\nlol = np.arange(min_lon, max_lon, .5)\n\n\ndisplay = pyart.graph.RadarMapDisplayCartopy(my_radar)\nlat_0 = display.loc[0]\nlon_0 = display.loc[1]\nproj = cartopy.crs.Mercator(\n central_longitude=lon_0,\n min_latitude=min_lat, max_latitude=max_lat)\n\nsaveloc = '/home/scarani/Desktop/output/'\n\n#Plot Relfectivity\nfig = plt.figure(figsize = [20,8])\ndisplay.plot_ppi_map('reflectivity', sweep = 0, projection=proj, resolution = '10m',\n vmin = -8, vmax = 64, mask_outside = False,\n cmap = pyart.graph.cm.NWSRef,\n min_lat = min_lat, min_lon = min_lon,\n max_lat = max_lat, max_lon = max_lon,\n lat_lines = lal, lon_lines = lol)\ngl = display.ax.gridlines(draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\ngl.xlabels_top = False\ngl.ylabels_right = False\nplt.title('Reflectivity: ' + my_radar.time['units'].split()[2])\nplt.savefig(saveloc + my_radar.time['units'].split()[2] +'.png', bbox_inches = 'tight')\n\"\"\"\n#Plot Correlation Coefficient\nfig = plt.figure(figsize = [20,8])\ndisplay.plot_ppi_map('cross_correlation_ratio', sweep = 0, 
projection=proj, resolution = '10m',\n vmin = .8, vmax = 1, mask_outside = False,\n cmap = pyart.graph.cm.RefDiff,\n min_lat = min_lat, min_lon = min_lon,\n max_lat = max_lat, max_lon = max_lon,\n lat_lines = lal, lon_lines = lol)\ngl = display.ax.gridlines(draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\ngl.xlabels_top = False\ngl.ylabels_right = False\nplt.savefig('/home/scarani/Desktop/Output/correlation_coefficent.png', bbox_inches = 'tight')\n\n#Plot Differential Reflectivity\nfig = plt.figure(figsize = [20,8])\ndisplay.plot_ppi_map('differential_reflectivity', sweep = 0, projection=proj, resolution = '10m',\n vmin = -1, vmax = 4, mask_outside = False,\n cmap = pyart.graph.cm.RefDiff,\n min_lat = min_lat, min_lon = min_lon,\n max_lat = max_lat, max_lon = max_lon,\n lat_lines = lal, lon_lines = lol)\ngl = display.ax.gridlines(draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\ngl.xlabels_top = False\ngl.ylabels_right = False\nplt.title('Differential Reflectivity: ' + my_radar.time['units'].split()[2])\nplt.savefig('/home/scarani/Desktop/Output/differential_reflectivity.png', bbox_inches = 'tight')\n\n#Plot Velocity\nfig = plt.figure(figsize = [20,8])\ndisplay.plot_ppi_map('velocity', sweep = 1, projection=proj, resolution = '10m',\n vmin = -nyq*1.5, vmax = nyq*1.5, mask_outside = False,\n cmap = pyart.graph.cm.NWSVel,\n min_lat = min_lat, min_lon = min_lon,\n max_lat = max_lat, max_lon = max_lon,\n lat_lines = lal, lon_lines = lol)\ngl = display.ax.gridlines(draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\ngl.xlabels_top = False\ngl.ylabels_right = False\nplt.savefig('/home/scarani/Desktop/Output/velocity.png', bbox_inches = 'tight')\n\"\"\"" ]
[ [ "numpy.arange", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
[ "cc226deb7b46852407900f9fec0caf62638defe2", "cc226deb7b46852407900f9fec0caf62638defe2", "cc226deb7b46852407900f9fec0caf62638defe2", "cc226deb7b46852407900f9fec0caf62638defe2", "cc226deb7b46852407900f9fec0caf62638defe2", "ec8216568d8cd9810004067558041c11a8356685", "cc226deb7b46852407900f9fec0caf62638defe2" ]
[ "lesson7.4/tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py", "lesson5.4/index.py", "lesson7.4/tensorflow/contrib/fused_conv/ops/gen_fused_conv2d_bias_activation_op.py", "lesson7.4/tensorflow/python/ops/gen_set_ops.py", "lesson7.4/tensorflow/python/ops/gen_string_ops.py", "lesson7.4/tensorflow/python/ops/tensor_array_ops.py", "lesson7.4/tensorflow/python/ops/distributions/util.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Affine bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import linalg\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.contrib.distributions.python.ops.shape import _DistributionShape\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.distributions import bijector\n\n\n__all__ = [\n \"Affine\",\n]\n\n\ndef _as_tensor(x, name):\n \"\"\"Convenience to convert to `Tensor` or leave as `None`.\"\"\"\n return None if x is None else ops.convert_to_tensor(x, name=name)\n\n\nclass Affine(bijector.Bijector):\n \"\"\"Compute `Y = g(X; shift, scale) = scale @ X + shift`.\n\n Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.\n\n In TF parlance, the `scale` term is logically equivalent to:\n\n ```python\n scale = (\n scale_identity_multiplier * tf.diag(tf.ones(d)) +\n tf.diag(scale_diag) +\n scale_tril +\n scale_perturb_factor @ diag(scale_perturb_diag) @\n tf.transpose([scale_perturb_factor])\n )\n ```\n\n The `scale` term is applied without necessarily materializing constituent\n matrices, i.e., the matmul is [matrix-free](\n https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.\n\n Examples:\n\n ```python\n # Y = X\n b = Affine()\n\n # Y = X + shift\n b = Affine(shift=[1., 2, 3])\n\n # Y = 2 * I @ X.T + shift\n b = Affine(shift=[1., 2, 3],\n scale_identity_multiplier=2.)\n\n # Y = tf.diag(d1) @ X.T + shift\n b = Affine(shift=[1., 2, 3],\n scale_diag=[-1., 2, 1]) # Implicitly 3x3.\n\n # Y = (I + v * v.T) @ X.T + shift\n b = Affine(shift=[1., 2, 3],\n scale_perturb_factor=[[1., 0],\n [0, 1],\n [1, 1]])\n\n # Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift\n b = Affine(shift=[1., 2, 3],\n scale_diag=[1., 3, 3], # Implicitly 3x3.\n scale_perturb_diag=[2., 1], # Implicitly 2x2.\n scale_perturb_factor=[[1., 0],\n [0, 1],\n [1, 1]])\n\n ```\n\n \"\"\"\n\n def __init__(self,\n shift=None,\n scale_identity_multiplier=None,\n scale_diag=None,\n scale_tril=None,\n scale_perturb_factor=None,\n scale_perturb_diag=None,\n event_ndims=1,\n validate_args=False,\n name=\"affine\"):\n \"\"\"Instantiates the `Affine` bijector.\n\n This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,\n giving the forward operation:\n\n ```none\n Y = g(X) = scale @ X + shift\n ```\n\n where the `scale` term is logically equivalent to:\n\n ```python\n scale = (\n scale_identity_multiplier * tf.diag(tf.ones(d)) +\n tf.diag(scale_diag) +\n 
scale_tril +\n scale_perturb_factor @ diag(scale_perturb_diag) @\n tf.transpose([scale_perturb_factor])\n )\n ```\n\n If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are\n specified then `scale += IdentityMatrix`. Otherwise specifying a\n `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,\n `scale_diag != None` means `scale += tf.diag(scale_diag)`.\n\n Args:\n shift: Floating-point `Tensor`. If this is set to `None`, no shift is\n applied.\n scale_identity_multiplier: floating point rank 0 `Tensor` representing a\n scaling done to the identity matrix.\n When `scale_identity_multiplier = scale_diag = scale_tril = None` then\n `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added\n to `scale`.\n scale_diag: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ... k], which represents a k x k\n diagonal matrix.\n When `None` no diagonal term is added to `scale`.\n scale_tril: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k\n lower triangular matrix.\n When `None` no `scale_tril` term is added to `scale`.\n The upper triangular elements above the diagonal are ignored.\n scale_perturb_factor: Floating-point `Tensor` representing factor matrix\n with last two dimensions of shape `(k, r)`. When `None`, no rank-r\n update is added to `scale`.\n scale_perturb_diag: Floating-point `Tensor` representing the diagonal\n matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which\n represents an `r x r` diagonal matrix. When `None` low rank updates will\n take the form `scale_perturb_factor * scale_perturb_factor.T`.\n event_ndims: Scalar `int` `Tensor` indicating the number of dimensions\n associated with a particular draw from the distribution. Must be 0 or 1.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n\n Raises:\n ValueError: if `perturb_diag` is specified but not `perturb_factor`.\n TypeError: if `shift` has different `dtype` from `scale` arguments.\n \"\"\"\n self._graph_parents = []\n self._name = name\n self._validate_args = validate_args\n\n # Ambiguous definition of low rank update.\n if scale_perturb_diag is not None and scale_perturb_factor is None:\n raise ValueError(\"When scale_perturb_diag is specified, \"\n \"scale_perturb_factor must be specified.\")\n\n # Special case, only handling a scaled identity matrix. We don't know its\n # dimensions, so this is special cased.\n # We don't check identity_multiplier, since below we set it to 1. 
if all\n # other scale args are None.\n self._is_only_identity_multiplier = (scale_tril is None and\n scale_diag is None and\n scale_perturb_factor is None)\n\n with self._name_scope(\"init\", values=[\n shift, scale_identity_multiplier, scale_diag, scale_tril,\n scale_perturb_diag, scale_perturb_factor]):\n event_ndims = ops.convert_to_tensor(event_ndims, name=\"event_ndims\")\n event_ndims_const = tensor_util.constant_value(event_ndims)\n if event_ndims_const is not None and event_ndims_const not in (0, 1):\n raise ValueError(\"event_ndims(%s) was not 0 or 1\" % event_ndims_const)\n else:\n if validate_args:\n # Shape tool will catch if event_ndims is negative.\n event_ndims = control_flow_ops.with_dependencies(\n [check_ops.assert_less(\n event_ndims, 2, message=\"event_ndims must be 0 or 1\")],\n event_ndims)\n\n if event_ndims_const == 0 and not self._is_only_identity_multiplier:\n raise ValueError(\n \"If event_ndims == 0, the only scale argument you can pass is \"\n \"scale_identity_multiplier. All others operate on vectors.\")\n\n # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.\n dtype = dtypes.float32\n\n if shift is not None:\n shift = ops.convert_to_tensor(shift, name=\"shift\")\n dtype = shift.dtype.base_dtype\n self._shift = shift\n\n # When no args are specified, pretend the scale matrix is the identity\n # matrix.\n if (self._is_only_identity_multiplier and\n scale_identity_multiplier is None):\n scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype)\n\n # self._create_scale_operator returns a LinearOperator in all cases\n # except if self._is_only_identity_multiplier; in which case it\n # returns a scalar Tensor.\n scale = self._create_scale_operator(\n identity_multiplier=scale_identity_multiplier,\n diag=scale_diag,\n tril=scale_tril,\n perturb_diag=scale_perturb_diag,\n perturb_factor=scale_perturb_factor,\n shift=shift,\n validate_args=validate_args)\n\n if scale.dtype is not None:\n dtype = scale.dtype.base_dtype\n\n if scale is not None and not self._is_only_identity_multiplier:\n if (shift is not None and\n shift.dtype.base_dtype != scale.dtype.base_dtype):\n raise TypeError(\n \"shift.dtype({}) is incompatible with scale.dtype({}).\".format(\n shift.dtype, scale.dtype))\n\n if scale.tensor_rank is not None:\n batch_ndims = scale.tensor_rank - 2\n else:\n batch_ndims = scale.tensor_rank_tensor() - 2\n else:\n # We won't need shape inference when scale is None or when scale is a\n # scalar.\n batch_ndims = 0\n self._scale = scale\n self._shaper = _DistributionShape(\n batch_ndims=batch_ndims,\n event_ndims=event_ndims,\n validate_args=validate_args)\n super(Affine, self).__init__(\n event_ndims=event_ndims,\n graph_parents=(\n [event_ndims] +\n [self._scale] if tensor_util.is_tensor(self._scale)\n else self._scale.graph_parents +\n [self._shift] if self._shift is not None else []),\n is_constant_jacobian=True,\n dtype=dtype,\n validate_args=validate_args,\n name=name)\n\n def _create_scale_operator(self, identity_multiplier, diag, tril,\n perturb_diag, perturb_factor, shift,\n validate_args):\n \"\"\"Construct `scale` from various components.\n\n Args:\n identity_multiplier: floating point rank 0 `Tensor` representing a scaling\n done to the identity matrix.\n diag: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ... k], which represents a k x k\n diagonal matrix.\n tril: Floating-point `Tensor` representing the diagonal matrix.\n `scale_tril` has shape [N1, N2, ... 
k], which represents a k x k lower\n triangular matrix.\n perturb_diag: Floating-point `Tensor` representing the diagonal matrix of\n the low rank update.\n perturb_factor: Floating-point `Tensor` representing factor matrix.\n shift: Floating-point `Tensor` representing `shift in `scale @ X + shift`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n\n Returns:\n scale. In the case of scaling by a constant, scale is a\n floating point `Tensor`. Otherwise, scale is a `LinearOperator`.\n\n Raises:\n ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.\n \"\"\"\n identity_multiplier = _as_tensor(identity_multiplier, \"identity_multiplier\")\n diag = _as_tensor(diag, \"diag\")\n tril = _as_tensor(tril, \"tril\")\n perturb_diag = _as_tensor(perturb_diag, \"perturb_diag\")\n perturb_factor = _as_tensor(perturb_factor, \"perturb_factor\")\n\n # If possible, use the low rank update to infer the shape of\n # the identity matrix, when scale represents a scaled identity matrix\n # with a low rank update.\n shape_hint = None\n if perturb_factor is not None:\n shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)\n\n if self._is_only_identity_multiplier:\n if validate_args:\n return control_flow_ops.with_dependencies(\n [check_ops.assert_none_equal(\n identity_multiplier,\n array_ops.zeros([], identity_multiplier.dtype),\n [\"identity_multiplier should be non-zero.\"])],\n identity_multiplier)\n return identity_multiplier\n\n scale = distribution_util.make_tril_scale(\n loc=shift,\n scale_tril=tril,\n scale_diag=diag,\n scale_identity_multiplier=identity_multiplier,\n validate_args=validate_args,\n assert_positive=False,\n shape_hint=shape_hint)\n\n if perturb_factor is not None:\n return linalg.LinearOperatorUDVHUpdate(\n scale,\n u=perturb_factor,\n diag_update=perturb_diag,\n is_diag_update_positive=perturb_diag is None,\n is_non_singular=True, # Implied by is_positive_definite=True.\n is_self_adjoint=True,\n is_positive_definite=True,\n is_square=True)\n\n return scale\n\n @property\n def shift(self):\n \"\"\"The `shift` `Tensor` in `Y = scale @ X + shift`.\"\"\"\n return self._shift\n\n @property\n def scale(self):\n \"\"\"The `scale` `LinearOperator` in `Y = scale @ X + shift`.\"\"\"\n return self._scale\n\n def _forward(self, x):\n y = x\n if self._is_only_identity_multiplier:\n y *= self._scale\n if self.shift is not None:\n return y + self.shift\n return y\n y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(\n y, expand_batch_dim=False)\n with ops.control_dependencies(self._maybe_check_scale() if\n self.validate_args else []):\n y = self.scale.matmul(y)\n y = self._shaper.undo_make_batch_of_event_sample_matrices(\n y, sample_shape, expand_batch_dim=False)\n if self.shift is not None:\n y += self.shift\n return y\n\n def _inverse(self, y):\n x = y\n if self.shift is not None:\n x -= self.shift\n if self._is_only_identity_multiplier:\n return x / self._scale\n\n x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(\n x, expand_batch_dim=False)\n # Solve fails if the op is singular so we may safely skip this assertion.\n x = self.scale.solve(x)\n x = self._shaper.undo_make_batch_of_event_sample_matrices(\n x, sample_shape, expand_batch_dim=False)\n return x\n\n def _inverse_log_det_jacobian(self, y):\n return -self._forward_log_det_jacobian(y)\n\n def _forward_log_det_jacobian(self, x):\n if self._is_only_identity_multiplier:\n # We don't pad in this case and instead let the fldj 
be applied\n # via broadcast.\n event_size = distribution_util.pick_vector(\n math_ops.equal(self._shaper.event_ndims, 0),\n [1], array_ops.shape(x))[-1]\n event_size = math_ops.cast(event_size, dtype=self._scale.dtype)\n return math_ops.log(math_ops.abs(self._scale)) * event_size\n return self.scale.log_abs_determinant()\n\n def _maybe_check_scale(self):\n try:\n return [self.scale.assert_non_singular()]\n except NotImplementedError:\n pass\n return []\n", "import boto3\nimport numpy as np\nimport tensorflow as tf\nimport os.path\nimport re\nfrom urllib.request import urlretrieve\nimport json\n\nSESSION = None\nstrBucket = 'serverlessdeeplearning'\n\ndef handler(event, context):\n global strBucket\n global SESSION\n\n if not os.path.exists('/tmp/imagenet/'):\n os.makedirs('/tmp/imagenet/')\n\n if SESSION is None:\n downloadFromS3(strBucket,'imagenet/imagenet_2012_challenge_label_map_proto.pbtxt','/tmp/imagenet/imagenet_2012_challenge_label_map_proto.pbtxt')\n downloadFromS3(strBucket,'imagenet/imagenet_synset_to_human_label_map.txt','/tmp/imagenet/imagenet_synset_to_human_label_map.txt')\n \n strFile = '/tmp/imagenet/inputimage.png'\n\n if ('queryStringParameters' in event):\n if (event['queryStringParameters'] is not None):\n if ('url' in event['queryStringParameters']):\n urlretrieve(event['queryStringParameters']['url'], strFile)\n else:\n downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)\n else:\n downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)\n else:\n downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)\n \n strResult = run_inference_on_image(strFile)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(strResult)\n }\n\ndef run_inference_on_image(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n global SESSION\n if SESSION is None:\n SESSION = tf.InteractiveSession()\n create_graph()\n\n softmax_tensor = tf.get_default_graph().get_tensor_by_name('softmax:0')\n predictions = SESSION.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n top_k = predictions.argsort()[-5:][::-1]\n\n node_lookup = NodeLookup()\n strResult = '%s (score = %.5f)' % (node_lookup.id_to_string(top_k[0]), predictions[top_k[0]])\n vecStr = []\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n vecStr.append('%s (score = %.5f)' % (human_string, score))\n return vecStr\n\ndef downloadFromS3(strBucket,strKey,strFile):\n s3_client = boto3.client('s3')\n s3_client.download_file(strBucket, strKey, strFile)\n\ndef getObject(strBucket,strKey):\n s3_client = boto3.client('s3')\n s3_response_object = s3_client.get_object(Bucket=strBucket, Key=strKey)\n return s3_response_object['Body'].read() \n\ndef create_graph():\n global strBucket\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(getObject(strBucket,'imagenet/classify_image_graph_def.pb'))\n _ = tf.import_graph_def(graph_def, name='')\n\nclass NodeLookup(object):\n \"\"\"Converts integer node ID's to human readable labels.\"\"\"\n\n def __init__(self,\n label_lookup_path=None,\n uid_lookup_path=None):\n if not label_lookup_path:\n label_lookup_path = os.path.join(\n '/tmp/imagenet/', 'imagenet_2012_challenge_label_map_proto.pbtxt')\n if not uid_lookup_path:\n uid_lookup_path = os.path.join(\n '/tmp/imagenet/', 'imagenet_synset_to_human_label_map.txt')\n self.node_lookup = self.load(label_lookup_path, uid_lookup_path)\n\n def load(self, label_lookup_path, uid_lookup_path):\n if not 
tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name\n\n def id_to_string(self, node_id):\n if node_id not in self.node_lookup:\n return ''\n return self.node_lookup[node_id]\n", "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: fused_conv2d_bias_activation_op.cc\n\"\"\"\n\nimport collections as _collections\n\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\n\n\ndef fused_conv2d_bias_activation(conv_input, filter, bias, side_input, conv_input_scale, side_input_scale, strides, padding, data_format=\"NHWC\", filter_format=\"HWIO\", activation_mode=\"Relu\", name=None):\n r\"\"\" Computes a fused kernel which implements: 2-D convolution, adds side input,\n\n with separate scaling on convolution and side inputs, then adds bias and\n applies the RELU activation function to the result. Supports both float and\n qint8 data formats. 
In the case of qint8, the output is clipped to [0..127].\n\n conv_input: A tensor with format as specified by `data_format` (see below).\n filter: A tensor with format depending on `data_format` as follows:\n \"NHWC\", \"NCHW\":\n `float [ filter_height, filter_width, in_channels, out_channels ]`\n \"NCHW_VECT_C\":\n `qint8 [ out_channels, in_channels, filter_height, filter_width ]`\n bias: 1-D float tensor with size matching the `out_channels` dimension of\n `filter`.\n Note: this tensor is still float, even if other inputs are qint8.\n side_input: A tensor with format as specified by `data_format` (see below).\n This tensor will be ignored and can be [] if side_input_scale == 0.\n Otherwise, the size of each dimension must match the `output` tensor.\n conv_input_scale: scalar float value to be multiplied by `conv_input`.\n (conceptually.. in reality it is applied after convolution).\n side_input_scale: scalar float value to be multiplied by `side_input`.\n output: A tensor with format as specified by `data_format` (see below).\n The dimension sizes are determined automatically based on other inputs\n and attributes.\n T: The element data type of `conv_input`, `side_input` and `output` tensors.\n Note: must match with the `data_format`.\n Tbias: The element data type of `bias`.\n strides: 1-D tensor of length 4. The stride of the sliding window for each\n dimension of `input`. The dimension order is determined by the value of\n `data_format`, see below for details.\n Note: the stride for batch and channel dimensions must be 1.\n padding: The type of padding algorithm to use.\n data_format: A string specifying the data format of `conv_input`,\n `side_input` and `output` tensors with the following options:\n \"NHWC\": `float [ batch, height, width, channels ]`\n \"NCHW\": `float [ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, channels % 4 ]`\n Note: for \"NCHW_VECT_C\", `channels` must be a multiple of 4.\n filter_format: A string specifying the data format of `filter`,\n \"HWIO\": `float [ kernel_height, kernel_width, input_channels,\n output_channels ]`\n \"OIHW_VECT_I\":\n `qint8 [ output_channels, input_channels / 4,\n kernel_height, kernel_width, input_channels % 4 ]`\n activation_mode: The activation applied to the output.\n Currently must be \"Relu\".\n\n Args:\n conv_input: A `Tensor`. Must be one of the following types: `float32`, `half`, `qint8`.\n filter: A `Tensor`. Must have the same type as `conv_input`.\n bias: A `Tensor`. Must be one of the following types: `float32`, `half`.\n side_input: A `Tensor`. Must have the same type as `conv_input`.\n conv_input_scale: A `Tensor` of type `float32`.\n side_input_scale: A `Tensor` of type `float32`.\n strides: A list of `ints`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\", \"NCHW_VECT_C\"`. Defaults to `\"NHWC\"`.\n filter_format: An optional `string` from: `\"HWIO\", \"OIHW\", \"OIHW_VECT_I\"`. Defaults to `\"HWIO\"`.\n activation_mode: An optional `string` from: `\"Relu\"`. Defaults to `\"Relu\"`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `conv_input`.\n \"\"\"\n if not isinstance(strides, (list, tuple)):\n raise TypeError(\n \"Expected list for 'strides' argument to \"\n \"'fused_conv2d_bias_activation' Op, not %r.\" % strides)\n strides = [_execute.make_int(_i, \"strides\") for _i in strides]\n padding = _execute.make_str(padding, \"padding\")\n if data_format is None:\n data_format = \"NHWC\"\n data_format = _execute.make_str(data_format, \"data_format\")\n if filter_format is None:\n filter_format = \"HWIO\"\n filter_format = _execute.make_str(filter_format, \"filter_format\")\n if activation_mode is None:\n activation_mode = \"Relu\"\n activation_mode = _execute.make_str(activation_mode, \"activation_mode\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"FusedConv2DBiasActivation\", conv_input=conv_input, filter=filter,\n bias=bias, side_input=side_input, conv_input_scale=conv_input_scale,\n side_input_scale=side_input_scale, strides=strides, padding=padding,\n data_format=data_format, filter_format=filter_format,\n activation_mode=activation_mode, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tbias\", _op.get_attr(\"Tbias\"),\n \"strides\", _op.get_attr(\"strides\"), \"padding\",\n _op.get_attr(\"padding\"), \"data_format\",\n _op.get_attr(\"data_format\"), \"filter_format\",\n _op.get_attr(\"filter_format\"), \"activation_mode\",\n _op.get_attr(\"activation_mode\"))\n else:\n _attr_T, _inputs_T = _execute.args_to_matching_eager([conv_input, filter, side_input], _ctx)\n (conv_input, filter, side_input) = _inputs_T\n _attr_T = _attr_T.as_datatype_enum\n _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)\n _attr_Tbias = _attr_Tbias.as_datatype_enum\n conv_input_scale = _ops.convert_to_tensor(conv_input_scale, _dtypes.float32)\n side_input_scale = _ops.convert_to_tensor(side_input_scale, _dtypes.float32)\n _inputs_flat = [conv_input, filter, bias, side_input, conv_input_scale, side_input_scale]\n _attrs = (\"T\", _attr_T, \"Tbias\", _attr_Tbias, \"strides\", strides,\n \"padding\", padding, \"data_format\", data_format, \"filter_format\",\n filter_format, \"activation_mode\", activation_mode)\n _result = _execute.execute(b\"FusedConv2DBiasActivation\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"FusedConv2DBiasActivation\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"FusedConv2DBiasActivation\")(None)\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"FusedConv2DBiasActivation\"\n# input_arg {\n# name: \"conv_input\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"filter\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"bias\"\n# type_attr: \"Tbias\"\n# }\n# input_arg {\n# name: \"side_input\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"conv_input_scale\"\n# type: DT_FLOAT\n# }\n# input_arg {\n# name: \"side_input_scale\"\n# type: DT_FLOAT\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_HALF\n# type: DT_QINT8\n# }\n# }\n# }\n# attr {\n# name: \"Tbias\"\n# type: \"type\"\n# allowed_values {\n# 
list {\n# type: DT_FLOAT\n# type: DT_HALF\n# }\n# }\n# }\n# attr {\n# name: \"strides\"\n# type: \"list(int)\"\n# }\n# attr {\n# name: \"padding\"\n# type: \"string\"\n# allowed_values {\n# list {\n# s: \"SAME\"\n# s: \"VALID\"\n# }\n# }\n# }\n# attr {\n# name: \"data_format\"\n# type: \"string\"\n# default_value {\n# s: \"NHWC\"\n# }\n# allowed_values {\n# list {\n# s: \"NHWC\"\n# s: \"NCHW\"\n# s: \"NCHW_VECT_C\"\n# }\n# }\n# }\n# attr {\n# name: \"filter_format\"\n# type: \"string\"\n# default_value {\n# s: \"HWIO\"\n# }\n# allowed_values {\n# list {\n# s: \"HWIO\"\n# s: \"OIHW\"\n# s: \"OIHW_VECT_I\"\n# }\n# }\n# }\n# attr {\n# name: \"activation_mode\"\n# type: \"string\"\n# default_value {\n# s: \"Relu\"\n# }\n# allowed_values {\n# list {\n# s: \"Relu\"\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\236\\003\\n\\031FusedConv2DBiasActivation\\022\\017\\n\\nconv_input\\\"\\001T\\022\\013\\n\\006filter\\\"\\001T\\022\\r\\n\\004bias\\\"\\005Tbias\\022\\017\\n\\nside_input\\\"\\001T\\022\\024\\n\\020conv_input_scale\\030\\001\\022\\024\\n\\020side_input_scale\\030\\001\\032\\013\\n\\006output\\\"\\001T\\\"\\022\\n\\001T\\022\\004type:\\007\\n\\0052\\003\\001\\023\\013\\\"\\025\\n\\005Tbias\\022\\004type:\\006\\n\\0042\\002\\001\\023\\\"\\024\\n\\007strides\\022\\tlist(int)\\\"\\\"\\n\\007padding\\022\\006string:\\017\\n\\r\\022\\004SAME\\022\\005VALID\\\":\\n\\013data_format\\022\\006string\\032\\006\\022\\004NHWC:\\033\\n\\031\\022\\004NHWC\\022\\004NCHW\\022\\013NCHW_VECT_C\\\"<\\n\\rfilter_format\\022\\006string\\032\\006\\022\\004HWIO:\\033\\n\\031\\022\\004HWIO\\022\\004OIHW\\022\\013OIHW_VECT_I\\\"+\\n\\017activation_mode\\022\\006string\\032\\006\\022\\004Relu:\\010\\n\\006\\022\\004Relu\")\n", "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: set_ops.cc\n\"\"\"\n\nimport collections as _collections\n\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\n\n\n_dense_to_dense_set_operation_outputs = [\"result_indices\", \"result_values\",\n \"result_shape\"]\n_DenseToDenseSetOperationOutput = _collections.namedtuple(\n \"DenseToDenseSetOperation\", _dense_to_dense_set_operation_outputs)\n\n\ndef dense_to_dense_set_operation(set1, set2, set_operation, validate_indices=True, name=None):\n r\"\"\"Applies set operation along last dimension of 2 `Tensor` inputs.\n\n See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\n Output `result` is a `SparseTensor` represented by `result_indices`,\n `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\n has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\n dimension contains the result of `set_operation` applied to the corresponding\n `[0...n-1]` dimension of `set`.\n\n Args:\n set1: A `Tensor`. 
Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.\n `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\n Dimension `n` contains values in a set, duplicates are allowed but ignored.\n set2: A `Tensor`. Must have the same type as `set1`.\n `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\n Dimension `n` contains values in a set, duplicates are allowed but ignored.\n set_operation: A `string`.\n validate_indices: An optional `bool`. Defaults to `True`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (result_indices, result_values, result_shape).\n\n result_indices: A `Tensor` of type `int64`. 2D indices of a `SparseTensor`.\n result_values: A `Tensor`. Has the same type as `set1`. 1D values of a `SparseTensor`.\n result_shape: A `Tensor` of type `int64`. 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\n the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\n is the max result set size across all `0...n-1` dimensions.\n \"\"\"\n set_operation = _execute.make_str(set_operation, \"set_operation\")\n if validate_indices is None:\n validate_indices = True\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"DenseToDenseSetOperation\", set1=set1, set2=set2,\n set_operation=set_operation, validate_indices=validate_indices,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"set_operation\", _op.get_attr(\"set_operation\"),\n \"validate_indices\", _op.get_attr(\"validate_indices\"), \"T\",\n _op.get_attr(\"T\"))\n else:\n _attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2], _ctx)\n (set1, set2) = _inputs_T\n _attr_T = _attr_T.as_datatype_enum\n _inputs_flat = [set1, set2]\n _attrs = (\"set_operation\", set_operation, \"validate_indices\",\n validate_indices, \"T\", _attr_T)\n _result = _execute.execute(b\"DenseToDenseSetOperation\", 3,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"DenseToDenseSetOperation\", _inputs_flat, _attrs, _result, name)\n _result = _DenseToDenseSetOperationOutput._make(_result)\n return _result\n\n\n_dense_to_sparse_set_operation_outputs = [\"result_indices\", \"result_values\",\n \"result_shape\"]\n_DenseToSparseSetOperationOutput = _collections.namedtuple(\n \"DenseToSparseSetOperation\", _dense_to_sparse_set_operation_outputs)\n\n\ndef dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):\n r\"\"\"Applies set operation along last dimension of `Tensor` and `SparseTensor`.\n\n See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\n Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\n and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\n as `set1`. Dimension `n` contains values in a set, duplicates are allowed but\n ignored.\n\n If `validate_indices` is `True`, this op validates the order and range of `set2`\n indices.\n\n Output `result` is a `SparseTensor` represented by `result_indices`,\n `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\n has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. 
The `nth`\n dimension contains the result of `set_operation` applied to the corresponding\n `[0...n-1]` dimension of `set`.\n\n Args:\n set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.\n `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\n Dimension `n` contains values in a set, duplicates are allowed but ignored.\n set2_indices: A `Tensor` of type `int64`.\n 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\n order.\n set2_values: A `Tensor`. Must have the same type as `set1`.\n 1D `Tensor`, values of a `SparseTensor`. Must be in row-major\n order.\n set2_shape: A `Tensor` of type `int64`.\n 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\n be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\n max set size across `n-1` dimensions.\n set_operation: A `string`.\n validate_indices: An optional `bool`. Defaults to `True`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (result_indices, result_values, result_shape).\n\n result_indices: A `Tensor` of type `int64`. 2D indices of a `SparseTensor`.\n result_values: A `Tensor`. Has the same type as `set1`. 1D values of a `SparseTensor`.\n result_shape: A `Tensor` of type `int64`. 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\n the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\n is the max result set size across all `0...n-1` dimensions.\n \"\"\"\n set_operation = _execute.make_str(set_operation, \"set_operation\")\n if validate_indices is None:\n validate_indices = True\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"DenseToSparseSetOperation\", set1=set1, set2_indices=set2_indices,\n set2_values=set2_values, set2_shape=set2_shape,\n set_operation=set_operation, validate_indices=validate_indices,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"set_operation\", _op.get_attr(\"set_operation\"),\n \"validate_indices\", _op.get_attr(\"validate_indices\"), \"T\",\n _op.get_attr(\"T\"))\n else:\n _attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2_values], _ctx)\n (set1, set2_values) = _inputs_T\n _attr_T = _attr_T.as_datatype_enum\n set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)\n set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)\n _inputs_flat = [set1, set2_indices, set2_values, set2_shape]\n _attrs = (\"set_operation\", set_operation, \"validate_indices\",\n validate_indices, \"T\", _attr_T)\n _result = _execute.execute(b\"DenseToSparseSetOperation\", 3,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"DenseToSparseSetOperation\", _inputs_flat, _attrs, _result, name)\n _result = _DenseToSparseSetOperationOutput._make(_result)\n return _result\n\n\ndef set_size(set_indices, set_values, set_shape, validate_indices=True, name=None):\n r\"\"\"Number of unique elements along last dimension of input `set`.\n\n Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,\n and `set_shape`. 
The last dimension contains values in a set, duplicates are\n allowed but ignored.\n\n If `validate_indices` is `True`, this op validates the order and range of `set`\n indices.\n\n Args:\n set_indices: A `Tensor` of type `int64`.\n 2D `Tensor`, indices of a `SparseTensor`.\n set_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.\n 1D `Tensor`, values of a `SparseTensor`.\n set_shape: A `Tensor` of type `int64`.\n 1D `Tensor`, shape of a `SparseTensor`.\n validate_indices: An optional `bool`. Defaults to `True`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n `n-1` dimensions as `set`. Each value is the number of unique elements in\n the corresponding `[0...n-1]` dimension of `set`.\n \"\"\"\n if validate_indices is None:\n validate_indices = True\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SetSize\", set_indices=set_indices, set_values=set_values,\n set_shape=set_shape, validate_indices=validate_indices, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"validate_indices\", _op.get_attr(\"validate_indices\"), \"T\",\n _op.get_attr(\"T\"))\n else:\n _attr_T, (set_values,) = _execute.args_to_matching_eager([set_values], _ctx)\n _attr_T = _attr_T.as_datatype_enum\n set_indices = _ops.convert_to_tensor(set_indices, _dtypes.int64)\n set_shape = _ops.convert_to_tensor(set_shape, _dtypes.int64)\n _inputs_flat = [set_indices, set_values, set_shape]\n _attrs = (\"validate_indices\", validate_indices, \"T\", _attr_T)\n _result = _execute.execute(b\"SetSize\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"SetSize\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n_sparse_to_sparse_set_operation_outputs = [\"result_indices\", \"result_values\",\n \"result_shape\"]\n_SparseToSparseSetOperationOutput = _collections.namedtuple(\n \"SparseToSparseSetOperation\", _sparse_to_sparse_set_operation_outputs)\n\n\ndef sparse_to_sparse_set_operation(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):\n r\"\"\"Applies set operation along last dimension of 2 `SparseTensor` inputs.\n\n See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\n If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the\n order and range of `set1` and `set2` indices.\n\n Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,\n and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same\n as `set2`. Dimension `n` contains values in a set, duplicates are allowed but\n ignored.\n\n Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\n and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\n as `set1`. Dimension `n` contains values in a set, duplicates are allowed but\n ignored.\n\n If `validate_indices` is `True`, this op validates the order and range of `set1`\n and `set2` indices.\n\n Output `result` is a `SparseTensor` represented by `result_indices`,\n `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\n has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. 
The `nth`\n dimension contains the result of `set_operation` applied to the corresponding\n `[0...n-1]` dimension of `set`.\n\n Args:\n set1_indices: A `Tensor` of type `int64`.\n 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\n order.\n set1_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.\n 1D `Tensor`, values of a `SparseTensor`. Must be in row-major\n order.\n set1_shape: A `Tensor` of type `int64`.\n 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must\n be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\n max set size across `0...n-1` dimensions.\n set2_indices: A `Tensor` of type `int64`.\n 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\n order.\n set2_values: A `Tensor`. Must have the same type as `set1_values`.\n 1D `Tensor`, values of a `SparseTensor`. Must be in row-major\n order.\n set2_shape: A `Tensor` of type `int64`.\n 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\n be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\n max set size across `0...n-1` dimensions.\n set_operation: A `string`.\n validate_indices: An optional `bool`. Defaults to `True`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (result_indices, result_values, result_shape).\n\n result_indices: A `Tensor` of type `int64`. 2D indices of a `SparseTensor`.\n result_values: A `Tensor`. Has the same type as `set1_values`. 1D values of a `SparseTensor`.\n result_shape: A `Tensor` of type `int64`. 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\n the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\n is the max result set size across all `0...n-1` dimensions.\n \"\"\"\n set_operation = _execute.make_str(set_operation, \"set_operation\")\n if validate_indices is None:\n validate_indices = True\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SparseToSparseSetOperation\", set1_indices=set1_indices,\n set1_values=set1_values, set1_shape=set1_shape,\n set2_indices=set2_indices, set2_values=set2_values,\n set2_shape=set2_shape, set_operation=set_operation,\n validate_indices=validate_indices, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"set_operation\", _op.get_attr(\"set_operation\"),\n \"validate_indices\", _op.get_attr(\"validate_indices\"), \"T\",\n _op.get_attr(\"T\"))\n else:\n _attr_T, _inputs_T = _execute.args_to_matching_eager([set1_values, set2_values], _ctx)\n (set1_values, set2_values) = _inputs_T\n _attr_T = _attr_T.as_datatype_enum\n set1_indices = _ops.convert_to_tensor(set1_indices, _dtypes.int64)\n set1_shape = _ops.convert_to_tensor(set1_shape, _dtypes.int64)\n set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)\n set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)\n _inputs_flat = [set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape]\n _attrs = (\"set_operation\", set_operation, \"validate_indices\",\n validate_indices, \"T\", _attr_T)\n _result = _execute.execute(b\"SparseToSparseSetOperation\", 3,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"SparseToSparseSetOperation\", _inputs_flat, _attrs, _result, name)\n _result = _SparseToSparseSetOperationOutput._make(_result)\n return _result\n\ndef 
_InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"DenseToDenseSetOperation\"\n# input_arg {\n# name: \"set1\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set2\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"result_indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"result_values\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"result_shape\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"set_operation\"\n# type: \"string\"\n# }\n# attr {\n# name: \"validate_indices\"\n# type: \"bool\"\n# default_value {\n# b: true\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT8\n# type: DT_INT16\n# type: DT_INT32\n# type: DT_INT64\n# type: DT_UINT8\n# type: DT_UINT16\n# type: DT_STRING\n# }\n# }\n# }\n# }\n# op {\n# name: \"DenseToSparseSetOperation\"\n# input_arg {\n# name: \"set1\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set2_indices\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"set2_values\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set2_shape\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"result_indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"result_values\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"result_shape\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"set_operation\"\n# type: \"string\"\n# }\n# attr {\n# name: \"validate_indices\"\n# type: \"bool\"\n# default_value {\n# b: true\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT8\n# type: DT_INT16\n# type: DT_INT32\n# type: DT_INT64\n# type: DT_UINT8\n# type: DT_UINT16\n# type: DT_STRING\n# }\n# }\n# }\n# }\n# op {\n# name: \"SetSize\"\n# input_arg {\n# name: \"set_indices\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"set_values\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set_shape\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"size\"\n# type: DT_INT32\n# }\n# attr {\n# name: \"validate_indices\"\n# type: \"bool\"\n# default_value {\n# b: true\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT8\n# type: DT_INT16\n# type: DT_INT32\n# type: DT_INT64\n# type: DT_UINT8\n# type: DT_UINT16\n# type: DT_STRING\n# }\n# }\n# }\n# }\n# op {\n# name: \"SparseToSparseSetOperation\"\n# input_arg {\n# name: \"set1_indices\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"set1_values\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set1_shape\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"set2_indices\"\n# type: DT_INT64\n# }\n# input_arg {\n# name: \"set2_values\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"set2_shape\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"result_indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"result_values\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"result_shape\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"set_operation\"\n# type: \"string\"\n# }\n# attr {\n# name: \"validate_indices\"\n# type: \"bool\"\n# default_value {\n# b: true\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT8\n# type: DT_INT16\n# type: DT_INT32\n# type: DT_INT64\n# type: DT_UINT8\n# type: DT_UINT16\n# type: DT_STRING\n# }\n# }\n# }\n# }\n_op_def_lib = 
_InitOpDefLibrary(b\"\\n\\271\\001\\n\\030DenseToDenseSetOperation\\022\\t\\n\\004set1\\\"\\001T\\022\\t\\n\\004set2\\\"\\001T\\032\\022\\n\\016result_indices\\030\\t\\032\\022\\n\\rresult_values\\\"\\001T\\032\\020\\n\\014result_shape\\030\\t\\\"\\027\\n\\rset_operation\\022\\006string\\\"\\034\\n\\020validate_indices\\022\\004bool\\032\\002(\\001\\\"\\026\\n\\001T\\022\\004type:\\013\\n\\t2\\007\\006\\005\\003\\t\\004\\021\\007\\n\\343\\001\\n\\031DenseToSparseSetOperation\\022\\t\\n\\004set1\\\"\\001T\\022\\020\\n\\014set2_indices\\030\\t\\022\\020\\n\\013set2_values\\\"\\001T\\022\\016\\n\\nset2_shape\\030\\t\\032\\022\\n\\016result_indices\\030\\t\\032\\022\\n\\rresult_values\\\"\\001T\\032\\020\\n\\014result_shape\\030\\t\\\"\\027\\n\\rset_operation\\022\\006string\\\"\\034\\n\\020validate_indices\\022\\004bool\\032\\002(\\001\\\"\\026\\n\\001T\\022\\004type:\\013\\n\\t2\\007\\006\\005\\003\\t\\004\\021\\007\\nz\\n\\007SetSize\\022\\017\\n\\013set_indices\\030\\t\\022\\017\\n\\nset_values\\\"\\001T\\022\\r\\n\\tset_shape\\030\\t\\032\\010\\n\\004size\\030\\003\\\"\\034\\n\\020validate_indices\\022\\004bool\\032\\002(\\001\\\"\\026\\n\\001T\\022\\004type:\\013\\n\\t2\\007\\006\\005\\003\\t\\004\\021\\007\\n\\215\\002\\n\\032SparseToSparseSetOperation\\022\\020\\n\\014set1_indices\\030\\t\\022\\020\\n\\013set1_values\\\"\\001T\\022\\016\\n\\nset1_shape\\030\\t\\022\\020\\n\\014set2_indices\\030\\t\\022\\020\\n\\013set2_values\\\"\\001T\\022\\016\\n\\nset2_shape\\030\\t\\032\\022\\n\\016result_indices\\030\\t\\032\\022\\n\\rresult_values\\\"\\001T\\032\\020\\n\\014result_shape\\030\\t\\\"\\027\\n\\rset_operation\\022\\006string\\\"\\034\\n\\020validate_indices\\022\\004bool\\032\\002(\\001\\\"\\026\\n\\001T\\022\\004type:\\013\\n\\t2\\007\\006\\005\\003\\t\\004\\021\\007\")\n", "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: string_ops.cc\n\"\"\"\n\nimport collections as _collections\n\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\n\n\ndef as_string(input, precision=-1, scientific=False, shortest=False, width=-1, fill=\"\", name=None):\n r\"\"\"Converts each entry in the given tensor to strings. Supports many numeric\n\n types and boolean.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `complex64`, `float32`, `float64`, `bool`, `int8`.\n precision: An optional `int`. Defaults to `-1`.\n The post-decimal precision to use for floating point numbers.\n Only used if precision > -1.\n scientific: An optional `bool`. Defaults to `False`.\n Use scientific notation for floating point numbers.\n shortest: An optional `bool`. Defaults to `False`.\n Use shortest representation (either scientific or standard) for\n floating point numbers.\n width: An optional `int`. 
Defaults to `-1`.\n Pad pre-decimal numbers to this width.\n Applies to both floating point and integer numbers.\n Only used if width > -1.\n fill: An optional `string`. Defaults to `\"\"`.\n The value to pad if width > -1. If empty, pads with spaces.\n Another typical value is '0'. String cannot be longer than 1 character.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`.\n \"\"\"\n if precision is None:\n precision = -1\n precision = _execute.make_int(precision, \"precision\")\n if scientific is None:\n scientific = False\n scientific = _execute.make_bool(scientific, \"scientific\")\n if shortest is None:\n shortest = False\n shortest = _execute.make_bool(shortest, \"shortest\")\n if width is None:\n width = -1\n width = _execute.make_int(width, \"width\")\n if fill is None:\n fill = \"\"\n fill = _execute.make_str(fill, \"fill\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"AsString\", input=input, precision=precision, scientific=scientific,\n shortest=shortest, width=width, fill=fill, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"precision\", _op.get_attr(\"precision\"),\n \"scientific\", _op.get_attr(\"scientific\"), \"shortest\",\n _op.get_attr(\"shortest\"), \"width\", _op.get_attr(\"width\"),\n \"fill\", _op.get_attr(\"fill\"))\n else:\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\n _attr_T = _attr_T.as_datatype_enum\n _inputs_flat = [input]\n _attrs = (\"T\", _attr_T, \"precision\", precision, \"scientific\", scientific,\n \"shortest\", shortest, \"width\", width, \"fill\", fill)\n _result = _execute.execute(b\"AsString\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"AsString\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef decode_base64(input, name=None):\n r\"\"\"Decode web-safe base64-encoded strings.\n\n Input may or may not have padding at the end. See EncodeBase64 for padding.\n Web-safe means that input must use - and _ instead of + and /.\n\n Args:\n input: A `Tensor` of type `string`. Base64 strings to decode.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`. Decoded strings.\n \"\"\"\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"DecodeBase64\", input=input, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = None\n else:\n input = _ops.convert_to_tensor(input, _dtypes.string)\n _inputs_flat = [input]\n _attrs = None\n _result = _execute.execute(b\"DecodeBase64\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"DecodeBase64\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef encode_base64(input, pad=False, name=None):\n r\"\"\"Encode strings into web-safe base64 format.\n\n Refer to the following article for more information on base64 format:\n en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the\n end so that the encoded has length multiple of 4. See Padding section of the\n link above.\n\n Web-safe means that the encoder uses - and _ instead of + and /.\n\n Args:\n input: A `Tensor` of type `string`. Strings to be encoded.\n pad: An optional `bool`. 
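A usage sketch for the `AsString` wrapper above, assuming the public `tf.as_string` alias and a TF 1.x session; the exact padded output shown in the comment is illustrative, not taken from the source:

```python
import tensorflow as tf

x = tf.constant([3.14159, 2.71828])
# precision controls post-decimal digits; width/fill pad the formatted number.
s = tf.as_string(x, precision=2, width=8, fill="0")

with tf.Session() as sess:
    print(sess.run(s))  # e.g. [b'00003.14' b'00002.72']
```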
Defaults to `False`.\n Bool whether padding is applied at the ends.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`. Input strings encoded in base64.\n \"\"\"\n if pad is None:\n pad = False\n pad = _execute.make_bool(pad, \"pad\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"EncodeBase64\", input=input, pad=pad, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"pad\", _op.get_attr(\"pad\"))\n else:\n input = _ops.convert_to_tensor(input, _dtypes.string)\n _inputs_flat = [input]\n _attrs = (\"pad\", pad)\n _result = _execute.execute(b\"EncodeBase64\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"EncodeBase64\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef reduce_join(inputs, reduction_indices, keep_dims=False, separator=\"\", name=None):\n r\"\"\"Joins a string Tensor across the given dimensions.\n\n Computes the string join across dimensions in the given string Tensor of shape\n `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input\n strings with the given separator (default: empty string). Negative indices are\n counted backwards from the end, with `-1` being equivalent to `n - 1`.\n\n For example:\n\n ```python\n # tensor `a` is [[\"a\", \"b\"], [\"c\", \"d\"]]\n tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\n tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\n tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\n tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\n tf.reduce_join(a, 0, keep_dims=True) ==> [[\"ac\", \"bd\"]]\n tf.reduce_join(a, 1, keep_dims=True) ==> [[\"ab\"], [\"cd\"]]\n tf.reduce_join(a, 0, separator=\".\") ==> [\"a.c\", \"b.d\"]\n tf.reduce_join(a, [0, 1]) ==> [\"acbd\"]\n tf.reduce_join(a, [1, 0]) ==> [\"abcd\"]\n tf.reduce_join(a, []) ==> [\"abcd\"]\n ```\n\n Args:\n inputs: A `Tensor` of type `string`.\n The input to be joined. All reduced indices must have non-zero size.\n reduction_indices: A `Tensor` of type `int32`.\n The dimensions to reduce over. Dimensions are reduced in the\n order specified. Omitting `reduction_indices` is equivalent to passing\n `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.\n keep_dims: An optional `bool`. Defaults to `False`.\n If `True`, retain reduced dimensions with length `1`.\n separator: An optional `string`. 
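A quick round-trip sketch for the web-safe base64 wrappers (`EncodeBase64` / `DecodeBase64`), assuming the public `tf.encode_base64` / `tf.decode_base64` aliases in a TF 1.x session:

```python
import tensorflow as tf

messages = tf.constant(["hello", "tensorflow"])
encoded = tf.encode_base64(messages, pad=True)   # web-safe alphabet uses '-' and '_'
decoded = tf.decode_base64(encoded)              # round-trips back to the input

with tf.Session() as sess:
    print(sess.run([encoded, decoded]))
```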
Defaults to `\"\"`.\n The separator to use when joining.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`.\n Has shape equal to that of the input with reduced dimensions removed or\n set to `1` depending on `keep_dims`.\n \"\"\"\n if keep_dims is None:\n keep_dims = False\n keep_dims = _execute.make_bool(keep_dims, \"keep_dims\")\n if separator is None:\n separator = \"\"\n separator = _execute.make_str(separator, \"separator\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"ReduceJoin\", inputs=inputs, reduction_indices=reduction_indices,\n keep_dims=keep_dims, separator=separator, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"keep_dims\", _op.get_attr(\"keep_dims\"), \"separator\",\n _op.get_attr(\"separator\"))\n else:\n inputs = _ops.convert_to_tensor(inputs, _dtypes.string)\n reduction_indices = _ops.convert_to_tensor(reduction_indices, _dtypes.int32)\n _inputs_flat = [inputs, reduction_indices]\n _attrs = (\"keep_dims\", keep_dims, \"separator\", separator)\n _result = _execute.execute(b\"ReduceJoin\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"ReduceJoin\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef string_join(inputs, separator=\"\", name=None):\n r\"\"\"Joins the strings in the given list of string tensors into one tensor;\n\n with the given separator (default is an empty separator).\n\n Args:\n inputs: A list of at least 1 `Tensor` objects with type `string`.\n A list of string tensors. The tensors must all have the same shape,\n or be scalars. Scalars may be mixed in; these will be broadcast to the shape\n of non-scalar inputs.\n separator: An optional `string`. Defaults to `\"\"`.\n string, an optional join separator.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\n \"Expected list for 'inputs' argument to \"\n \"'string_join' Op, not %r.\" % inputs)\n _attr_N = len(inputs)\n if separator is None:\n separator = \"\"\n separator = _execute.make_str(separator, \"separator\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StringJoin\", inputs=inputs, separator=separator, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"N\", _op.get_attr(\"N\"), \"separator\", _op.get_attr(\"separator\"))\n else:\n inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)\n _inputs_flat = list(inputs)\n _attrs = (\"N\", _attr_N, \"separator\", separator)\n _result = _execute.execute(b\"StringJoin\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StringJoin\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n__string_split_outputs = [\"indices\", \"values\", \"shape\"]\n_StringSplitOutput = _collections.namedtuple(\n \"StringSplit\", __string_split_outputs)\n\n\ndef _string_split(input, delimiter, skip_empty=True, name=None):\n r\"\"\"Split elements of `input` based on `delimiter` into a `SparseTensor`.\n\n Let N be the size of source (typically N will be the batch size). Split each\n element of `input` based on `delimiter` and return a `SparseTensor`\n containing the splitted tokens. Empty tokens are ignored.\n\n `delimiter` can be empty, or a string of split characters. 
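A sketch of the two join wrappers just documented, assuming the public `tf.reduce_join` and `tf.string_join` aliases in a TF 1.x session:

```python
import tensorflow as tf

a = tf.constant([["a", "b"], ["c", "d"]])
rows = tf.reduce_join(a, 1, separator="-")               # ["a-b", "c-d"]
cols = tf.reduce_join(a, 0, keep_dims=True)              # [["ac", "bd"]]
joined = tf.string_join(["x", "y", "z"], separator="/")  # "x/y/z"

with tf.Session() as sess:
    print(sess.run([rows, cols, joined]))
```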
If `delimiter` is an\n empty string, each element of `input` is split into individual single-byte\n character strings, including splitting of UTF-8 multibyte sequences. Otherwise\n every character of `delimiter` is a potential split point.\n\n For example:\n N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output\n will be\n\n indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n shape = [2, 3]\n values = ['hello', 'world', 'a', 'b', 'c']\n\n Args:\n input: A `Tensor` of type `string`. 1-D. Strings to split.\n delimiter: A `Tensor` of type `string`.\n 0-D. Delimiter characters (bytes), or empty string.\n skip_empty: An optional `bool`. Defaults to `True`.\n A `bool`. If `True`, skip the empty strings from the result.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (indices, values, shape).\n\n indices: A `Tensor` of type `int64`. A dense matrix of int64 representing the indices of the sparse tensor.\n values: A `Tensor` of type `string`. A vector of strings corresponding to the splited values.\n shape: A `Tensor` of type `int64`. a length-2 vector of int64 representing the shape of the sparse\n tensor, where the first value is N and the second value is the maximum number\n of tokens in a single input entry.\n \"\"\"\n if skip_empty is None:\n skip_empty = True\n skip_empty = _execute.make_bool(skip_empty, \"skip_empty\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StringSplit\", input=input, delimiter=delimiter,\n skip_empty=skip_empty, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"skip_empty\", _op.get_attr(\"skip_empty\"))\n else:\n input = _ops.convert_to_tensor(input, _dtypes.string)\n delimiter = _ops.convert_to_tensor(delimiter, _dtypes.string)\n _inputs_flat = [input, delimiter]\n _attrs = (\"skip_empty\", skip_empty)\n _result = _execute.execute(b\"StringSplit\", 3, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StringSplit\", _inputs_flat, _attrs, _result, name)\n _result = _StringSplitOutput._make(_result)\n return _result\n\n\ndef string_to_hash_bucket(string_tensor, num_buckets, name=None):\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\n\n The hash function is deterministic on the content of the string within the\n process.\n\n Note that the hash function may change from time to time.\n This functionality will be deprecated and it's recommended to use\n `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.\n\n Args:\n string_tensor: A `Tensor` of type `string`.\n num_buckets: An `int` that is `>= 1`. 
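A sketch of the `StringSplit` wrapper via the public `tf.string_split` alias (TF 1.x assumed); the result is a `SparseTensor`, so running it returns indices, values, and a dense shape as in the docstring example above:

```python
import tensorflow as tf

lines = tf.constant(["hello world", "a b c"])
tokens = tf.string_split(lines, delimiter=" ")   # SparseTensor of tokens

with tf.Session() as sess:
    sp = sess.run(tokens)
    print(sp.indices)      # [[0 0] [0 1] [1 0] [1 1] [1 2]]
    print(sp.values)       # [b'hello' b'world' b'a' b'b' b'c']
    print(sp.dense_shape)  # [2 3]
```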
The number of buckets.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n A Tensor of the same shape as the input `string_tensor`.\n \"\"\"\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StringToHashBucket\", string_tensor=string_tensor,\n num_buckets=num_buckets, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"))\n else:\n string_tensor = _ops.convert_to_tensor(string_tensor, _dtypes.string)\n _inputs_flat = [string_tensor]\n _attrs = (\"num_buckets\", num_buckets)\n _result = _execute.execute(b\"StringToHashBucket\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StringToHashBucket\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef string_to_hash_bucket_fast(input, num_buckets, name=None):\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\n\n The hash function is deterministic on the content of the string within the\n process and will never change. However, it is not suitable for cryptography.\n This function may be used when CPU time is scarce and inputs are trusted or\n unimportant. There is a risk of adversaries constructing inputs that all hash\n to the same bucket. To prevent this problem, use a strong hash function with\n `tf.string_to_hash_bucket_strong`.\n\n Args:\n input: A `Tensor` of type `string`. The strings to assign a hash bucket.\n num_buckets: An `int` that is `>= 1`. The number of buckets.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n A Tensor of the same shape as the input `string_tensor`.\n \"\"\"\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StringToHashBucketFast\", input=input, num_buckets=num_buckets,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"))\n else:\n input = _ops.convert_to_tensor(input, _dtypes.string)\n _inputs_flat = [input]\n _attrs = (\"num_buckets\", num_buckets)\n _result = _execute.execute(b\"StringToHashBucketFast\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StringToHashBucketFast\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef string_to_hash_bucket_strong(input, num_buckets, key, name=None):\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\n\n The hash function is deterministic on the content of the string within the\n process. The hash function is a keyed hash function, where attribute `key`\n defines the key of the hash function. `key` is an array of 2 elements.\n\n A strong hash is important when inputs may be malicious, e.g. URLs with\n additional components. Adversaries could try to make their inputs hash to the\n same bucket for a denial-of-service attack or to skew the results. A strong\n hash prevents this by making it difficult, if not infeasible, to compute inputs\n that hash to the same bucket. This comes at a cost of roughly 4x higher compute\n time than `tf.string_to_hash_bucket_fast`.\n\n Args:\n input: A `Tensor` of type `string`. The strings to assign a hash bucket.\n num_buckets: An `int` that is `>= 1`. 
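A sketch contrasting the fast and keyed (strong) hash-bucket ops through their public aliases (TF 1.x assumed); the key values below are arbitrary illustrations:

```python
import tensorflow as tf

words = tf.constant(["cat", "dog", "fish"])
fast = tf.string_to_hash_bucket_fast(words, num_buckets=10)
# The strong variant takes a 2-element key for its keyed hash function.
strong = tf.string_to_hash_bucket_strong(words, num_buckets=10, key=[17, 43])

with tf.Session() as sess:
    print(sess.run([fast, strong]))  # bucket ids in [0, 10)
```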
The number of buckets.\n key: A list of `ints`.\n The key for the keyed hash function passed as a list of two uint64\n elements.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n A Tensor of the same shape as the input `string_tensor`.\n \"\"\"\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\n if not isinstance(key, (list, tuple)):\n raise TypeError(\n \"Expected list for 'key' argument to \"\n \"'string_to_hash_bucket_strong' Op, not %r.\" % key)\n key = [_execute.make_int(_i, \"key\") for _i in key]\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StringToHashBucketStrong\", input=input, num_buckets=num_buckets,\n key=key, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"), \"key\",\n _op.get_attr(\"key\"))\n else:\n input = _ops.convert_to_tensor(input, _dtypes.string)\n _inputs_flat = [input]\n _attrs = (\"num_buckets\", num_buckets, \"key\", key)\n _result = _execute.execute(b\"StringToHashBucketStrong\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StringToHashBucketStrong\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef substr(input, pos, len, name=None):\n r\"\"\"Return substrings from `Tensor` of strings.\n\n For each string in the input `Tensor`, creates a substring starting at index\n `pos` with a total length of `len`.\n\n If `len` defines a substring that would extend beyond the length of the input\n string, then as many characters as possible are used.\n\n If `pos` is negative or specifies a character index larger than any of the input\n strings, then an `InvalidArgumentError` is thrown.\n\n `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on\n Op creation.\n\n *NOTE*: `Substr` supports broadcasting up to two dimensions. More about\n broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n ---\n\n Examples\n\n Using scalar `pos` and `len`:\n\n ```python\n input = [b'Hello', b'World']\n position = 1\n length = 3\n\n output = [b'ell', b'orl']\n ```\n\n Using `pos` and `len` with same shape as `input`:\n\n ```python\n input = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen']]\n position = [[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]\n length = [[2, 3, 4],\n [4, 3, 2],\n [5, 5, 5]]\n\n output = [[b'en', b'eve', b'lve'],\n [b'hirt', b'urt', b'te'],\n [b'ixtee', b'vente', b'hteen']]\n ```\n\n Broadcasting `pos` and `len` onto `input`:\n\n ```\n input = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen'],\n [b'nineteen', b'twenty', b'twentyone']]\n position = [1, 2, 3]\n length = [1, 2, 3]\n\n output = [[b'e', b'ev', b'lve'],\n [b'h', b'ur', b'tee'],\n [b'i', b've', b'hte'],\n [b'i', b'en', b'nty']]\n ```\n\n Broadcasting `input` onto `pos` and `len`:\n\n ```\n input = b'thirteen'\n position = [1, 5, 7]\n length = [3, 2, 1]\n\n output = [b'hir', b'ee', b'n']\n ```\n\n Args:\n input: A `Tensor` of type `string`. Tensor of strings\n pos: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Scalar defining the position of first character in each substring\n len: A `Tensor`. 
Must have the same type as `pos`.\n Scalar defining the number of characters to include in each substring\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`. Tensor of substrings\n \"\"\"\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"Substr\", input=input, pos=pos, len=len, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"))\n else:\n _attr_T, _inputs_T = _execute.args_to_matching_eager([pos, len], _ctx)\n (pos, len) = _inputs_T\n _attr_T = _attr_T.as_datatype_enum\n input = _ops.convert_to_tensor(input, _dtypes.string)\n _inputs_flat = [input, pos, len]\n _attrs = (\"T\", _attr_T)\n _result = _execute.execute(b\"Substr\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"Substr\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"AsString\"\n# input_arg {\n# name: \"input\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# type: DT_COMPLEX64\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_BOOL\n# type: DT_INT8\n# }\n# }\n# }\n# attr {\n# name: \"precision\"\n# type: \"int\"\n# default_value {\n# i: -1\n# }\n# }\n# attr {\n# name: \"scientific\"\n# type: \"bool\"\n# default_value {\n# b: false\n# }\n# }\n# attr {\n# name: \"shortest\"\n# type: \"bool\"\n# default_value {\n# b: false\n# }\n# }\n# attr {\n# name: \"width\"\n# type: \"int\"\n# default_value {\n# i: -1\n# }\n# }\n# attr {\n# name: \"fill\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# }\n# op {\n# name: \"DecodeBase64\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# }\n# op {\n# name: \"EncodeBase64\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# attr {\n# name: \"pad\"\n# type: \"bool\"\n# default_value {\n# b: false\n# }\n# }\n# }\n# op {\n# name: \"ReduceJoin\"\n# input_arg {\n# name: \"inputs\"\n# type: DT_STRING\n# }\n# input_arg {\n# name: \"reduction_indices\"\n# type: DT_INT32\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# attr {\n# name: \"keep_dims\"\n# type: \"bool\"\n# default_value {\n# b: false\n# }\n# }\n# attr {\n# name: \"separator\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# }\n# op {\n# name: \"StringJoin\"\n# input_arg {\n# name: \"inputs\"\n# type: DT_STRING\n# number_attr: \"N\"\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# attr {\n# name: \"N\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"separator\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# }\n# op {\n# name: \"StringSplit\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# input_arg {\n# name: \"delimiter\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"indices\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"values\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"shape\"\n# type: DT_INT64\n# }\n# attr {\n# 
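A sketch of the `Substr` op through the public `tf.substr` alias (TF 1.x assumed), using scalar `pos` and `len` as in the first docstring example:

```python
import tensorflow as tf

words = tf.constant([b"thirteen", b"fourteen"])
out = tf.substr(words, pos=1, len=3)   # -> [b'hir', b'our']

with tf.Session() as sess:
    print(sess.run(out))
```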
name: \"skip_empty\"\n# type: \"bool\"\n# default_value {\n# b: true\n# }\n# }\n# }\n# op {\n# name: \"StringToHashBucket\"\n# input_arg {\n# name: \"string_tensor\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# }\n# op {\n# name: \"StringToHashBucketFast\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# }\n# op {\n# name: \"StringToHashBucketStrong\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_INT64\n# }\n# attr {\n# name: \"num_buckets\"\n# type: \"int\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"key\"\n# type: \"list(int)\"\n# }\n# }\n# op {\n# name: \"Substr\"\n# input_arg {\n# name: \"input\"\n# type: DT_STRING\n# }\n# input_arg {\n# name: \"pos\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"len\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type: DT_STRING\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\266\\001\\n\\010AsString\\022\\n\\n\\005input\\\"\\001T\\032\\n\\n\\006output\\030\\007\\\"\\026\\n\\001T\\022\\004type:\\013\\n\\t2\\007\\003\\t\\010\\001\\002\\n\\006\\\"\\035\\n\\tprecision\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\\"\\026\\n\\nscientific\\022\\004bool\\032\\002(\\000\\\"\\024\\n\\010shortest\\022\\004bool\\032\\002(\\000\\\"\\031\\n\\005width\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\\"\\022\\n\\004fill\\022\\006string\\032\\002\\022\\000\\n%\\n\\014DecodeBase64\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\n6\\n\\014EncodeBase64\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\\"\\017\\n\\003pad\\022\\004bool\\032\\002(\\000\\nk\\n\\nReduceJoin\\022\\n\\n\\006inputs\\030\\007\\022\\025\\n\\021reduction_indices\\030\\003\\032\\n\\n\\006output\\030\\007\\\"\\025\\n\\tkeep_dims\\022\\004bool\\032\\002(\\000\\\"\\027\\n\\tseparator\\022\\006string\\032\\002\\022\\000\\nN\\n\\nStringJoin\\022\\r\\n\\006inputs\\030\\007*\\001N\\032\\n\\n\\006output\\030\\007\\\"\\014\\n\\001N\\022\\003int(\\0010\\001\\\"\\027\\n\\tseparator\\022\\006string\\032\\002\\022\\000\\nc\\n\\013StringSplit\\022\\t\\n\\005input\\030\\007\\022\\r\\n\\tdelimiter\\030\\007\\032\\013\\n\\007indices\\030\\t\\032\\n\\n\\006values\\030\\007\\032\\t\\n\\005shape\\030\\t\\\"\\026\\n\\nskip_empty\\022\\004bool\\032\\002(\\001\\nK\\n\\022StringToHashBucket\\022\\021\\n\\rstring_tensor\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\nG\\n\\026StringToHashBucketFast\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\n[\\n\\030StringToHashBucketStrong\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\\"\\020\\n\\003key\\022\\tlist(int)\\nF\\n\\006Substr\\022\\t\\n\\005input\\030\\007\\022\\010\\n\\003pos\\\"\\001T\\022\\010\\n\\003len\\\"\\001T\\032\\n\\n\\006output\\030\\007\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\003\\t\")\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorArray: a dynamically sized array of Tensors.\n\n@@TensorArray\n\"\"\"\n# Mixture of pep8 and non-pep8 names, so disable pylint bad-name\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import tf_should_use\n\n\n# TensorArray object accesses many of the hidden generated ops, but is\n# in fact built to wrap these methods.\n# pylint: disable=protected-access\nclass TensorArray(object):\n \"\"\"Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.\n\n This class is meant to be used with dynamic iteration primitives such as\n `while_loop` and `map_fn`. It supports gradient back-propagation via special\n \"flow\" control flow dependencies.\n \"\"\"\n\n def __init__(self,\n dtype,\n size=None,\n dynamic_size=None,\n clear_after_read=None,\n tensor_array_name=None,\n handle=None,\n flow=None,\n infer_shape=True,\n element_shape=None,\n colocate_with_first_write_call=True,\n name=None):\n \"\"\"Construct a new TensorArray or wrap an existing TensorArray handle.\n\n A note about the parameter `name`:\n\n The name of the `TensorArray` (even if passed in) is uniquified: each time\n a new `TensorArray` is created at runtime it is assigned its own name for\n the duration of the run. This avoids name collisions if a `TensorArray`\n is created within a `while_loop`.\n\n Args:\n dtype: (required) data type of the TensorArray.\n size: (optional) int32 scalar `Tensor`: the size of the TensorArray.\n Required if handle is not provided.\n dynamic_size: (optional) Python bool: If true, writes to the TensorArray\n can grow the TensorArray past its initial size. Default: False.\n clear_after_read: Boolean (optional, default: True). If True, clear\n TensorArray values after reading them. This disables read-many\n semantics, but allows early release of memory.\n tensor_array_name: (optional) Python string: the name of the TensorArray.\n This is used when creating the TensorArray handle. If this value is\n set, handle should be None.\n handle: (optional) A `Tensor` handle to an existing TensorArray. If this\n is set, tensor_array_name should be None.\n flow: (optional) A float `Tensor` scalar coming from an existing\n `TensorArray.flow`.\n infer_shape: (optional, default: True) If True, shape inference\n is enabled. 
In this case, all elements must have the same shape.\n element_shape: (optional, default: None) A `TensorShape` object specifying\n the shape constraints of each of the elements of the TensorArray.\n Need not be fully defined.\n colocate_with_first_write_call: If `True`, the TensorArray will be\n colocated on the same device as the Tensor used on its first write\n (write operations include `write`, `unstack`, and `split`). If `False`,\n the TensorArray will be placed on the device determined by the\n device context available during its initialization.\n name: A name for the operation (optional).\n\n Raises:\n ValueError: if both handle and tensor_array_name are provided.\n TypeError: if handle is provided but is not a Tensor.\n \"\"\"\n if handle is not None and tensor_array_name:\n raise ValueError(\n \"Cannot construct with both handle and tensor_array_name\")\n if handle is not None and not isinstance(handle, ops.Tensor):\n raise TypeError(\"Handle must be a Tensor\")\n if handle is None and size is None:\n raise ValueError(\"Size must be provided if handle is not provided\")\n if handle is not None and size is not None:\n raise ValueError(\"Cannot provide both a handle and size \"\n \"at the same time\")\n if handle is not None and element_shape is not None:\n raise ValueError(\"Cannot provide both a handle and element_shape \"\n \"at the same time\")\n if handle is not None and dynamic_size is not None:\n raise ValueError(\"Cannot provide both a handle and dynamic_size \"\n \"at the same time\")\n if handle is not None and clear_after_read is not None:\n raise ValueError(\"Cannot provide both a handle and clear_after_read \"\n \"at the same time\")\n\n if clear_after_read is None:\n clear_after_read = True\n dynamic_size = dynamic_size or False\n\n self._dtype = dtype\n\n # Used to keep track of what tensors the TensorArray should be\n # colocated with. We choose to colocate the TensorArray with the\n # first tensor written to it.\n self._colocate_with_first_write_call = colocate_with_first_write_call\n if colocate_with_first_write_call:\n self._colocate_with = []\n else:\n self._colocate_with = None\n\n # Record the current static shape for the array elements. The element\n # shape is defined either by `element_shape` or the shape of the tensor\n # of the first write. If `infer_shape` is true, all writes checks for\n # shape equality.\n if element_shape is None:\n self._infer_shape = infer_shape\n self._element_shape = []\n else:\n self._infer_shape = True\n self._element_shape = [tensor_shape.TensorShape(element_shape)]\n with ops.name_scope(name, \"TensorArray\", [handle, size, flow]) as scope:\n if handle is not None:\n self._handle = handle\n if flow is None:\n raise ValueError(\"flow must not be None if handle is not None.\")\n self._flow = flow\n else:\n # Construct the TensorArray with an empty device. 
The first\n # write into the TensorArray from a Tensor with a set device\n # will retroactively set the device value of this op.\n def create():\n return gen_data_flow_ops._tensor_array_v3(\n dtype=dtype,\n size=size,\n element_shape=element_shape,\n dynamic_size=dynamic_size,\n clear_after_read=clear_after_read,\n tensor_array_name=tensor_array_name,\n name=scope)\n if colocate_with_first_write_call:\n with ops.device(None), ops.colocate_with(None, ignore_existing=True):\n self._handle, self._flow = create()\n else:\n self._handle, self._flow = create()\n\n @property\n def flow(self):\n \"\"\"The flow `Tensor` forcing ops leading to this TensorArray state.\"\"\"\n return self._flow\n\n @property\n def dtype(self):\n \"\"\"The data type of this TensorArray.\"\"\"\n return self._dtype\n\n @property\n def handle(self):\n \"\"\"The reference to the TensorArray.\"\"\"\n return self._handle\n\n def _merge_element_shape(self, shape):\n \"\"\"Changes the element shape of the array given a shape to merge with.\n\n Args:\n shape: A `TensorShape` object to merge with.\n\n Raises:\n ValueError: if the provided shape is incompatible with the current\n element shape of the `TensorArray`.\n \"\"\"\n\n if self._element_shape:\n if not shape.is_compatible_with(self._element_shape[0]):\n raise ValueError(\n \"Inconsistent shapes: saw %s but expected %s \"\n \"(and infer_shape=True)\" % (shape, self._element_shape[0]))\n self._element_shape[0] = self._element_shape[0].merge_with(shape)\n else:\n self._element_shape.append(shape)\n\n @contextlib.contextmanager\n def _maybe_colocate_with(self, value):\n \"\"\"Colocate operations with an internal colocation group or `value`.\n\n Args:\n value: `Tensor`, the tensor to try to colocate with.\n\n Yields:\n Does not yield anything, but the new context is a colocation context.\n\n If no internal colocation group is set, colocate with `value` and set\n the internal colocation group to be value.\n \"\"\"\n if not self._colocate_with_first_write_call:\n yield\n else:\n if not self._colocate_with:\n self._colocate_with.append(value)\n with ops.colocate_with(self._colocate_with[0]):\n yield\n\n def identity(self):\n \"\"\"Returns a TensorArray with the same content and properties.\n\n Returns:\n A new TensorArray object with flow that ensures the control dependencies\n from the contexts will become control dependencies for writes, reads, etc.\n Use this object all for subsequent operations.\n \"\"\"\n flow = array_ops.identity(self._flow)\n ta = TensorArray(\n dtype=self._dtype, handle=self._handle, flow=flow,\n infer_shape=self._infer_shape,\n colocate_with_first_write_call=self._colocate_with_first_write_call)\n ta._element_shape = self._element_shape\n ta._colocate_with = self._colocate_with\n return ta\n\n def grad(self, source, flow=None, name=None):\n # tensor_array_grad requires a flow input when forward\n # TensorArrays are dynamically sized. 
This forces the creation\n # of the grad TensorArray only once the final forward array's size\n # is fixed.\n if flow is None:\n flow = self.flow\n with ops.name_scope(name, \"TensorArrayGrad\", [self._handle]):\n with ops.colocate_with(self._handle):\n g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(\n handle=self._handle, source=source, flow_in=flow, name=name)\n with ops.control_dependencies([g_handle]):\n flow = array_ops.identity(flow, name=\"gradient_flow\")\n g = TensorArray(\n dtype=self._dtype,\n handle=g_handle,\n flow=flow,\n infer_shape=self._infer_shape,\n colocate_with_first_write_call=False)\n g._element_shape = self._element_shape\n return g\n\n def read(self, index, name=None):\n \"\"\"Read the value at location `index` in the TensorArray.\n\n Args:\n index: 0-D. int32 tensor with the index to read from.\n name: A name for the operation (optional).\n\n Returns:\n The tensor at index `index`.\n \"\"\"\n value = gen_data_flow_ops._tensor_array_read_v3(\n handle=self._handle,\n index=index,\n flow_in=self._flow,\n dtype=self._dtype,\n name=name)\n if self._element_shape:\n value.set_shape(self._element_shape[0].dims)\n return value\n\n @tf_should_use.should_use_result\n def write(self, index, value, name=None):\n \"\"\"Write `value` into index `index` of the TensorArray.\n\n Args:\n index: 0-D. int32 scalar with the index to write to.\n value: N-D. Tensor of type `dtype`. The Tensor to write to this index.\n name: A name for the operation (optional).\n\n Returns:\n A new TensorArray object with flow that ensures the write occurs.\n Use this object all for subsequent operations.\n\n Raises:\n ValueError: if there are more writers than specified.\n \"\"\"\n with ops.name_scope(name, \"TensorArrayWrite\", [self._handle, index, value]):\n value = ops.convert_to_tensor(value, name=\"value\")\n with self._maybe_colocate_with(value):\n flow_out = gen_data_flow_ops._tensor_array_write_v3(\n handle=self._handle,\n index=index,\n value=value,\n flow_in=self._flow,\n name=name)\n ta = TensorArray(\n dtype=self._dtype, handle=self._handle, flow=flow_out,\n colocate_with_first_write_call=self._colocate_with_first_write_call)\n ta._infer_shape = self._infer_shape\n ta._element_shape = self._element_shape\n ta._colocate_with = self._colocate_with\n if ta._infer_shape:\n ta._merge_element_shape(value.get_shape())\n return ta\n\n def stack(self, name=None):\n \"\"\"Return the values in the TensorArray as a stacked `Tensor`.\n\n All of the values must have been written and their shapes must all match.\n If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n All the tensors in the TensorArray stacked into one tensor.\n \"\"\"\n with ops.colocate_with(self._handle):\n with ops.name_scope(name, \"TensorArrayStack\", [self._handle]):\n return self.gather(math_ops.range(0, self.size()), name=name)\n\n def gather(self, indices, name=None):\n \"\"\"Return selected values in the TensorArray as a packed `Tensor`.\n\n All of selected values must have been written and their shapes\n must all match.\n\n Args:\n indices: A `1-D` `Tensor` taking values in `[0, max_value)`. 
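A minimal sketch of the write/read/stack methods described here, assuming a TF 1.x graph-mode session; `clear_after_read=False` is set explicitly so that element 0 can be read both directly and again via `stack()`:

```python
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3, clear_after_read=False)
ta = ta.write(0, 1.0)   # each write returns a new TensorArray carrying the flow
ta = ta.write(1, 2.0)
ta = ta.write(2, 3.0)

first = ta.read(0)      # 1.0
stacked = ta.stack()    # [1., 2., 3.]

with tf.Session() as sess:
    print(sess.run([first, stacked]))
```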
If\n the `TensorArray` is not dynamic, `max_value=size()`.\n name: A name for the operation (optional).\n\n Returns:\n The in the `TensorArray` selected by `indices`, packed into one tensor.\n \"\"\"\n if self._element_shape:\n element_shape = self._element_shape[0]\n else:\n element_shape = tensor_shape.TensorShape(None)\n value = gen_data_flow_ops._tensor_array_gather_v3(\n handle=self._handle,\n indices=indices,\n flow_in=self._flow,\n dtype=self._dtype,\n name=name,\n element_shape=element_shape)\n if self._element_shape and self._element_shape[0].dims is not None:\n value.set_shape([None] + self._element_shape[0].dims)\n return value\n\n def concat(self, name=None):\n \"\"\"Return the values in the TensorArray as a concatenated `Tensor`.\n\n All of the values must have been written, their ranks must match, and\n and their shapes must all match for all dimensions except the first.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n All the tensors in the TensorArray concatenated into one tensor.\n \"\"\"\n if self._element_shape and self._element_shape[0].dims is not None:\n element_shape_except0 = (\n tensor_shape.TensorShape(self._element_shape[0].dims[1:]))\n else:\n element_shape_except0 = tensor_shape.TensorShape(None)\n value, _ = gen_data_flow_ops._tensor_array_concat_v3(\n handle=self._handle,\n flow_in=self._flow,\n dtype=self._dtype,\n name=name,\n element_shape_except0=element_shape_except0)\n if self._element_shape and self._element_shape[0].dims is not None:\n value.set_shape([None] + self._element_shape[0].dims[1:])\n return value\n\n @tf_should_use.should_use_result\n def unstack(self, value, name=None):\n \"\"\"Unstack the values of a `Tensor` in the TensorArray.\n\n If input value shapes have rank-`R`, then the output TensorArray will\n contain elements whose shapes are rank-`(R-1)`.\n\n Args:\n value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.\n name: A name for the operation (optional).\n\n Returns:\n A new TensorArray object with flow that ensures the unstack occurs.\n Use this object all for subsequent operations.\n\n Raises:\n ValueError: if the shape inference fails.\n \"\"\"\n with ops.name_scope(name, \"TensorArrayUnstack\", [self._handle, value]):\n num_elements = array_ops.shape(value)[0]\n return self.scatter(\n indices=math_ops.range(0, num_elements), value=value, name=name)\n\n @tf_should_use.should_use_result\n def scatter(self, indices, value, name=None):\n \"\"\"Scatter the values of a `Tensor` in specific indices of a `TensorArray`.\n\n Args:\n indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If\n the `TensorArray` is not dynamic, `max_value=size()`.\n value: (N+1)-D. Tensor of type `dtype`. 
The Tensor to unpack.\n name: A name for the operation (optional).\n\n Returns:\n A new TensorArray object with flow that ensures the scatter occurs.\n Use this object all for subsequent operations.\n\n Raises:\n ValueError: if the shape inference fails.\n \"\"\"\n with ops.name_scope(name, \"TensorArrayScatter\",\n [self._handle, value, indices]):\n value = ops.convert_to_tensor(value, name=\"value\")\n with self._maybe_colocate_with(value):\n flow_out = gen_data_flow_ops._tensor_array_scatter_v3(\n handle=self._handle,\n indices=indices,\n value=value,\n flow_in=self._flow,\n name=name)\n ta = TensorArray(\n dtype=self._dtype, handle=self._handle, flow=flow_out,\n colocate_with_first_write_call=self._colocate_with_first_write_call)\n ta._infer_shape = self._infer_shape\n ta._element_shape = self._element_shape\n ta._colocate_with = self._colocate_with\n if ta._infer_shape and context.in_graph_mode():\n val_shape = flow_out.op.inputs[2].get_shape()\n element_shape = tensor_shape.unknown_shape()\n if val_shape.dims is not None:\n element_shape = tensor_shape.TensorShape(val_shape.dims[1:])\n ta._merge_element_shape(element_shape)\n return ta\n\n @tf_should_use.should_use_result\n def split(self, value, lengths, name=None):\n \"\"\"Split the values of a `Tensor` into the TensorArray.\n\n Args:\n value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.\n lengths: 1-D. int32 vector with the lengths to use when splitting\n `value` along its first dimension.\n name: A name for the operation (optional).\n\n Returns:\n A new TensorArray object with flow that ensures the split occurs.\n Use this object all for subsequent operations.\n\n Raises:\n ValueError: if the shape inference fails.\n \"\"\"\n with ops.name_scope(name, \"TensorArraySplit\",\n [self._handle, value, lengths]):\n value = ops.convert_to_tensor(value, name=\"value\")\n with self._maybe_colocate_with(value):\n lengths_64 = math_ops.to_int64(lengths)\n flow_out = gen_data_flow_ops._tensor_array_split_v3(\n handle=self._handle,\n value=value,\n lengths=lengths_64,\n flow_in=self._flow,\n name=name)\n ta = TensorArray(\n dtype=self._dtype, handle=self._handle, flow=flow_out,\n colocate_with_first_write_call=self._colocate_with_first_write_call)\n ta._infer_shape = self._infer_shape\n ta._element_shape = self._element_shape\n ta._colocate_with = self._colocate_with\n if ta._infer_shape and context.in_graph_mode():\n val_shape = flow_out.op.inputs[1].get_shape()\n clengths = tensor_util.constant_value(flow_out.op.inputs[2])\n element_shape = tensor_shape.unknown_shape()\n if val_shape.dims is not None:\n if clengths is not None and clengths.max() == clengths.min():\n element_shape = tensor_shape.TensorShape([clengths[0]] +\n val_shape.dims[1:])\n ta._merge_element_shape(element_shape)\n return ta\n\n def size(self, name=None):\n \"\"\"Return the size of the TensorArray.\"\"\"\n return gen_data_flow_ops._tensor_array_size_v3(\n handle=self._handle, flow_in=self.flow, name=name)\n\n @tf_should_use.should_use_result\n def close(self, name=None):\n \"\"\"Close the current TensorArray.\"\"\"\n return gen_data_flow_ops._tensor_array_close_v3(\n handle=self._handle, name=name)\n\n# pylint: enable=protected-access\n", "# Copyright 2016 The TensorFlow Authors. 
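The class above is built for dynamic iteration, so a representative sketch (TF 1.x assumed) is the canonical pattern of unstacking an input into one `TensorArray`, accumulating results into another inside `tf.while_loop`, and stacking at the end:

```python
import tensorflow as tf

xs = tf.constant([1.0, 2.0, 3.0, 4.0])
n = tf.shape(xs)[0]

input_ta = tf.TensorArray(dtype=tf.float32, size=n).unstack(xs)
output_ta = tf.TensorArray(dtype=tf.float32, size=n)

def body(i, out_ta):
    x = input_ta.read(i)                # each element is read exactly once
    return i + 1, out_ta.write(i, x * x)

_, output_ta = tf.while_loop(lambda i, _: i < n, body, [0, output_ta])
squares = output_ta.stack()             # [1., 4., 9., 16.]

with tf.Session() as sess:
    print(sess.run(squares))
```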
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for probability distributions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport hashlib\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\n\n\ndef assert_close(\n x, y, data=None, summarize=None, message=None, name=\"assert_close\"):\n \"\"\"Assert that x and y are within machine epsilon of each other.\n\n Args:\n x: Floating-point `Tensor`\n y: Floating-point `Tensor`\n data: The tensors to print out if the condition is `False`. Defaults to\n error message and first few entries of `x` and `y`.\n summarize: Print this many entries of each tensor.\n message: A string to prefix to the default message.\n name: A name for this operation (optional).\n\n Returns:\n Op raising `InvalidArgumentError` if |x - y| > machine epsilon.\n \"\"\"\n message = message or \"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n\n if data is None:\n data = [\n message,\n \"Condition x ~= y did not hold element-wise: x = \", x.name, x, \"y = \",\n y.name, y\n ]\n\n if x.dtype.is_integer:\n return check_ops.assert_equal(\n x, y, data=data, summarize=summarize, message=message, name=name)\n\n with ops.name_scope(name, \"assert_close\", [x, y, data]):\n tol = np.finfo(x.dtype.as_numpy_dtype).eps\n condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))\n return control_flow_ops.Assert(\n condition, data, summarize=summarize)\n\n\ndef assert_integer_form(\n x, data=None, summarize=None, message=None,\n int_dtype=None, name=\"assert_integer_form\"):\n \"\"\"Assert that x has integer components (or floats equal to integers).\n\n Args:\n x: Floating-point `Tensor`\n data: The tensors to print out if the condition is `False`. Defaults to\n error message and first few entries of `x` and `y`.\n summarize: Print this many entries of each tensor.\n message: A string to prefix to the default message.\n int_dtype: A `tf.dtype` used to cast the float to. 
The default (`None`)\n implies the smallest possible signed int will be used for casting.\n name: A name for this operation (optional).\n\n Returns:\n Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.\n \"\"\"\n with ops.name_scope(name, values=[x, data]):\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return control_flow_ops.no_op()\n message = message or \"{} has non-integer components\".format(x.op.name)\n if int_dtype is None:\n try:\n int_dtype = {\n dtypes.float16: dtypes.int16,\n dtypes.float32: dtypes.int32,\n dtypes.float64: dtypes.int64,\n }[x.dtype.base_dtype]\n except KeyError:\n raise TypeError(\"Unrecognized type {}\".format(x.dtype.name))\n return check_ops.assert_equal(\n x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),\n data=data, summarize=summarize, message=message, name=name)\n\n\ndef assert_symmetric(matrix):\n matrix_t = array_ops.matrix_transpose(matrix)\n return control_flow_ops.with_dependencies(\n [check_ops.assert_equal(matrix, matrix_t)], matrix)\n\n\ndef embed_check_nonnegative_integer_form(\n x, name=\"embed_check_nonnegative_integer_form\"):\n \"\"\"Assert x is a non-negative tensor, and optionally of integers.\"\"\"\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n assertions = [\n check_ops.assert_non_negative(\n x, message=\"'{}' must be non-negative.\".format(x.op.name)),\n ]\n if not x.dtype.is_integer:\n assertions += [\n assert_integer_form(\n x, message=\"'{}' cannot contain fractional components.\".format(\n x.op.name)),\n ]\n return control_flow_ops.with_dependencies(assertions, x)\n\n\ndef same_dynamic_shape(a, b):\n \"\"\"Returns whether a and b have the same dynamic shape.\n\n Args:\n a: `Tensor`\n b: `Tensor`\n\n Returns:\n `bool` `Tensor` representing if both tensors have the same shape.\n \"\"\"\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n\n # Here we can't just do math_ops.equal(a.shape, b.shape), since\n # static shape inference may break the equality comparison between\n # shape(a) and shape(b) in math_ops.equal.\n def all_shapes_equal():\n return math_ops.reduce_all(math_ops.equal(\n array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),\n array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))\n\n # One of the shapes isn't fully defined, so we need to use the dynamic\n # shape.\n return control_flow_ops.cond(\n math_ops.equal(array_ops.rank(a), array_ops.rank(b)),\n all_shapes_equal,\n lambda: constant_op.constant(False))\n\n\ndef get_logits_and_probs(logits=None,\n probs=None,\n multidimensional=False,\n validate_args=False,\n name=\"get_logits_and_probs\"):\n \"\"\"Converts logit to probabilities (or vice-versa), and returns both.\n\n Args:\n logits: Floating-point `Tensor` representing log-odds.\n probs: Floating-point `Tensor` representing probabilities.\n multidimensional: Python `bool`, default `False`.\n If `True`, represents whether the last dimension of `logits` or `probs`,\n a `[N1, N2, ... k]` dimensional tensor, representing the\n logit or probability of `shape[-1]` classes.\n validate_args: Python `bool`, default `False`. When `True`, either assert\n `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension\n of `probs` sums to one.\n name: A name for this operation (optional).\n\n Returns:\n logits, probs: Tuple of `Tensor`s. 
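A sketch of `same_dynamic_shape`, which is defined in this module; the import path is an assumption based on where this file lives in TF 1.x (`tensorflow/python/ops/distributions/util.py`):

```python
import tensorflow as tf
from tensorflow.python.ops.distributions import util as distribution_util  # assumed path

a = tf.placeholder(tf.float32, shape=[None, 3])
b = tf.placeholder(tf.float32, shape=[None, 3])
same = distribution_util.same_dynamic_shape(a, b)   # boolean scalar Tensor

with tf.Session() as sess:
    print(sess.run(same, {a: [[1., 2., 3.]], b: [[4., 5., 6.]]}))  # True
```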
If `probs` has an entry that is `0` or\n `1`, then the corresponding entry in the returned logit will be `-Inf` and\n `Inf` respectively.\n\n Raises:\n ValueError: if neither `probs` nor `logits` were passed in, or both were.\n \"\"\"\n with ops.name_scope(name, values=[probs, logits]):\n if (probs is None) == (logits is None):\n raise ValueError(\"Must pass probs or logits, but not both.\")\n\n if probs is None:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n if not logits.dtype.is_floating:\n raise TypeError(\"logits must having floating type.\")\n # We can early return since we constructed probs and therefore know\n # they're valid.\n if multidimensional:\n if validate_args:\n logits = embed_check_categorical_event_shape(logits)\n return logits, nn.softmax(logits, name=\"probs\")\n return logits, math_ops.sigmoid(logits, name=\"probs\")\n\n probs = ops.convert_to_tensor(probs, name=\"probs\")\n if not probs.dtype.is_floating:\n raise TypeError(\"probs must having floating type.\")\n\n if validate_args:\n with ops.name_scope(\"validate_probs\"):\n one = constant_op.constant(1., probs.dtype)\n dependencies = [check_ops.assert_non_negative(probs)]\n if multidimensional:\n probs = embed_check_categorical_event_shape(probs)\n dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,\n message=\"probs does not sum to 1.\")]\n else:\n dependencies += [check_ops.assert_less_equal(\n probs, one, message=\"probs has components greater than 1.\")]\n probs = control_flow_ops.with_dependencies(dependencies, probs)\n\n with ops.name_scope(\"logits\"):\n if multidimensional:\n # Here we don't compute the multidimensional case, in a manner\n # consistent with respect to the unidimensional case. We do so\n # following the TF convention. Typically, you might expect to see\n # logits = log(probs) - log(probs[pivot]). A side-effect of\n # being consistent with the TF approach is that the unidimensional case\n # implicitly handles the second dimension but the multidimensional case\n # explicitly keeps the pivot dimension.\n return math_ops.log(probs), probs\n return math_ops.log(probs) - math_ops.log1p(-1. 
* probs), probs\n\n\ndef _is_known_unsigned_by_dtype(dt):\n \"\"\"Helper returning True if dtype is known to be unsigned.\"\"\"\n return {\n dtypes.bool: True,\n dtypes.uint8: True,\n dtypes.uint16: True,\n }.get(dt.base_dtype, False)\n\n\ndef _is_known_signed_by_dtype(dt):\n \"\"\"Helper returning True if dtype is known to be signed.\"\"\"\n return {\n dtypes.float16: True,\n dtypes.float32: True,\n dtypes.float64: True,\n dtypes.int8: True,\n dtypes.int16: True,\n dtypes.int32: True,\n dtypes.int64: True,\n }.get(dt.base_dtype, False)\n\n\ndef _is_known_dtype(dt):\n \"\"\"Helper returning True if dtype is known.\"\"\"\n return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)\n\n\ndef _largest_integer_by_dtype(dt):\n \"\"\"Helper returning the largest integer exactly representable by dtype.\"\"\"\n if not _is_known_dtype(dt):\n raise TypeError(\"Unrecognized dtype: {}\".format(dt.name))\n if dt.is_floating:\n return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))\n if dt.is_integer:\n return np.iinfo(dt.as_numpy_dtype).max\n if dt.base_dtype == dtypes.bool:\n return int(1)\n # We actually can't land here but keep the case for completeness.\n raise TypeError(\"Unrecognized dtype: {}\".format(dt.name))\n\n\ndef _smallest_integer_by_dtype(dt):\n \"\"\"Helper returning the smallest integer exactly representable by dtype.\"\"\"\n if not _is_known_dtype(dt):\n raise TypeError(\"Unrecognized dtype: {}\".format(dt.name))\n if _is_known_unsigned_by_dtype(dt):\n return 0\n return -1 * _largest_integer_by_dtype(dt)\n\n\ndef _is_integer_like_by_dtype(dt):\n \"\"\"Helper returning True if dtype.is_integer or is `bool`.\"\"\"\n if not _is_known_dtype(dt):\n raise TypeError(\"Unrecognized dtype: {}\".format(dt.name))\n return dt.is_integer or dt.base_dtype == dtypes.bool\n\n\ndef embed_check_categorical_event_shape(\n categorical_param,\n name=\"embed_check_categorical_event_shape\"):\n \"\"\"Embeds checks that categorical distributions don't have too many classes.\n\n A categorical-type distribution is one which, e.g., returns the class label\n rather than a one-hot encoding. E.g., `Categorical(probs)`.\n\n Since distributions output samples in the same dtype as the parameters, we\n must ensure that casting doesn't lose precision. That is, the\n `parameter.dtype` implies a maximum number of classes. However, since shape is\n `int32` and categorical variables are presumed to be indexes into a `Tensor`,\n we must also ensure that the number of classes is no larger than the largest\n possible `int32` index, i.e., `2**31-1`.\n\n In other words the number of classes, `K`, must satisfy the following\n condition:\n\n ```python\n K <= min(\n int(2**31 - 1), # Largest float as an index.\n {\n dtypes.float16: int(2**11), # Largest int as a float16.\n dtypes.float32: int(2**24),\n dtypes.float64: int(2**53),\n }.get(categorical_param.dtype.base_dtype, 0))\n ```\n\n Args:\n categorical_param: Floating-point `Tensor` representing parameters of\n distribution over categories. 
The rightmost shape is presumed to be the\n number of categories.\n name: A name for this operation (optional).\n\n Returns:\n categorical_param: Input `Tensor` with appropriate assertions embedded.\n\n Raises:\n TypeError: if `categorical_param` has an unknown `dtype`.\n ValueError: if we can statically identify `categorical_param` as being too\n large (for being closed under int32/float casting).\n \"\"\"\n with ops.name_scope(name, values=[categorical_param]):\n x = ops.convert_to_tensor(categorical_param, name=\"categorical_param\")\n # The size must not exceed both of:\n # - The largest possible int32 (since categorical values are presumed to be\n # indexes into a Tensor).\n # - The largest possible integer exactly representable under the given\n # floating-point dtype (since we need to cast to/from).\n #\n # The chosen floating-point thresholds are 2**(1 + mantissa_bits).\n # For more details, see:\n # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation\n x_dtype = x.dtype.base_dtype\n max_event_size = (_largest_integer_by_dtype(x_dtype)\n if x_dtype.is_floating else 0)\n if max_event_size is 0:\n raise TypeError(\"Unable to validate size of unrecognized dtype \"\n \"({}).\".format(x_dtype.name))\n try:\n x_shape_static = x.get_shape().with_rank_at_least(1)\n except ValueError:\n raise ValueError(\"A categorical-distribution parameter must have \"\n \"at least 1 dimension.\")\n if x_shape_static[-1].value is not None:\n event_size = x_shape_static[-1].value\n if event_size < 2:\n raise ValueError(\"A categorical-distribution parameter must have at \"\n \"least 2 events.\")\n if event_size > max_event_size:\n raise ValueError(\n \"Number of classes exceeds `dtype` precision, i.e., \"\n \"{} implies shape ({}) cannot exceed {}.\".format(\n x_dtype.name, event_size, max_event_size))\n return x\n else:\n event_size = array_ops.shape(x, name=\"x_shape\")[-1]\n return control_flow_ops.with_dependencies([\n check_ops.assert_rank_at_least(\n x, 1, message=(\"A categorical-distribution parameter must have \"\n \"at least 1 dimension.\")),\n check_ops.assert_greater_equal(\n array_ops.shape(x)[-1], 2,\n message=(\"A categorical-distribution parameter must have at \"\n \"least 2 events.\")),\n check_ops.assert_less_equal(\n event_size, max_event_size,\n message=\"Number of classes exceeds `dtype` precision, \"\n \"i.e., {} dtype cannot exceed {} shape.\".format(\n x_dtype.name, max_event_size)),\n ], x)\n\n\ndef embed_check_integer_casting_closed(\n x,\n target_dtype,\n assert_nonnegative=True,\n name=\"embed_check_casting_closed\"):\n \"\"\"Ensures integers remain unaffected despite casting to/from int/float types.\n\n Example integer-types: `uint8`, `int32`, `bool`.\n Example floating-types: `float32`, `float64`.\n\n The largest possible integer representable by an IEEE754 floating-point is\n `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is\n `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have\n integer-form values can be cast to some other type without loss of precision.\n\n The smallest representable integer is the negative of the largest\n representable integer, except for types: `uint8`, `uint16`, `bool`. 
For these\n types, the smallest representable integer is `0`.\n\n Args:\n x: `Tensor` representing integer-form values.\n target_dtype: TF `dtype` under which `x` should have identical values.\n assert_nonnegative: `bool` indicating `x` should contain nonnegative values.\n name: A name for this operation (optional).\n\n Returns:\n x: Input `Tensor` with appropriate assertions embedded.\n\n Raises:\n TypeError: if `x` is neither integer- nor floating-type.\n TypeError: if `target_dtype` is neither integer- nor floating-type.\n TypeError: if neither `x` nor `target_dtype` are integer-type.\n \"\"\"\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n if (not _is_integer_like_by_dtype(x.dtype)\n and not x.dtype.is_floating):\n raise TypeError(\"{}.dtype must be floating- or \"\n \"integer-type.\".format(x.dtype.name))\n if (not _is_integer_like_by_dtype(target_dtype)\n and not target_dtype.is_floating):\n raise TypeError(\"target_dtype ({}) must be floating- or \"\n \"integer-type.\".format(target_dtype.name))\n if (not _is_integer_like_by_dtype(x.dtype)\n and not _is_integer_like_by_dtype(target_dtype)):\n raise TypeError(\"At least one of {}.dtype ({}) and target_dtype ({}) \"\n \"must be integer-type.\".format(\n x.op.name, x.dtype.name, target_dtype.name))\n\n assertions = []\n if assert_nonnegative:\n assertions += [\n check_ops.assert_non_negative(\n x, message=\"Elements must be non-negative.\"),\n ]\n\n if x.dtype.is_floating:\n # Being here means _is_integer_like_by_dtype(target_dtype) = True.\n # Since this check implies the magnitude check below, we need only it.\n assertions += [\n assert_integer_form(\n x, int_dtype=target_dtype,\n message=\"Elements must be {}-equivalent.\".format(\n target_dtype.name)),\n ]\n else:\n if (_largest_integer_by_dtype(x.dtype)\n > _largest_integer_by_dtype(target_dtype)):\n # Cast may lose integer precision.\n assertions += [\n check_ops.assert_less_equal(\n x, _largest_integer_by_dtype(target_dtype),\n message=(\"Elements cannot exceed {}.\".format(\n _largest_integer_by_dtype(target_dtype)))),\n ]\n if (not assert_nonnegative and\n (_smallest_integer_by_dtype(x.dtype)\n < _smallest_integer_by_dtype(target_dtype))):\n assertions += [\n check_ops.assert_greater_equal(\n x, _smallest_integer_by_dtype(target_dtype),\n message=(\"Elements cannot be smaller than {}.\".format(\n _smallest_integer_by_dtype(target_dtype)))),\n ]\n\n if not assertions:\n return x\n return control_flow_ops.with_dependencies(assertions, x)\n\n\ndef log_combinations(n, counts, name=\"log_combinations\"):\n \"\"\"Multinomial coefficient.\n\n Given `n` and `counts`, where `counts` has last dimension `k`, we compute\n the multinomial coefficient as:\n\n ```n! / sum_i n_i!```\n\n where `i` runs over all `k` classes.\n\n Args:\n n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`\n outcomes.\n counts: Floating-point `Tensor` broadcastable with `n`. This represents\n counts in `k` classes, where `k` is the last dimension of the tensor.\n name: A name for this operation (optional).\n\n Returns:\n `Tensor` representing the multinomial coefficient between `n` and `counts`.\n \"\"\"\n # First a bit about the number of ways counts could have come in:\n # E.g. if counts = [1, 2], then this is 3 choose 2.\n # In general, this is (sum counts)! / sum(counts!)\n # The sum should be along the last dimension of counts. This is the\n # \"distribution\" dimension. 
Here n a priori represents the sum of counts.\n with ops.name_scope(name, values=[n, counts]):\n n = ops.convert_to_tensor(n, name=\"n\")\n counts = ops.convert_to_tensor(counts, name=\"counts\")\n total_permutations = math_ops.lgamma(n + 1)\n counts_factorial = math_ops.lgamma(counts + 1)\n redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])\n return total_permutations - redundant_permutations\n\n\ndef matrix_diag_transform(matrix, transform=None, name=None):\n \"\"\"Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.\n\n Create a trainable covariance defined by a Cholesky factor:\n\n ```python\n # Transform network layer into 2 x 2 array.\n matrix_values = tf.contrib.layers.fully_connected(activations, 4)\n matrix = tf.reshape(matrix_values, (batch_size, 2, 2))\n\n # Make the diagonal positive. If the upper triangle was zero, this would be a\n # valid Cholesky factor.\n chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)\n\n # LinearOperatorTriL ignores the upper triangle.\n operator = LinearOperatorTriL(chol)\n ```\n\n Example of heteroskedastic 2-D linear regression.\n\n ```python\n # Get a trainable Cholesky factor.\n matrix_values = tf.contrib.layers.fully_connected(activations, 4)\n matrix = tf.reshape(matrix_values, (batch_size, 2, 2))\n chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)\n\n # Get a trainable mean.\n mu = tf.contrib.layers.fully_connected(activations, 2)\n\n # This is a fully trainable multivariate normal!\n dist = tf.contrib.distributions.MVNCholesky(mu, chol)\n\n # Standard log loss. Minimizing this will \"train\" mu and chol, and then dist\n # will be a distribution predicting labels as multivariate Gaussians.\n loss = -1 * tf.reduce_mean(dist.log_prob(labels))\n ```\n\n Args:\n matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are\n equal.\n transform: Element-wise function mapping `Tensors` to `Tensors`. To\n be applied to the diagonal of `matrix`. If `None`, `matrix` is returned\n unchanged. Defaults to `None`.\n name: A name to give created ops.\n Defaults to \"matrix_diag_transform\".\n\n Returns:\n A `Tensor` with same shape and `dtype` as `matrix`.\n \"\"\"\n with ops.name_scope(name, \"matrix_diag_transform\", [matrix]):\n matrix = ops.convert_to_tensor(matrix, name=\"matrix\")\n if transform is None:\n return matrix\n # Replace the diag with transformed diag.\n diag = array_ops.matrix_diag_part(matrix)\n transformed_diag = transform(diag)\n transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)\n\n return transformed_mat\n\n\ndef rotate_transpose(x, shift, name=\"rotate_transpose\"):\n \"\"\"Circularly moves dims left or right.\n\n Effectively identical to:\n\n ```python\n numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))\n ```\n\n When `validate_args=False` additional graph-runtime checks are\n performed. These checks entail moving data from to GPU to CPU.\n\n Example:\n\n ```python\n x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].\n rotate_transpose(x, -1).shape == [2, 3, 4, 1]\n rotate_transpose(x, -2).shape == [3, 4, 1, 2]\n rotate_transpose(x, 1).shape == [4, 1, 2, 3]\n rotate_transpose(x, 2).shape == [3, 4, 1, 2]\n rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]\n rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]\n ```\n\n Args:\n x: `Tensor`.\n shift: `Tensor`. Number of dimensions to transpose left (shift<0) or\n transpose right (shift>0).\n name: Python `str`. 
The name to give this op.\n\n Returns:\n rotated_x: Input `Tensor` with dimensions circularly rotated by shift.\n\n Raises:\n TypeError: if shift is not integer type.\n \"\"\"\n with ops.name_scope(name, values=[x, shift]):\n x = ops.convert_to_tensor(x, name=\"x\")\n shift = ops.convert_to_tensor(shift, name=\"shift\")\n # We do not assign back to preserve constant-ness.\n check_ops.assert_integer(shift)\n shift_value_static = tensor_util.constant_value(shift)\n ndims = x.get_shape().ndims\n if ndims is not None and shift_value_static is not None:\n if ndims < 2: return x\n shift_value_static = np.sign(shift_value_static) * (\n abs(shift_value_static) % ndims)\n if shift_value_static == 0: return x\n perm = np.roll(np.arange(ndims), shift_value_static)\n return array_ops.transpose(x, perm=perm)\n else:\n # Consider if we always had a positive shift, and some specified\n # direction.\n # When shifting left we want the new array:\n # last(x, n-shift) + first(x, shift)\n # and if shifting right then we want:\n # last(x, shift) + first(x, n-shift)\n # Observe that last(a) == slice(a, n) and first(a) == slice(0, a).\n # Also, we can encode direction and shift as one: direction * shift.\n # Combining these facts, we have:\n # a = cond(shift<0, -shift, n-shift)\n # last(x, n-a) + first(x, a) == x[a:n] + x[0:a]\n # Finally, we transform shift by modulo length so it can be specified\n # independently from the array upon which it operates (like python).\n ndims = array_ops.rank(x)\n shift = array_ops.where(math_ops.less(shift, 0),\n math_ops.mod(-shift, ndims),\n ndims - math_ops.mod(shift, ndims))\n first = math_ops.range(0, shift)\n last = math_ops.range(shift, ndims)\n perm = array_ops.concat([last, first], 0)\n return array_ops.transpose(x, perm=perm)\n\n\ndef pick_vector(cond,\n true_vector,\n false_vector,\n name=\"pick_vector\"):\n \"\"\"Picks possibly different length row `Tensor`s based on condition.\n\n Value `Tensor`s should have exactly one dimension.\n\n If `cond` is a python Boolean or `tf.constant` then either `true_vector` or\n `false_vector` is immediately returned. I.e., no graph nodes are created and\n no validation happens.\n\n Args:\n cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.\n true_vector: `Tensor` of one dimension. Returned when cond is `True`.\n false_vector: `Tensor` of one dimension. Returned when cond is `False`.\n name: Python `str`. 
The name to give this op.\n\n Example:\n\n ```python\n pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11]\n pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17]\n ```\n\n Returns:\n true_or_false_vector: `Tensor`.\n\n Raises:\n TypeError: if `cond.dtype != tf.bool`\n TypeError: if `cond` is not a constant and\n `true_vector.dtype != false_vector.dtype`\n \"\"\"\n with ops.name_scope(name, values=(cond, true_vector, false_vector)):\n cond = ops.convert_to_tensor(cond, name=\"cond\")\n if cond.dtype != dtypes.bool:\n raise TypeError(\"%s.dtype=%s which is not %s\" %\n (cond.name, cond.dtype, dtypes.bool))\n cond_value_static = tensor_util.constant_value(cond)\n if cond_value_static is not None:\n return true_vector if cond_value_static else false_vector\n true_vector = ops.convert_to_tensor(true_vector, name=\"true_vector\")\n false_vector = ops.convert_to_tensor(false_vector, name=\"false_vector\")\n if true_vector.dtype != false_vector.dtype:\n raise TypeError(\n \"%s.dtype=%s does not match %s.dtype=%s\"\n % (true_vector.name, true_vector.dtype,\n false_vector.name, false_vector.dtype))\n n = array_ops.shape(true_vector)[0]\n return array_ops.slice(\n array_ops.concat([true_vector, false_vector], 0),\n [array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])\n\n\ndef gen_new_seed(seed, salt):\n \"\"\"Generate a new seed, from the given seed and salt.\"\"\"\n if seed is None:\n return None\n string = (str(seed) + salt).encode(\"utf-8\")\n return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF\n\n\ndef fill_triangular(x, upper=False, name=None):\n \"\"\"Creates a (batch of) triangular matrix from a vector of inputs.\n\n Created matrix can be lower- or upper-triangular. (It is more efficient to\n create the matrix as upper or lower, rather than transpose.)\n\n Triangular matrix elements are filled in a clockwise spiral. See example,\n below.\n\n If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,\n b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,\n `n = int(np.sqrt(0.25 + 2. * m) - 0.5)`.\n\n Example:\n\n ```python\n fill_triangular([1, 2, 3, 4, 5, 6])\n # ==> [[4, 0, 0],\n # [6, 5, 0],\n # [3, 2, 1]]\n\n fill_triangular([1, 2, 3, 4, 5, 6], upper=True)\n # ==> [[1, 2, 3],\n # [0, 5, 6],\n # [0, 0, 4]]\n ```\n\n For comparison, a pure numpy version of this function can be found in\n `util_test.py`, function `_fill_triangular`.\n\n Args:\n x: `Tensor` representing lower (or upper) triangular elements.\n upper: Python `bool` representing whether output matrix should be upper\n triangular (`True`) or lower triangular (`False`, default).\n name: Python `str`. The name to give this op.\n\n Returns:\n tril: `Tensor` with lower (or upper) triangular elements filled from `x`.\n\n Raises:\n ValueError: if `x` cannot be mapped to a triangular matrix.\n \"\"\"\n\n with ops.name_scope(name, \"fill_triangular\", values=[x]):\n if x.shape.with_rank_at_least(1)[-1].value is not None:\n # Formula derived by solving for n: m = n(n+1)/2.\n m = np.int32(x.shape[-1].value)\n n = np.sqrt(0.25 + 2. * m) - 0.5\n if n != np.floor(n):\n raise ValueError(\"Input right-most shape ({}) does not \"\n \"correspond to a triangular matrix.\".format(m))\n n = np.int32(n)\n static_final_shape = x.shape[:-1].concatenate([n, n])\n else:\n m = array_ops.shape(x)[-1]\n # For derivation, see above. Casting automatically lops off the 0.5, so we\n # omit it. 
We don't validate n is an integer because this has\n # graph-execution cost; an error will be thrown from the reshape, below.\n n = math_ops.cast(\n math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),\n dtype=dtypes.int32)\n static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(\n [None, None])\n # We now concatenate the \"tail\" of `x` to `x` (and reverse one of them).\n #\n # We do this based on the insight that the input `x` provides `ceil(n/2)`\n # rows of an `n x n` matrix, some of which will get zeroed out being on the\n # wrong side of the diagonal. The first row will not get zeroed out at all,\n # and we need `floor(n/2)` more rows, so the first is what we omit from\n # `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`\n # rows provided by a reversed tail, it is exactly the other set of elements\n # of the reversed tail which will be zeroed out for being on the wrong side\n # of the diagonal further up/down the matrix. And, in doing-so, we've filled\n # the triangular matrix in a clock-wise spiral pattern. Neat!\n #\n # Try it out in numpy:\n # n = 3\n # x = np.arange(n * (n + 1) / 2)\n # m = x.shape[0]\n # n = np.int32(np.sqrt(.25 + 2 * m) - .5)\n # x_tail = x[(m - (n**2 - m)):]\n # np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower\n # # ==> array([[3, 4, 5],\n # [5, 4, 3],\n # [2, 1, 0]])\n # np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper\n # # ==> array([[0, 1, 2],\n # [3, 4, 5],\n # [5, 4, 3]])\n #\n # Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't\n # correctly handle `m == n == 1`. Hence, we do nonnegative indexing.\n # Furthermore observe that:\n # m - (n**2 - m)\n # = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)\n # = 2 (n**2 / 2 + n / 2) - n**2\n # = n**2 + n - n**2\n # = n\n if upper:\n x_list = [x, array_ops.reverse(x[..., n:], axis=[-1])]\n else:\n x_list = [x[..., n:], array_ops.reverse(x, axis=[-1])]\n new_shape = (\n static_final_shape.as_list()\n if static_final_shape.is_fully_defined()\n else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))\n x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)\n x = array_ops.matrix_band_part(\n x,\n num_lower=(0 if upper else -1),\n num_upper=(-1 if upper else 0))\n x.set_shape(static_final_shape)\n return x\n\n\ndef tridiag(below=None, diag=None, above=None, name=None):\n \"\"\"Creates a matrix with values set above, below, and on the diagonal.\n\n Example:\n\n ```python\n tridiag(below=[1., 2., 3.],\n diag=[4., 5., 6., 7.],\n above=[8., 9., 10.])\n # ==> array([[ 4., 8., 0., 0.],\n # [ 1., 5., 9., 0.],\n # [ 0., 2., 6., 10.],\n # [ 0., 0., 3., 7.]], dtype=float32)\n ```\n\n Warning: This Op is intended for convenience, not efficiency.\n\n Args:\n below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below\n diagonal part. `None` is logically equivalent to `below = 0`.\n diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal\n part. `None` is logically equivalent to `diag = 0`.\n above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above\n diagonal part. `None` is logically equivalent to `above = 0`.\n name: Python `str`. 
The name to give this op.\n\n Returns:\n tridiag: `Tensor` with values set above, below and on the diagonal.\n\n Raises:\n ValueError: if all inputs are `None`.\n \"\"\"\n\n def _pad(x):\n \"\"\"Prepends and appends a zero to every vector in a batch of vectors.\"\"\"\n shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)\n z = array_ops.zeros(shape, dtype=x.dtype)\n return array_ops.concat([z, x, z], axis=-1)\n\n def _add(*x):\n \"\"\"Adds list of Tensors, ignoring `None`.\"\"\"\n s = None\n for y in x:\n if y is None:\n continue\n elif s is None:\n s = y\n else:\n s += y\n if s is None:\n raise ValueError(\"Must specify at least one of `below`, `diag`, `above`.\")\n return s\n\n with ops.name_scope(name, \"tridiag\", [below, diag, above]):\n if below is not None:\n below = ops.convert_to_tensor(below, name=\"below\")\n below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]\n if diag is not None:\n diag = ops.convert_to_tensor(diag, name=\"diag\")\n diag = array_ops.matrix_diag(diag)\n if above is not None:\n above = ops.convert_to_tensor(above, name=\"above\")\n above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]\n # TODO(jvdillon): Consider using scatter_nd instead of creating three full\n # matrices.\n return _add(below, diag, above)\n\n\ndef reduce_weighted_logsumexp(\n logx,\n w=None,\n axis=None,\n keep_dims=False,\n return_sign=False,\n name=None):\n \"\"\"Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.\n\n If all weights `w` are known to be positive, it is more efficient to directly\n use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more\n efficient than `du.reduce_weighted_logsumexp(logx, w)`.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(w * exp(input))). It\n avoids overflows caused by taking the exp of large inputs and underflows\n caused by taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0, 0],\n [0, 0, 0]])\n\n w = tf.constant([[-1., 1, 1],\n [1, 1, 1]])\n\n du.reduce_weighted_logsumexp(x, w)\n # ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)\n\n du.reduce_weighted_logsumexp(x, w, axis=0)\n # ==> [log(-1+1), log(1+1), log(1+1)]\n\n du.reduce_weighted_logsumexp(x, w, axis=1)\n # ==> [log(-1+1+1), log(1+1+1)]\n\n du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)\n # ==> [[log(-1+1+1)], [log(1+1+1)]]\n\n du.reduce_weighted_logsumexp(x, w, axis=[0, 1])\n # ==> log(-1+5)\n ```\n\n Args:\n logx: The tensor to reduce. Should have numeric type.\n w: The weight tensor. Should have numeric type identical to `logx`.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keep_dims: If true, retains reduced dimensions with length 1.\n return_sign: If `True`, returns the sign of the result.\n name: A name for the operation (optional).\n\n Returns:\n lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.\n sign: (Optional) The sign of `sum(weight * exp(x))`.\n \"\"\"\n with ops.name_scope(name, \"reduce_weighted_logsumexp\", [logx, w]):\n logx = ops.convert_to_tensor(logx, name=\"logx\")\n if w is None:\n lswe = math_ops.reduce_logsumexp(logx, axis=axis, keep_dims=keep_dims)\n if return_sign:\n sgn = array_ops.ones_like(lswe)\n return lswe, sgn\n return lswe\n w = ops.convert_to_tensor(w, dtype=logx.dtype, name=\"w\")\n log_absw_x = logx + math_ops.log(math_ops.abs(w))\n max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keep_dims=True)\n # If the largest element is `-inf` or `inf` then we don't bother subtracting\n # off the max. We do this because otherwise we'd get `inf - inf = NaN`. That\n # this is ok follows from the fact that we're actually free to subtract any\n # value we like, so long as we add it back after taking the `log(sum(...))`.\n max_log_absw_x = array_ops.where(\n math_ops.is_inf(max_log_absw_x),\n array_ops.zeros_like(max_log_absw_x),\n max_log_absw_x)\n wx_over_max_absw_x = (\n math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))\n sum_wx_over_max_absw_x = math_ops.reduce_sum(\n wx_over_max_absw_x,\n axis=axis,\n keep_dims=keep_dims)\n if not keep_dims:\n max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)\n sgn = math_ops.sign(sum_wx_over_max_absw_x)\n lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)\n if return_sign:\n return lswe, sgn\n return lswe\n\n\n# TODO(jvdillon): Merge this test back into:\n# tensorflow/python/ops/softplus_op_test.py\n# once TF core is accepting new ops.\ndef softplus_inverse(x, name=None):\n \"\"\"Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).\n\n Mathematically this op is equivalent to:\n\n ```none\n softplus_inverse = log(exp(x) - 1.)\n ```\n\n Args:\n x: `Tensor`. Non-negative (not enforced), floating-point.\n name: A name for the operation (optional).\n\n Returns:\n `Tensor`. Has the same type/shape as input `x`.\n \"\"\"\n with ops.name_scope(name, \"softplus_inverse\", values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n # We begin by deriving a more numerically stable softplus_inverse:\n # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).\n # ==> exp{x} = 1 + exp{y} (1)\n # ==> y = Log[exp{x} - 1] (2)\n # = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]\n # = Log[(1 - exp{-x}) / 1] + Log[exp{x}]\n # = Log[1 - exp{-x}] + x (3)\n # (2) is the \"obvious\" inverse, but (3) is more stable than (2) for large x.\n # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will\n # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.\n #\n # In addition to the numerically stable derivation above, we clamp\n # small/large values to be congruent with the logic in:\n # tensorflow/core/kernels/softplus_op.h\n #\n # Finally, we set the input to one whenever the input is too large or too\n # small. This ensures that no unchosen codepath is +/- inf. This is\n # necessary to ensure the gradient doesn't get NaNs. Recall that the\n # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`\n # thus an `inf` in an unselected path results in `0*inf=nan`. 
We are careful\n # to overwrite `x` with ones only when we will never actually use this\n # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.\n threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.\n is_too_small = math_ops.less(x, np.exp(threshold))\n is_too_large = math_ops.greater(x, -threshold)\n too_small_value = math_ops.log(x)\n too_large_value = x\n # This `where` will ultimately be a NOP because we won't select this\n # codepath whenever we used the surrogate `ones_like`.\n x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),\n array_ops.ones_like(x), x)\n y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))\n return array_ops.where(is_too_small, too_small_value,\n array_ops.where(is_too_large, too_large_value, y))\n\n\n# TODO(b/35290280): Add unit-tests.\ndef dimension_size(x, axis):\n \"\"\"Returns the size of a specific dimension.\"\"\"\n # Since tf.gather isn't \"constant-in, constant-out\", we must first check the\n # static shape or fallback to dynamic shape.\n num_rows = (None if x.get_shape().ndims is None\n else x.get_shape()[axis].value)\n if num_rows is not None:\n return num_rows\n return array_ops.shape(x)[axis]\n\n\nclass AppendDocstring(object):\n \"\"\"Helper class to promote private subclass docstring to public counterpart.\n\n Example:\n\n ```python\n class TransformedDistribution(Distribution):\n @distribution_util.AppendDocstring(\n additional_note=\"A special note!\",\n kwargs_dict={\"foo\": \"An extra arg.\"})\n def _prob(self, y, foo=None):\n pass\n ```\n\n In this case, the `AppendDocstring` decorator appends the `additional_note` to\n the docstring of `prob` (not `_prob`) and adds a new `kwargs`\n section with each dictionary item as a bullet-point.\n\n For a more detailed example, see `TransformedDistribution`.\n \"\"\"\n\n def __init__(self, additional_note=\"\", kwargs_dict=None):\n \"\"\"Initializes the AppendDocstring object.\n\n Args:\n additional_note: Python string added as additional docstring to public\n version of function.\n kwargs_dict: Python string/string dictionary representing\n specific kwargs expanded from the **kwargs input.\n\n Raises:\n ValueError: if kwargs_dict.key contains whitespace.\n ValueError: if kwargs_dict.value contains newlines.\n \"\"\"\n self._additional_note = additional_note\n if kwargs_dict:\n bullets = []\n for key in sorted(kwargs_dict.keys()):\n value = kwargs_dict[key]\n if any(x.isspace() for x in key):\n raise ValueError(\n \"Parameter name \\\"%s\\\" contains whitespace.\" % key)\n value = value.lstrip()\n if \"\\n\" in value:\n raise ValueError(\n \"Parameter description for \\\"%s\\\" contains newlines.\" % key)\n bullets.append(\"* `%s`: %s\" % (key, value))\n self._additional_note += (\"\\n\\n##### `kwargs`:\\n\\n\" +\n \"\\n\".join(bullets))\n\n def __call__(self, fn):\n @functools.wraps(fn)\n def _fn(*args, **kwargs):\n return fn(*args, **kwargs)\n if _fn.__doc__ is None:\n _fn.__doc__ = self._additional_note\n else:\n _fn.__doc__ += \"\\n%s\" % self._additional_note\n return _fn\n" ]
[ [ "tensorflow.python.ops.check_ops.assert_less", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.abs", "tensorflow.contrib.distributions.python.ops.shape._DistributionShape", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.contrib.linalg.LinearOperatorUDVHUpdate", "tensorflow.python.ops.math_ops.equal", "tensorflow.contrib.distributions.python.ops.distribution_util.make_tril_scale", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.contrib.distributions.python.ops.distribution_util.dimension_size", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.import_graph_def", "tensorflow.InteractiveSession", "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.get_default_graph", "tensorflow.GraphDef", "tensorflow.logging.fatal", "tensorflow.gfile.FastGFile" ], [ "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.eager.execute.make_int", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.context.context", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.eager.execute.execute" ], [ "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.eager.execute.make_bool", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.context.context", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.eager.execute.execute" ], [ "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.eager.execute.make_bool", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.framework.ops.convert_n_to_tensor", "tensorflow.python.eager.execute.make_int", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.context.context", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.eager.execute.execute" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_v3", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_scatter_v3", "tensorflow.python.ops.math_ops.to_int64", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_gather_v3", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_size_v3", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_write_v3", 
"tensorflow.python.ops.gen_data_flow_ops._tensor_array_concat_v3", "tensorflow.python.eager.context.in_graph_mode", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_grad_v3", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_read_v3", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_close_v3", "tensorflow.python.ops.gen_data_flow_ops._tensor_array_split_v3" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.shape", "numpy.sqrt", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.nn.softmax", "tensorflow.python.ops.math_ops.greater", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.expm1", "numpy.iinfo", "tensorflow.python.ops.control_flow_ops.no_op", "numpy.exp", "tensorflow.python.ops.check_ops.assert_integer", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.check_ops.assert_less_equal", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.math_ops.reduce_logsumexp", "numpy.arange", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.math_ops.is_inf", "numpy.finfo", "tensorflow.python.ops.math_ops.log1p", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.math_ops.lgamma", "tensorflow.python.ops.math_ops.logical_or", "tensorflow.python.ops.check_ops.assert_non_negative", "tensorflow.python.ops.check_ops.assert_rank_at_least", "tensorflow.python.ops.array_ops.matrix_diag_part", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.matrix_set_diag", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.array_ops.matrix_transpose", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.ops.array_ops.matrix_band_part", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.floor", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.ops.array_ops.reverse", "tensorflow.python.ops.array_ops.concat", "numpy.int32", "tensorflow.python.ops.math_ops.mod", "numpy.sign", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.matrix_diag", "tensorflow.python.ops.math_ops.sign", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "1.4", "2.6", "1.13", "2.3", "2.4", "2.2", "2.9", "1.5", "1.7", "2.5", "1.0", "2.8", "1.2", "2.10" ] } ]
bryanblackbee/topic__deep-learning-python
[ "6d916cee3457a886f3bffc7a5dd97a4d627b3c23" ]
[ "chap06/weather_modelv4_stacked_rnn_with_dropout.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing import sequence\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import (Flatten, Dense, SimpleRNN, LSTM, GRU)\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n\ndef ingest():\n # Read from CSV, keep only values\n df = pd.read_csv('jena_climate_2009_2016.csv')\n df = df.iloc[:,1:]\n df_values = df.values\n # Normalisation\n df_mean = df_values[:200000].mean(axis=0)\n df_std = df_values[:200000].std(axis=0)\n df_values-=df_mean\n df_values/=df_std\n return df_values\n\n# Generator\ndef generator(data, lookback=0, delay=0, min_index=0, \n max_index=None, shuffle=False,\n batch_size=128, step=6):\n if max_index == None:\n max_index = len(data) - delay - 1\n i = min_index + lookback\n \n while 1:\n if shuffle:\n rows = np.random.randint(\n min_index + lookback, max_index, size=batch_size)\n else:\n if i + batch_size >= max_index:\n i = min_index + lookback\n rows = np.arange(i, min(i + batch_size, max_index))\n i+= len(rows)\n \n samples = np.zeros((len(rows),\n lookback // step,\n data.shape[-1]))\n targets = np.zeros((len(rows,)))\n for j, row in enumerate(rows):\n indices = range(rows[j] - lookback, rows[j], step)\n samples[j] = data[indices]\n targets[j] = data[rows[j] + delay][1]\n yield samples, targets\n\ndf_values = ingest()\n\nLOOKBACK, STEP, DELAY, BATCH_SIZE = 1440, 6, 144, 128\ntrain_min_i, train_max_i = 0, 200000\nval_min_i, val_max_i = 200001, 300000\ntest_min_i, test_max_i = 300001, None\nval_steps = (val_max_i - val_min_i - LOOKBACK)\ntest_steps = (len(df_values) - test_min_i - LOOKBACK)\ntrain_gen = generator(df_values, \n lookback=LOOKBACK, delay=DELAY, \n min_index=train_min_i, max_index=train_max_i, \n batch_size=BATCH_SIZE, step=STEP,shuffle=True)\nval_gen = generator(df_values, \n lookback=LOOKBACK, delay=DELAY, \n min_index=val_min_i, max_index=val_max_i, \n batch_size=BATCH_SIZE, step=STEP,shuffle=False)\ntest_gen = generator(df_values, \n lookback=LOOKBACK, delay=DELAY, \n min_index=test_min_i, max_index=test_max_i, \n batch_size=BATCH_SIZE, step=STEP,shuffle=False) \n\n# Instantiate Model\n###################\nclear_session()\nmodel4 = Sequential()\nmodel4.add(GRU(32, dropout=0.1, recurrent_dropout=0.5, \n input_shape=(None, df_values.shape[-1]), return_sequences=True))\nmodel4.add(GRU(64, dropout=0.1, recurrent_dropout=0.5, \n activation='relu'))\nmodel4.add(Dense(1))\nmodel4.compile(optimizer=RMSprop(), loss='mae', metrics=['mae'])\nprint(model4.summary())\n\n# Train\n#######\nm2_callbacks = [\n # interrupt training when there is no more improvement.\n # patience=2 means interrupt training when accuracy has stopped improving\n # for more than 2 epochs. mae MUST be in the compile step in the metrics\n EarlyStopping(monitor='mae', patience=2),\n # saves the current weights after every epoch\n # only overwrite the model file when val_loss has improved\n ModelCheckpoint('weather__v4__stacked_rnn_with_dropout.h5', monitor='val_loss', save_best_only=True)]\nhistory4 = model4.fit(train_gen, \n steps_per_epoch=500, \n epochs=40, \n validation_data=val_gen, \n callbacks=m2_callbacks,\n validation_steps=val_steps)\nmetrics_df = pd.DataFrame(history4.history)\nmetrics_df.to_csv('history4.csv', index=False)\n\n# Save\n######\nmodel4.save('weather__v4__stacked_rnn_with_dropout.h5')\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "pandas.read_csv", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.RMSprop", "pandas.DataFrame", "tensorflow.keras.layers.GRU", "tensorflow.keras.backend.clear_session", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.models.Sequential", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Johnny-Wish/fake-news-detection-pipeline
[ "3bdad59d680968375a23d72c80af7d6ef11d7711", "3bdad59d680968375a23d72c80af7d6ef11d7711" ]
[ "model/__main__.py", "model/hypertuned_models.py" ]
[ "import os\nimport argparse\nimport heapq\nimport pandas as pd\nimport pickle as pkl\nfrom embedding_utils import EmbeddingLoader\nfrom sklearn.model_selection import RandomizedSearchCV, train_test_split\nfrom sklearn.model_selection._search import BaseSearchCV\n\n\ndef print_cv_result(result, n):\n if isinstance(result, BaseSearchCV):\n result = result.cv_results_\n\n scores = result['mean_test_score']\n params = result['params']\n\n if n < 0:\n n = len(scores)\n\n print(\"Cross Validation result in descending order: (totalling {} trials)\".format(n))\n for rank, candidate, in enumerate(heapq.nlargest(n, zip(scores, params), key=lambda tup: tup[0])):\n print(\"rank {}, score = {}\\n hyperparams = {}\".format(rank + 1, *candidate))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input\", required=True, help=\"parent dir to load embeddings\")\n parser.add_argument(\"--output\", required=True, help=\"parent dir to dump search results\")\n # uses python reflection to dynamically load model\n parser.add_argument(\"--classifier\", required=True,\n help=\"classifier to use, must be existent under model/, such as model/KNN.py\")\n parser.add_argument(\"--corpus\", default=\"title\", help=\"title, text, or concatenated\")\n parser.add_argument(\"--embedding\", default=\"d2v\",\n help=\"embeddings model to use, must be one of [d2v, nd2v, onehot], default is d2v\")\n parser.add_argument(\"--n_iter\", default=100, type=int, help=\"number of trials to run during cross-validation. \"\n \"default=100. This is NOT epochs to train d2v\")\n parser.add_argument(\"--n_jobs\", default=1, type=int, help=\"number of cpu workers to run in parallel\")\n parser.add_argument(\"--cv\", default=5, type=int, help=\"number of folds for cross-validation, default=5\")\n # hyperparameters for doc2vec\n parser.add_argument(\"--vec_size\", default=300, type=int,\n help=\"size of vectors, default is 300, recommended to be left untouched\")\n parser.add_argument(\"--win_size\", default=13, type=int,\n help=\"window size, used if model is d2v, default = 13\")\n parser.add_argument(\"--min_count\", default=5, type=int,\n help=\"min count for inclusion in dict, used if model is d2v, default = 5\")\n parser.add_argument(\"--dm\", action=\"store_true\",\n help=\"whether to use DM or DBOW, used if model is d2v, default is DBOW\")\n parser.add_argument(\"--epochs\", default=100, type=int,\n help=\"number of epochs to train the model for, used if model is d2v, default = 100. 
This is \"\n \"NOT the epochs for RandomizedSearch\")\n # hyperparameters for naive doc2vec\n parser.add_argument(\"--normalizer\", default=None,\n help=\"normalizer for naive doc2vec, either l2 or mean, default is None\")\n # hyperparameters for one-hot\n parser.add_argument(\"--scorer\", default=\"count\",\n help=\"scorer function for one-hot, either tfidf or count, default is count\")\n\n opt = parser.parse_args()\n print(opt)\n\n loader = EmbeddingLoader(opt.input)\n\n # filename is saved for dumping CV results later\n if opt.embedding == \"d2v\":\n filename = loader.get_d2v_filename(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,\n min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)\n embeddings = loader.get_d2v(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,\n min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)\n elif opt.embedding == \"nd2v\":\n filename = loader.get_nd2v_filename(corpus=opt.corpus, normalizer=opt.normalizer)\n embeddings = loader.get_nd2v(corpus=opt.corpus, normalizer=opt.normalizer)\n elif opt.embedding == \"onehot\":\n filename = loader.get_onehot_filename(corpus=opt.corpus, scorer=opt.scorer, normalize=opt.normalize is not None)\n embeddings = loader.get_onehot(corpus=opt.corpus, scorer=opt.scorer, normalize=opt.normalize is not None)\n else:\n print(\"unrecognized embedding method: {}; proceed with d2v as fall back\".format(opt.embedding))\n filename = loader.get_d2v_filename(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,\n min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)\n embeddings = loader.get_d2v(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,\n min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)\n\n labels = loader.get_label()\n\n seed = 0\n embeddings_train, embeddings_test, labels_train, labels_test = \\\n train_test_split(embeddings, labels, test_size=0.25, random_state=seed, stratify=labels)\n\n # import the target file\n try:\n module = __import__(\"model.\" + opt.classifier)\n module = getattr(module, opt.classifier)\n except ModuleNotFoundError as e:\n print(\"There is no such file, double check that you have a `model/{}.py`\".format(opt.classifier))\n print(\"If you have checked and the problem persist, make sure to run this script from ROOTDIR instead of \"\n \"ROOTDIR/model, your command should look like `python -m model ...`\")\n raise e\n print(\"Successfully imported module {}\".format(module))\n\n # get the model from the target file\n try:\n model = getattr(module, \"model\")\n except AttributeError as e:\n print(\"There is no `model` attribute in `model/{}.py`\".format(opt.classifier))\n print(\"Make sure to include a variable named `model` in your file\")\n raise e\n print(\"Successfully obtained model {}\".format(model))\n\n # get the hyperparameters to be trained\n try:\n param_dist = getattr(module, \"param_dist\")\n except AttributeError as e:\n print(\"There is no `param_dist` attribute in `model/{}.py`\".format(opt.classifier))\n print(\"Make sure to include a variable named `param_dist` in your file\")\n raise e\n print(\"Successfully obtained param_dist {}\".format(param_dist))\n\n verbose = opt.cv * opt.n_iter\n searcher = RandomizedSearchCV(model, param_distributions=param_dist, n_iter=opt.n_iter, scoring='f1', cv=opt.cv,\n verbose=verbose, random_state=seed, error_score=0, return_train_score=False,\n n_jobs=opt.n_jobs)\n searcher.fit(embeddings_train, labels_train)\n\n print(\"best: {}\\n{}\\n{}\\n{}\".format(searcher.best_index_, 
searcher.best_score_, searcher.best_estimator_,\n searcher.best_params_))\n # The following line is meant for floydhub renderer to grep\n print('{\"metric\": \"highest_val\", \"value\": %f}' % searcher.best_score_)\n\n results = pd.DataFrame(searcher.cv_results_)\n\n filename_classifier = opt.classifier\n dump_filename = \"{}-{}\".format(opt.classifier, filename)\n with open(os.path.join(opt.output, dump_filename), \"wb\") as f:\n pkl.dump(results, f)\n\n print_cv_result(results, n=-1)\n\n # uses all training samples to refit the model\n searcher.best_estimator_.fit(embeddings_train, labels_train)\n test_score = searcher.best_estimator_.score(embeddings_test, labels_test)\n print(\"Final test score of the best performing model: {}\".format(test_score))\n\n # The following line is meant for floydhub renderer to grep\n print('{\"metric\": \"test\", \"value\": %f}' % test_score)\n", "from sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\n# MLP classifier\nmlp = MLPClassifier(activation='relu', alpha=0.01, batch_size='auto', beta_1=0.8,\n beta_2=0.9, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(600, 300), learning_rate='constant',\n learning_rate_init=0.0001, max_iter=500, momentum=0.9,\n nesterovs_momentum=True, power_t=0.5, random_state=0, shuffle=True,\n solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,\n warm_start=False)\n\n# KNN classifier\nknn = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='cosine',\n metric_params=None, n_jobs=-1, n_neighbors=10, p=2,\n weights='distance')\n\n# QDA classifier\nqda = QuadraticDiscriminantAnalysis(priors=np.array([0.5, 0.5]),\n reg_param=0.6531083254653984, store_covariance=False,\n store_covariances=None, tol=0.0001)\n\n# GDB classifier\ngdb = GradientBoostingClassifier(criterion='friedman_mse', init=None,\n learning_rate=0.1, loss='exponential', max_depth=10,\n max_features='log2', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=0.0012436966435001434,\n min_samples_split=100, min_weight_fraction_leaf=0.0,\n n_estimators=200, presort='auto', random_state=0,\n subsample=0.8, verbose=0, warm_start=False)\n\n# SVC classifier\nsvc = SVC(C=0.8, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',\n max_iter=-1, probability=True, random_state=0, shrinking=True,\n tol=0.001, verbose=False)\n\n# GNB classifier\ngnb = GaussianNB(priors=None)\n\n# RF classifier\nrf = RandomForestClassifier(bootstrap=False, class_weight=None,\n criterion='entropy', max_depth=10, max_features=7,\n max_leaf_nodes=None, min_impurity_decrease=0.0,\n min_impurity_split=None, min_samples_leaf=9,\n min_samples_split=6, min_weight_fraction_leaf=0.0,\n n_estimators=50, n_jobs=-1, oob_score=False, random_state=None,\n verbose=0, warm_start=False)\n\n# Logistic Regression classifier\nlg = LogisticRegression(C=7.374558791, class_weight=None, dual=False,\n fit_intercept=True, intercept_scaling=1, max_iter=100,\n multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,\n solver='liblinear', tol=0.0001, verbose=0, warm_start=False)\n\n# # All the parameters of the 
classifiers above are optimal in our experiments\n# # The list below is used to store every classifier instance\n# classifiers_list = [mlp, knn, qda, gdb, svc, gnb, rf]\n# classifiers_labels = ['MultiLayerPerceptron', 'KNeighbours', 'QuadraticDiscriminantAnalysis', 'GradientBoosting', 'SVC',\n# 'GaussianNB', 'RandomForest']\nclassifiers = [mlp, knn, qda, gdb, svc, gnb, rf, lg]\n\nif __name__ == '__main__':\n for clf in classifiers:\n print(clf)\n" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.model_selection.RandomizedSearchCV", "pandas.DataFrame" ], [ "sklearn.neural_network.MLPClassifier", "sklearn.naive_bayes.GaussianNB", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.neighbors.KNeighborsClassifier", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
naototachibana/chainer-chemistry
[ "04577ba920b46c7141d4f01e212f7040eb91db19", "04577ba920b46c7141d4f01e212f7040eb91db19", "04577ba920b46c7141d4f01e212f7040eb91db19" ]
[ "examples/own_dataset/train_own_dataset.py", "tests/links_tests/array_tests/test_shape_transformer_to_2d.py", "chainer_chemistry/models/prediction/regressor.py" ]
[ "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport chainer\nimport numpy\nimport os\n\nfrom argparse import ArgumentParser\nfrom chainer.datasets import split_dataset_random\nfrom chainer import functions as F\nfrom chainer import optimizers\nfrom chainer import training\nfrom chainer.iterators import SerialIterator\nfrom chainer.training import extensions as E\n\nfrom chainer_chemistry.dataset.converters import concat_mols\nfrom chainer_chemistry.dataset.parsers import CSVFileParser\nfrom chainer_chemistry.dataset.preprocessors import preprocess_method_dict\nfrom chainer_chemistry.links.scaler.standard_scaler import StandardScaler\nfrom chainer_chemistry.models import Regressor\nfrom chainer_chemistry.models.prediction import set_up_predictor\nfrom chainer_chemistry.training.extensions.auto_print_report import \\\n AutoPrintReport\nfrom chainer_chemistry.utils import run_train\n\n\ndef rmse(x0, x1):\n return F.sqrt(F.mean_squared_error(x0, x1))\n\n\ndef parse_arguments():\n # Lists of supported preprocessing methods/models.\n method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn', 'relgcn',\n 'relgat', 'mpnn', 'gnnfilm']\n scale_list = ['standardize', 'none']\n\n # Set up the argument parser.\n parser = ArgumentParser(description='Regression on own dataset')\n parser.add_argument('--datafile', '-d', type=str,\n default='dataset_train.csv',\n help='csv file containing the dataset')\n parser.add_argument('--method', '-m', type=str, choices=method_list,\n help='method name', default='nfp')\n parser.add_argument('--label', '-l', nargs='+',\n default=['value1', 'value2'],\n help='target label for regression')\n parser.add_argument('--scale', type=str, choices=scale_list,\n help='label scaling method', default='standardize')\n parser.add_argument('--conv-layers', '-c', type=int, default=4,\n help='number of convolution layers')\n parser.add_argument('--batchsize', '-b', type=int, default=32,\n help='batch size')\n parser.add_argument(\n '--device', type=str, default='-1',\n help='Device specifier. Either ChainerX device specifier or an '\n 'integer. If non-negative integer, CuPy arrays with specified '\n 'device id are used. If negative integer, NumPy arrays are used')\n parser.add_argument('--out', '-o', type=str, default='result',\n help='path to save the computed model to')\n parser.add_argument('--epoch', '-e', type=int, default=10,\n help='number of epochs')\n parser.add_argument('--unit-num', '-u', type=int, default=16,\n help='number of units in one layer of the model')\n parser.add_argument('--seed', '-s', type=int, default=777,\n help='random seed value')\n parser.add_argument('--train-data-ratio', '-r', type=float, default=0.7,\n help='ratio of training data w.r.t the dataset')\n parser.add_argument('--protocol', type=int, default=2,\n help='pickle protocol version')\n parser.add_argument('--model-filename', type=str, default='regressor.pkl',\n help='saved model filename')\n return parser.parse_args()\n\n\ndef main():\n # Parse the arguments.\n args = parse_arguments()\n\n if args.label:\n labels = args.label\n class_num = len(labels) if isinstance(labels, list) else 1\n else:\n raise ValueError('No target label was specified.')\n\n # Dataset preparation. 
Postprocessing is required for the regression task.\n def postprocess_label(label_list):\n return numpy.asarray(label_list, dtype=numpy.float32)\n\n # Apply a preprocessor to the dataset.\n print('Preprocessing dataset...')\n preprocessor = preprocess_method_dict[args.method]()\n parser = CSVFileParser(preprocessor, postprocess_label=postprocess_label,\n labels=labels, smiles_col='SMILES')\n dataset = parser.parse(args.datafile)['dataset']\n\n # Scale the label values, if necessary.\n if args.scale == 'standardize':\n scaler = StandardScaler()\n scaler.fit(dataset.get_datasets()[-1])\n else:\n scaler = None\n\n # Split the dataset into training and validation.\n train_data_size = int(len(dataset) * args.train_data_ratio)\n train, _ = split_dataset_random(dataset, train_data_size, args.seed)\n\n # Set up the predictor.\n predictor = set_up_predictor(\n args.method, args.unit_num,\n args.conv_layers, class_num, label_scaler=scaler)\n\n # Set up the regressor.\n device = chainer.get_device(args.device)\n metrics_fun = {'mae': F.mean_absolute_error, 'rmse': rmse}\n regressor = Regressor(predictor, lossfun=F.mean_squared_error,\n metrics_fun=metrics_fun, device=device)\n\n print('Training...')\n run_train(regressor, train, valid=None,\n batch_size=args.batchsize, epoch=args.epoch,\n out=args.out, extensions_list=None,\n device=device, converter=concat_mols,\n resume_path=None)\n\n # Save the regressor's parameters.\n model_path = os.path.join(args.out, args.model_filename)\n print('Saving the trained model to {}...'.format(model_path))\n\n # TODO(nakago): ChainerX array cannot be sent to numpy array when internal\n # state has gradients.\n if hasattr(regressor.predictor.graph_conv, 'reset_state'):\n regressor.predictor.graph_conv.reset_state()\n\n regressor.save_pickle(model_path, protocol=args.protocol)\n\n\nif __name__ == '__main__':\n main()\n", "import numpy\nimport pytest\n\nfrom chainer_chemistry.links.array.shape_transformer_to_2d import ShapeTransformerTo2D # NOQA\n\n\[email protected]('axis', [0, 1, -1])\ndef test_shape_transformer_2d_2d_array(axis):\n st = ShapeTransformerTo2D(axis=axis)\n x = numpy.arange(6).reshape((2, 3))\n xt = st.transform(x)\n xit = st.inverse_transform(xt)\n if axis == 0:\n assert numpy.allclose(xt.array, numpy.array([[0, 3], [1, 4], [2, 5]]))\n elif axis == 1 or axis == -1:\n assert numpy.allclose(x, xt.array)\n\n assert numpy.allclose(x, xit.array)\n\n\[email protected]('axis', [0, 1, 2, -1])\ndef test_shape_transformer_2d_3d_array(axis):\n st = ShapeTransformerTo2D(axis=axis)\n x = numpy.arange(12).reshape((2, 3, 2))\n xt = st.transform(x)\n xit = st.inverse_transform(xt)\n if axis == 0:\n assert numpy.allclose(\n xt.array,\n numpy.array([[0, 6], [1, 7], [2, 8], [3, 9], [4, 10], [5, 11]]))\n elif axis == 1:\n assert numpy.allclose(\n xt.array,\n numpy.array([[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]))\n elif axis == 2 or axis == -1:\n assert numpy.allclose(\n xt.array, x.reshape(6, 2))\n assert numpy.allclose(x, xit.array)\n\n\ndef test_shape_transformer_2d_error():\n st = ShapeTransformerTo2D(axis=1)\n x = numpy.arange(6).reshape(2, 3)\n with pytest.raises(AttributeError):\n # call `inverse_transform` before `transform`\n xt = st.inverse_transform(x)\n\n\nif __name__ == '__main__':\n pytest.main([__file__, '-v', '-s'])\n", "import numpy\n\nimport chainer\nfrom chainer.dataset.convert import concat_examples\nfrom chainer import cuda, Variable # NOQA\nfrom chainer import reporter\nfrom chainer_chemistry.models.prediction.base import 
BaseForwardModel\n\n\nclass Regressor(BaseForwardModel):\n \"\"\"A simple regressor model.\n\n This is an example of chain that wraps another chain. It computes the\n loss and metrics based on a given input/label pair.\n\n Args:\n predictor (~chainer.Link): Predictor network.\n lossfun (function): Loss function.\n metrics_fun (function or dict or None): Function that computes metrics.\n label_key (int or str): Key to specify label variable from arguments.\n When it is ``int``, a variable in positional arguments is used.\n And when it is ``str``, a variable in keyword arguments is used.\n device (int or chainer._backend.Device):\n GPU device id of this Regressor to be used.\n -1 indicates to use in CPU.\n\n Attributes:\n predictor (~chainer.Link): Predictor network.\n lossfun (function): Loss function.\n y (~chainer.Variable): Prediction for the last minibatch.\n loss (~chainer.Variable): Loss value for the last minibatch.\n metrics (dict): Metrics computed in last minibatch\n compute_metrics (bool): If ``True``, compute metrics on the forward\n computation. The default value is ``True``.\n\n \"\"\"\n\n compute_metrics = True\n\n def __init__(self, predictor,\n lossfun=chainer.functions.mean_squared_error,\n metrics_fun=None, label_key=-1, device=-1):\n if not (isinstance(label_key, (int, str))):\n raise TypeError('label_key must be int or str, but is %s' %\n type(label_key))\n super(Regressor, self).__init__()\n self.lossfun = lossfun\n if metrics_fun is None:\n self.compute_metrics = False\n self.metrics_fun = {}\n elif callable(metrics_fun):\n self.metrics_fun = {'metrics': metrics_fun}\n elif isinstance(metrics_fun, dict):\n self.metrics_fun = metrics_fun\n else:\n raise TypeError('Unexpected type metrics_fun must be None or '\n 'Callable or dict. actual {}'\n .format(type(metrics_fun)))\n self.y = None\n self.loss = None\n self.metrics = None\n self.label_key = label_key\n\n with self.init_scope():\n self.predictor = predictor\n\n # `initialize` must be called after `init_scope`.\n self.initialize(device)\n\n def _convert_to_scalar(self, value):\n \"\"\"Converts an input value to a scalar if its type is a Variable,\n numpy or cupy array, otherwise it returns the value as it is.\n \"\"\"\n if isinstance(value, Variable):\n value = value.array\n if numpy.isscalar(value):\n return value\n if type(value) is not numpy.array:\n value = cuda.to_cpu(value)\n return numpy.asscalar(value)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Computes the loss value for an input and label pair.\n\n It also computes metrics and stores it to the attribute.\n\n Args:\n args (list of ~chainer.Variable): Input minibatch.\n kwargs (dict of ~chainer.Variable): Input minibatch.\n\n When ``label_key`` is ``int``, the correpoding element in ``args``\n is treated as ground truth labels. 
And when it is ``str``, the\n element in ``kwargs`` is used.\n The all elements of ``args`` and ``kwargs`` except the ground trush\n labels are features.\n It feeds features to the predictor and compare the result\n with ground truth labels.\n\n Returns:\n ~chainer.Variable: Loss value.\n\n \"\"\"\n\n # --- Separate `args` and `t` ---\n if isinstance(self.label_key, int):\n if not (-len(args) <= self.label_key < len(args)):\n msg = 'Label key %d is out of bounds' % self.label_key\n raise ValueError(msg)\n t = args[self.label_key]\n if self.label_key == -1:\n args = args[:-1]\n else:\n args = args[:self.label_key] + args[self.label_key + 1:]\n elif isinstance(self.label_key, str):\n if self.label_key not in kwargs:\n msg = 'Label key \"%s\" is not found' % self.label_key\n raise ValueError(msg)\n t = kwargs[self.label_key]\n del kwargs[self.label_key]\n else:\n raise TypeError('Label key type {} not supported'\n .format(type(self.label_key)))\n\n self.y = None\n self.loss = None\n self.metrics = None\n self.y = self.predictor(*args, **kwargs)\n self.loss = self.lossfun(self.y, t)\n\n # When the reported data is a numpy array, the loss and metrics values\n # are scalars. When the reported data is a cupy array, sometimes the\n # same values become arrays instead. This seems to be a bug inside the\n # reporter class, which needs to be addressed and fixed. Until then,\n # the reported values will be converted to numpy arrays.\n reporter.report(\n {'loss': self._convert_to_scalar(self.loss)}, self)\n\n if self.compute_metrics:\n # Note: self.metrics_fun is `dict`,\n # which is different from original chainer implementation\n self.metrics = {key: self._convert_to_scalar(value(self.y, t))\n for key, value in self.metrics_fun.items()}\n reporter.report(self.metrics, self)\n return self.loss\n\n def predict(\n self, data, batchsize=16, converter=concat_examples,\n retain_inputs=False, preprocess_fn=None, postprocess_fn=None):\n \"\"\"Predict label of each category by taking .\n\n Args:\n data: input data\n batchsize (int): batch size\n converter (Callable): convert from `data` to `inputs`\n preprocess_fn (Callable): Its input is numpy.ndarray or\n cupy.ndarray, it can return either Variable, cupy.ndarray or\n numpy.ndarray\n postprocess_fn (Callable): Its input argument is Variable,\n but this method may return either Variable, cupy.ndarray or\n numpy.ndarray.\n retain_inputs (bool): If True, this instance keeps inputs in\n `self.inputs` or not.\n\n Returns (tuple or numpy.ndarray): Typically, it is 1-dimensional int\n array with shape (batchsize, ) which represents each examples\n category prediction.\n\n \"\"\"\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n predict_labels = self._forward(\n data, fn=self.predictor, batchsize=batchsize,\n converter=converter, retain_inputs=retain_inputs,\n preprocess_fn=preprocess_fn, postprocess_fn=postprocess_fn)\n return predict_labels\n" ]
[ [ "numpy.asarray" ], [ "numpy.arange", "numpy.array", "numpy.allclose" ], [ "numpy.asscalar", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tsunaou/Event-Structure-Enumerator
[ "6b8df19517d73d6c144a4395aa9bf33e24bcd8cd" ]
[ "lemma1.py" ]
[ "import numpy as np\nfrom typing import List\nfrom utils import allSubLists, filterPositionsId, setFilterPositionsId, makeAlphas\n\n\ndef cond1(A: np.ndarray, alpha: np.array) -> List[np.array]:\n \"\"\"\n 生成满足条件的 betas\n :param A: 矩阵 n*n\n :param alpha: 行向量 1*n\n :return: 是否可以返回一个符合条件的beta,若存在则返回所有beta的list\n \"\"\"\n assert A.shape[0] == A.shape[1]\n assert A.shape[0] == alpha.shape[0]\n n = A.shape[0]\n zero = np.zeros((1, n), dtype=bool)[0]\n one = np.ones((1, n), dtype=bool)[0]\n if alpha.__eq__(zero).all():\n return allSubLists(one)\n else:\n id_rows = list()\n for idx, value in enumerate(alpha):\n if value:\n id_rows.append(idx)\n tmp = np.ones((1, n), dtype=bool)[0]\n for i in id_rows:\n tmp = tmp.__and__(A[i])\n\n return allSubLists(tmp)\n\n\ndef cond2(A: np.ndarray, beta: np.array) -> bool:\n \"\"\"\n :param A: 矩阵 n*n\n :param beta: 行向量 1*n\n :return:\n \"\"\"\n assert A.shape[0] == A.shape[1]\n assert A.shape[0] == beta.shape[0]\n n = A.shape[0]\n for i in range(n):\n for j in range(n):\n if not (A[i][j] and beta[i]):\n continue\n if not beta[j]:\n return False\n return True\n\n\ndef cond2v2(A: np.ndarray, beta: np.array) -> bool:\n \"\"\"\n :param A: 矩阵 n*n\n :param beta: 行向量 1*n\n :return:\n \"\"\"\n assert A.shape[0] == A.shape[1]\n assert A.shape[0] == beta.shape[0]\n row_set = setFilterPositionsId(beta)\n rows = [A[i] for i in row_set]\n sub_row_set = set()\n for row in rows:\n sub_row_set = sub_row_set.union(setFilterPositionsId(row))\n return sub_row_set <= row_set\n\n\ndef cond3(A: np.ndarray, alpha: np.array) -> bool:\n \"\"\"\n :param A: 矩阵 n*n\n :param alpha: 行向量 n*1\n :return:\n \"\"\"\n assert A.shape[0] == A.shape[1]\n assert A.shape[0] == alpha.shape[0]\n n = A.shape[0]\n for i in range(n):\n for j in range(n):\n if not (A[i][j] and alpha[j]):\n continue\n if not alpha[i]:\n return False\n return True\n\ndef cond3v2(A: np.ndarray, alpha: np.array) -> bool:\n \"\"\"\n :param A: 矩阵 n*n\n :param alpha: 行向量 n*1\n :return:\n \"\"\"\n assert A.shape[0] == A.shape[1]\n assert A.shape[0] == alpha.shape[0]\n col_set = setFilterPositionsId(alpha)\n AT = A.T\n cols = [AT[i] for i in col_set]\n sub_col_set = set()\n for col in cols:\n sub_col_set = sub_col_set.union(setFilterPositionsId(col))\n return sub_col_set <= col_set\n\ndef concat(A: np.ndarray, alpha: np.array, beta: np.array):\n \"\"\"\n [A a^T\n b 1 ]\n :param A: n*n\n :param alpha: 1*n\n :param beta: 1*n\n :return:\n \"\"\"\n n = A.shape[0]\n assert A.shape[0] == A.shape[1]\n assert alpha.shape[0] == n\n assert beta.shape[0] == n\n res = np.ones((n + 1, n + 1), dtype=A.dtype)\n res[:n, :n] = A\n res[:n, n] = alpha.T\n res[n, :n] = beta\n return res\n\n\nif __name__ == '__main__':\n A = np.ones((1, 1), dtype=bool)\n alphas = makeAlphas(1)\n result = list()\n for alpha in alphas:\n betas = cond1(A, alpha)\n for beta in betas:\n assert cond2(A, beta) == cond2v2(A, beta)\n if cond2(A, beta) and cond3(A, alpha):\n result.append(concat(A, alpha, beta))\n" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
juancruzgassoloncan/Udacity-Robo-nanodegree
[ "7621360ce05faf90660989e9d28f56da083246c9" ]
[ "src/rover/ex_4/extra_functions.py" ]
[ "import numpy as np\nimport cv2\nimport matplotlib.image as mpimg\n\n\ndef perspect_transform(img, src, dst):\n\n # Get transform matrix using cv2.getPerspectivTransform()\n M = cv2.getPerspectiveTransform(src, dst)\n # Warp image using cv2.warpPerspective()\n # keep same size as input image\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))\n # Return the result\n return warped\n\n\ndef color_thresh(img, rgb_thresh=(160, 160, 160)):\n # Create an array of zeros same xy size as img, but single channel\n color_select = np.zeros_like(img[:, :, 0])\n # Require that each pixel be above all thre threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:, :, 0] > rgb_thresh[0]) \\\n & (img[:, :, 1] > rgb_thresh[1]) \\\n & (img[:, :, 2] > rgb_thresh[2])\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 1\n # Return the binary image\n return color_select\n\n\nimage_name = '../data/IMG/robocam_2017_10_03_15_35_32_475.jpg'\nimage = mpimg.imread(image_name)\n# Define calibration box in source (actual) and destination (desired) coordinates\n# These source and destination points are defined to warp the image\n# to a grid where each 10x10 pixel square represents 1 square meter\ndst_size = 5\n# Set a bottom offset to account for the fact that the bottom of the image\n# is not the position of the rover but a bit in front of it\nbottom_offset = 6\nsource = np.float32([[35, 135], [120, 97], [202, 97], [300, 135]])\ndestination = np.float32([[image.shape[1] / 2 - dst_size, image.shape[0] - bottom_offset],\n [image.shape[1] / 2 - dst_size, image.shape[0] -\n bottom_offset - 2 * dst_size],\n [image.shape[1] / 2 + dst_size, image.shape[0] -\n bottom_offset - 2 * dst_size],\n [image.shape[1] / 2 + dst_size, image.shape[0] - bottom_offset]])\n" ]
[ [ "matplotlib.image.imread", "numpy.zeros_like", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thomas-brth/sentinel
[ "747bd0b9a4a9356be69aae6d6ebbfa500e845218" ]
[ "sentIA/utils/figure/__init__.py" ]
[ "# Plotting tools and utility functions\n# Nested GridSpec : https://matplotlib.org/stable/gallery/subplots_axes_and_figures/gridspec_nested.html#sphx-glr-gallery-subplots-axes-and-figures-gridspec-nested-py\n# GridSpec : https://matplotlib.org/stable/gallery/subplots_axes_and_figures/gridspec_multicolumn.html#sphx-glr-gallery-subplots-axes-and-figures-gridspec-multicolumn-py\n# colorbar : https://matplotlib.org/stable/gallery/subplots_axes_and_figures/colorbar_placement.html#sphx-glr-gallery-subplots-axes-and-figures-colorbar-placement-py\n\n#############\n## Imports ##\n#############\n\n## General imports ##\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nimport os\n\n###############\n## Constants ##\n###############\n\n#############\n## Classes ##\n#############\n\nclass MidpointNormalize(colors.Normalize):\n\t\"\"\"\n\tUseful object enbling to normalize colorbar with a chosen midpoint.\n\t\"\"\"\n\tdef __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n\t\tsuper(MidpointNormalize, self).__init__(vmin, vmax, clip)\n\t\tself.midpoint = midpoint\n\t\n\tdef __call__(self, value, clip=None):\n\t\tx, y = [self.vmin, self.midpoint, self.vmax], [0,0.5,1]\n\t\treturn np.ma.masked_array(np.interp(value, x, y))\n\nclass FigBase():\n\t\"\"\"\n\t\"\"\"\n\n\tCREDITS = \"Credit : EU, contains modified Copernicus Sentinel data, processed with custom script.\"\n\t\n\tdef __init__(self, title : str, dim : tuple):\n\t\tself.title = title\n\t\tself.fig = plt.figure(figsize=dim)\n\n\tdef _format(self):\n\t\tpass\n\n\tdef show(self):\n\t\tpass\n\n###############\n## Functions ##\n###############\n\ndef main():\n\tpass\n\nif __name__ == '__main__':\n\tmain()\nelse:\n\tprint(f\"Module {__name__} imported.\", flush=True)" ]
[ [ "numpy.interp", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Demonliquid/cars-python-cleaning
[ "91c516a33c4522114dc024cfaf04f1c1d594f973" ]
[ "Bike cleaning/motocicleta_p6.py" ]
[ "# %%\nimport os\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom googletrans import Translator\nfrom vininfo import Vin\n\n\n# %%\nmotocicleta_p6 = pd.read_excel(r'D:\\Basededatos\\Origen\\MOTOCICLETAS-COLOMBIA\\MOTOCICLETA_P6.xlsx', engine='openpyxl')\n\n\n# %%\nmotocicleta_p6.rename(columns={'MODELO': 'AÑO', 'ORIGEN': 'IMPORTACION'}, inplace=True)\nmotocicleta_p6.drop_duplicates(inplace=True)\nmotocicleta_p6convin = motocicleta_p6[motocicleta_p6[\"VIN\"].str.len() == 17]\nmotocicleta_p6convin = motocicleta_p6convin[motocicleta_p6convin[\"VIN\"].str.contains('Q|O|I', regex=True) == False]\nmotocicleta_p6sinvin = pd.concat([motocicleta_p6, motocicleta_p6convin, motocicleta_p6convin]).drop_duplicates(keep=False)\nmotocicleta_p6sinvin[\"VIN\"] = None\nmotocicleta_p6sinvin[\"ORIGEN\"] = None\n\n\n\n# %%\nmotocicleta_p6convin[\"ORIGEN\"] = motocicleta_p6convin[\"VIN\"].map(lambda x: Vin(x).country)\nmotocicleta_p6convin['ORIGEN'].replace('China (Mainland)', 'China', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace('Taiwan, China', 'China', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Cote d'Ivoire\", 'Costa de Marfil', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Germany/West Germany\", 'Alemania', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Korea (South)\", 'Corea del Sur', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Saudi Arabia\", 'Arabia Saudita', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"United Kingdom\", 'Reino Unido', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Italy\", 'Italia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Greece\", 'Grecia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Belgium\", 'Belgica', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Luxembourg\", 'Luxemburgo', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"United States\", 'Estados Unidos', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Japan\", 'Japon', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Czech Republic\", 'Republica Checa', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"United Arab Emirates\", 'Emiratos Arabes Unidos', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Ethiopia\", 'Etiopia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Hungary\", 'Hungria', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Brazil\", 'Brasil', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Spain\", 'España', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"France\", 'Francia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Switzerland\", 'Suiza', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Thailand\", 'Tailandia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Denmark\", 'Dinamarca', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Finland\", 'Finlandia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Poland\", 'Polonia', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Myanmar\", 'Birmania', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Ireland\", 'Irlanda', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"Netherlands\", 'Paises Bajos', inplace=True)\nmotocicleta_p6convin['ORIGEN'].replace(r\"South Africa\", 'Sudafrica', inplace=True)\n\n\n# %%\nmotocicleta_p6 = pd.concat([motocicleta_p6convin, motocicleta_p6sinvin])\n\n\n\n# %%\ndict(motocicleta_p6[\"ORIGEN\"].value_counts())\n\n# 
%%\nmotocicleta_p6.to_csv(r'D:\\Basededatos\\Limpioparaentregar\\MOTOCICLETAS-COLOMBIA\\motocicleta_p6.csv', index=False)\n\n\n# %%" ]
[ [ "pandas.concat", "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Taerbit/EXP
[ "b7468ae77bfccd7283cdfc0215af9d1032e472ce", "b7468ae77bfccd7283cdfc0215af9d1032e472ce" ]
[ "src/run2.py", "test/test_container.py" ]
[ "import Controller\nimport tensorflow as tf\nimport time\nimport efficientnet.tfkeras\n\n# Run pre-loaded pipelines\nstart_time= time.time()\n\n#E0 - G\nmodel = tf.keras.models.load_model(\"..\\\\models\\\\200324_EfficientNetB0NoisyStudent_001.h5\", compile=False)\n\nimage_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Lesions\\\\\", \".jpg\", \"ISIC_\", \"_downsampled\", 0]\nseg_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Segmentations\\\\\", \".png\", \"ISIC_\", \"_segmentation\", 0]\nnumpy_tags = [\"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200324_EfficientNetB0NoisyStudent_001\\\\grad_cam\\\\\", \"_sorted.npy\", \"ISIC_\", \"_downsampled\", 0]\nlabel_tag = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Dual Classifier\\\\\", \".csv\", \"ISIC_\", \"_downsampled\", 0]\n\ninput_tags = [image_tags, numpy_tags, seg_tags, label_tag]\n\noutput_dir = \"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200324_EfficientNetB0NoisyStudent_001\\\\grad_cam\\\\\"\n\nController.pre_loaded_shap(model, input_tags, output=output_dir, save_csv=True)\n\n#E0 - S\nmodel = tf.keras.models.load_model(\"..\\\\models\\\\200324_EfficientNetB0NoisyStudent_001.h5\", compile=False)\n\nimage_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Lesions\\\\\", \".jpg\", \"ISIC_\", \"_downsampled\", 0]\nseg_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Segmentations\\\\\", \".png\", \"ISIC_\", \"_segmentation\", 0]\nnumpy_tags = [\"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200324_EfficientNetB0NoisyStudent_001\\\\shap\\\\\", \"_sorted.npy\", \"ISIC_\", \"_downsampled\", 0]\nlabel_tag = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Dual Classifier\\\\\", \".csv\", \"ISIC_\", \"_downsampled\", 0]\n\ninput_tags = [image_tags, numpy_tags, seg_tags, label_tag]\n\noutput_dir = \"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200324_EfficientNetB0NoisyStudent_001\\\\shap\\\\\"\n\nController.pre_loaded_shap(model, input_tags, output=output_dir, save_csv=True)\n\n#E7 - G\nmodel = tf.keras.models.load_model(\"..\\\\models\\\\200411_EfficientNetB7NoisyStudent_001.h5\", compile=False)\n\nimage_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Lesions\\\\\", \".jpg\", \"ISIC_\", \"_downsampled\", 0]\nseg_tags = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Segmentations\\\\\", \".png\", \"ISIC_\", \"_segmentation\", 0]\nnumpy_tags = [\"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200411_EfficientNetB7NoisyStudent_001\\\\grad_cam\\\\\", \"_sorted.npy\", \"ISIC_\", \"_downsampled\", 0]\nlabel_tag = [\"C:\\\\Users\\\\finnt\\\\OneDrive\\\\Documents\\\\Uni\\\\Year 4\\\\Honours\\\\Project\\\\Dual Classifier\\\\\", \".csv\", \"ISIC_\", \"_downsampled\", 0]\n\ninput_tags = [image_tags, numpy_tags, seg_tags, label_tag]\n\noutput_dir = \"C:\\\\Users\\\\finnt\\\\Documents\\\\Honours Results\\\\200411_EfficientNetB7NoisyStudent_001\\\\grad_cam\\\\\"\n\nController.pre_loaded_shap(model, input_tags, output=output_dir, save_csv=True)\n\nprint(\"Finished: \" + str((time.time()-start_time)/60) + \" mins\")", "import EXP\nimport cv2\nimport numpy as np\n\noutput_size = (128, 128)\nx = 300\ny = 255\n\n\ndef 
check_images_match(p1, p2):\n if p1.shape == p2.shape:\n difference = cv2.subtract(p1, p2)\n b, g, r = cv2.split(difference)\n if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:\n return True\n return False\n\n\ndef test_segmentation_base_functions():\n s = s = EXP.Input.Segmentation([\"test_data\\\\\", \"test.png\", 0],\n output_size)\n\n assert s.get_number() == 1\n assert s.increment()\n assert s.get_number() == 2\n s.increment()\n assert s.increment() == False\n\ndef test_segmentation_loading():\n s = EXP.Input.Segmentation([\"test_data\\\\\", \"test.png\", 0],\n output_size)\n\n # Test Loading\n filepaths = [\"C:\\\\Users\\\\finnt\\PycharmProjects\\\\EXP\\\\test\\\\test_data\\\\0001test.png\",\n \"C:\\\\Users\\\\finnt\\PycharmProjects\\\\EXP\\\\test\\\\test_data\\\\0002test.png\",\n \"C:\\\\Users\\\\finnt\\PycharmProjects\\\\EXP\\\\test\\\\test_data\\\\0003test.png\"]\n img = []\n for i in range(3):\n image = cv2.imread(filepaths[i])\n img.append(cv2.resize(image, output_size))\n\n assert check_images_match(s.load(), img[0])\n assert check_images_match(s.load(), img[1])\n assert check_images_match(s.load(), img[2])\n\ndef test_input_image():\n i = EXP.Input.Input_Image([\"test_data\\\\\", \".jpg\"], x, y)\n\n img = i.load()\n\n test = EXP.Input.load_input_image(\"test_data\\\\input0.jpg\", x, y)\n\n assert np.array_equal(img, test)\n\ndef test_labels():\n l = EXP.Input.Label([\"test_data\\\\\", \".csv\"], \"column3\", \"column1\")\n\n assert l.load() == 1.3\n assert l.load() == 2.3\n assert l.load() == 3.3\n\n'''\n\n self.assertTrue(s.get_number(), 2, msg=\"First number is not returned correctly\")\n s.increment()\n self.assertFalse(s.increment(), 1, msg=\"Correct Truth value is not returned by the increment whilst out of the list\")\n\n\nself.assertTrue(s.increment())#,\n #msg=\"Correct Truth value is not returned by the increment whilst still in the list\")\n self.assertTrue(s.get_number())#, 2, msg=\"First number is not returned correctly\")\n s.increment()\n self.assertFalse(s.increment())#, 1,\n #msg=\"Correct Truth value is not returned by the increment whilst out of the list\")\n\n\n'''\n" ]
[ [ "tensorflow.keras.models.load_model" ], [ "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
noe/iterative_expansion_lms
[ "a5533a60f6f749673dae2329eeae0646ee2b740d" ]
[ "src/syntaxd/fairseq/criterion.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom argparse import ArgumentParser\nimport math\nimport torch.nn.functional as F\n\nfrom fairseq import utils\n\nfrom fairseq.criterions import FairseqCriterion, register_criterion\n\nfrom syntaxd.data.dependency.binarize_data import KEY_PREV_LEVEL_TOKENS\n\n\n@register_criterion('token_expansion_cross_entropy')\nclass DoubleCrossEntropyCriterion(FairseqCriterion):\n\n def __init__(self, args, task):\n super().__init__(args, task)\n self.scale_loss_with_padding = args.scale_loss_with_padding\n\n @staticmethod\n def add_args(parser: ArgumentParser):\n parser.add_argument('--scale-loss-with-padding', action='store_true')\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n token_loss, expansion_loss = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']\n num_batch_tokens = sample['num_batch_tokens']\n num_non_pad_tokens = num_batch_tokens - sample['num_pad_tokens']\n batch_density = float(num_non_pad_tokens) / num_batch_tokens\n loss = (token_loss + expansion_loss)\n if self.scale_loss_with_padding:\n loss = loss * batch_density\n logging_output = {\n 'loss': utils.item(loss.data) if reduce else loss.data,\n 'token_loss': utils.item(token_loss.data) if reduce else token_loss.data,\n 'expansion_loss': utils.item(expansion_loss.data) if reduce else expansion_loss.data,\n 'ntokens': sample['ntokens'],\n 'nsentences': sample['net_input'][KEY_PREV_LEVEL_TOKENS].size(0),\n 'sample_size': sample_size,\n 'target_num_pad_tokens': sample['num_pad_tokens'],\n 'target_num_batch_tokens': sample['num_batch_tokens'],\n 'target_pad_ratio': sample['pad_ratio'],\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n tokens_lprobs = model.get_normalized_probs_tokens(net_output, log_probs=True)\n tokens_lprobs = tokens_lprobs.view(-1, tokens_lprobs.size(-1))\n tokens_target = model.get_targets_tokens(sample, net_output).view(-1)\n token_loss = F.nll_loss(\n tokens_lprobs,\n tokens_target,\n ignore_index=self.padding_idx,\n reduction='sum' if reduce else 'none',\n )\n\n expansions_lprobs = model.get_normalized_probs_expansions(net_output, log_probs=True)\n expansions_lprobs = expansions_lprobs.view(-1, expansions_lprobs.size(-1))\n expansions_target = model.get_targets_expansions(sample, net_output).view(-1)\n expansions_loss = F.nll_loss(\n expansions_lprobs,\n expansions_target,\n ignore_index=self.padding_idx,\n reduction='sum' if reduce else 'none',\n )\n\n return token_loss, expansions_loss\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n token_loss_sum = sum(log.get('token_loss', 0) for log in logging_outputs)\n expansion_loss_sum = sum(log.get('expansion_loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = 
sum(log.get('sample_size', 0) for log in logging_outputs)\n num_batch_tokens = sum(log.get('target_num_batch_tokens', 0) for log in logging_outputs)\n num_pad_tokens = sum(log.get('target_num_pad_tokens', 0) for log in logging_outputs)\n pad_ratio = float(num_pad_tokens) / num_batch_tokens\n agg_output = {\n 'loss': loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.,\n 'token_loss': token_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.,\n 'expansion_loss': expansion_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.,\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n 'target_num_pad_tokens': num_pad_tokens,\n 'target_num_non_pad_tokens': num_batch_tokens - num_pad_tokens,\n 'target_num_batch_tokens': num_batch_tokens,\n 'target_pad_ratio': pad_ratio,\n }\n if sample_size != ntokens:\n agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)\n return agg_output\n" ]
[ [ "torch.nn.functional.nll_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
1054/a3cosmos-gas-evolution
[ "66027338602ed2830e289cfbb4db6200739b39d6" ]
[ "a3cosmos_gas_evolution/Common_Python_Code/calc_galaxy_luminosity_function.py" ]
[ "#!/usr/bin/env python\n# \n# 20190222\n# copied from \"calc_stellar_mass_function.py\", this code will superceed \"calc_stellar_mass_function.py\". \n# \n\nfrom __future__ import print_function\n\nimport os, sys, re, json, time, astropy\nimport numpy as np\nfrom astropy.table import Table, Column, hstack\nfrom copy import copy\nfrom numpy import log, log10, power, sum, sqrt, pi, exp\npow = power\nlg = log10\nln = log\nfrom scipy.interpolate import InterpolatedUnivariateSpline, interp1d\n\nif not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))\nimport apply_cosmology\ncosmo = apply_cosmology.cosmo\n\nif sys.version_info.major >= 3:\n long = int\nelse:\n pass\n\n\n\n\n# \n# def \n# \ndef Schechter_Function_for_LF(L, L_character, Phi_character, alpha):\n # \n # Schechter (1976)\n # \n # Phi(L) dL = (Phi_*) * (L/L_*)**(alpha) * exp(-L/L_*) dL/L_*\n # = (Phi_*) * x**(alpha) * exp(-x) dx\n # = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dx\n # = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dlnx\n # = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dlgx * ln(10)\n # = (Phi_*) * 10**((lgL-lgL_*)*(alpha+1)) * exp(-10**(lgL-lgL_*)) * ln(10) dlgx\n # = (Our_Phi_Phi_Schechter) dlgx\n # \n #lgx = lgL-lg_L0\n #Phi_Schechter = phi * (10**(lgx*(alpha+1))) * (np.exp(-10**lgx)) * ln(10) # per dex and already multiplied ln(10), so that its integral directly equals \\int Phi(L) / L dL\n # \n Phi_Schechter = Phi_character * (L/L_character)**(alpha) * np.exp(-(L/L_character)) # Mpc-3 dex-1\n #Phi_Schechter = Phi_Schechter * ln(10)\n return Phi_Schechter\n\n\ndef Saunders_Function_for_LF(L, L_character, Phi_character, alpha, sigma):\n # Saunders et al. (1990)\n Phi_Saunders = Phi_character * (L/L_character)**(1-alpha) * np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 )\n #print('Phi_character', Phi_character)\n #print('(L/L_character)**(1-alpha)', (L/L_character)**(1-alpha))\n #print('np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 )', np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 ))\n #print('Phi_Saunders', Phi_Saunders)\n return Phi_Saunders\n\n\n\n\n\n# \n# def \n# \ndef calc_radio_LF_Novak2017(z, lgL=None, galaxy_type = 'SFG'):\n # \n # Novak 2017 bibcode:2017A&A...602A...5N\n # IMF: Chabrier 2003\n # Saunders et al. 
(1990)\n # Outputs: lgL_grid, lgPhi_grid\n # \n # check z\n if not np.isscalar(z):\n raise ValueError('Please input a float number as the redshift!')\n # \n # check galaxy_type\n if not (type(galaxy_type) is str):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n else:\n if not (galaxy_type in ['ALL', 'SFG', 'QG']):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n # \n # make lgL_grid\n if lgL is None:\n lgL_grid = np.linspace(18.0, 25.0, num=1000, endpoint=True)\n else:\n lgL_grid = lgL\n # \n L_grid = 10**lgL_grid\n # \n # read LF parameters\n L_character = 1.85e21 # * 1.4e9 / 3.839e25 # vLv(1.4GHz,rest) = W Hz-1 --> Lsun\n Phi_character = 3.55e-3 # Mpc-3 dex-1\n alpha = 1.22\n sigma = 0.63\n # \n #Phi_z0 = Saunders_Function(L_grid, L_character, Phi_character, alpha, sigma)\n # \n # check z\n LF_zmin = 0.0\n LF_zmax = +np.inf\n if z < LF_zmin or z > LF_zmax:\n raise ValueError('calc_radio_LF_Novak2017: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))\n # \n # scale to z via pure luminosity evolution\n alphaL = 3.16\n betaL = -0.32\n L_grid_z = (L_grid / ((1.0+z)**(alphaL+(z*betaL))))\n Phi = Saunders_Function_for_LF(L_grid_z, L_character, Phi_character, alpha, sigma)\n lgPhi = np.log10(Phi)\n # \n if lgL is None:\n return lgL_grid, lgPhi\n else:\n return lgPhi\n\n\n\ndef calc_IR_250um_LF_Koprowski2017(z, lgL=None, galaxy_type = 'SFG'):\n # \n # Koprowski 2017 bibcode:2017MNRAS.471.4155K\n # IMF: Chabrier 2003\n # Saunders et al. (1990)\n # Outputs: lgL_grid, lgPhi_grid\n # \n # check z\n if not np.isscalar(z):\n raise ValueError('Please input a float number as the redshift!')\n # \n # check galaxy_type\n if not (type(galaxy_type) is str):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n else:\n if not (galaxy_type in ['ALL', 'SFG', 'QG']):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n # \n # make lgL_grid\n if lgL is None:\n lgL_grid = np.linspace(24.0, 27.0, num=1000, endpoint=True)\n else:\n lgL_grid = lgL\n # \n L_grid = 10**lgL_grid\n # \n # read LF parameters\n table_z_lower = [0.5, 1.5, 2.5, 3.5]\n table_z_upper = [1.5, 2.5, 3.5, 4.5]\n table_lgL_character = [25.20, 25.40, 25.63, 25.84] # W Hz-1\n table_lgPhi_character = [-2.88, -3.03, -3.73, -4.59] # Mpc-3 dex-1\n alpha = -0.4\n # \n # check z\n LF_zmin = table_z_lower[0]\n LF_zmax = table_z_upper[-1]\n if z < LF_zmin or z > LF_zmax:\n raise ValueError('calc_IR_250um_LF_Koprowski2017: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))\n # \n # scale to z (using step function... <TODO>)\n Phi = None\n lgPhi = None\n for i in range(len(table_z_upper)):\n if z >= table_z_lower[i] and z <= table_z_upper[i]:\n L_character = 10**(table_lgL_character[i])\n Phi_character = 10**(table_lgPhi_character[i])\n Phi = Schechter_Function_for_LF(L_grid, L_character, Phi_character, alpha)\n lgPhi = np.log10(Phi)\n break\n # \n if lgL is None:\n return lgL_grid, lgPhi\n else:\n return lgPhi\n\n\n\ndef calc_IR_LF_Gruppioni2013(z, lgL=None, galaxy_type = 'SFG'):\n # \n # Gruppioni 2013 bibcode:\n # IMF: Chabrier 2003\n # H0 = 71 km s−1 Mpc−1, Ωm = 0.27, and ΩΛ = 0.73.\n # Saunders et al. 
(1990)\n # Outputs: lgL_grid, lgPhi_grid\n # \n # check z\n if not np.isscalar(z):\n raise ValueError('Please input a float number as the redshift!')\n # \n # check galaxy_type\n if not (type(galaxy_type) is str):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n else:\n if not (galaxy_type in ['ALL', 'SFG', 'QG']):\n raise ValueError('Please input either \"ALL\", \"SFG\" or \"QG\" as the galaxy_type!')\n # \n # make lgL_grid\n if lgL is None:\n lgL_grid = np.linspace(8.0, 14.0, num=1000, endpoint=True)\n else:\n lgL_grid = lgL\n # \n L_grid = 10**lgL_grid\n # \n # read LF parameters (their Table 7)\n table_data = [ [0.0 , 0.3 , 1.15, 0.52, 10.12, -2.29], \n [0.3 , 0.45, 1.2 , 0.5 , 10.41, -2.31], \n [0.45, 0.6 , 1.2 , 0.5 , 10.55, -2.35], \n [0.6 , 0.8 , 1.2 , 0.5 , 10.71, -2.35], \n [0.8 , 1.0 , 1.2 , 0.5 , 10.97, -2.40], \n [1.0 , 1.2 , 1.2 , 0.5 , 11.13, -2.43], \n [1.2 , 1.7 , 1.2 , 0.5 , 11.37, -2.70], \n [1.7 , 2.0 , 1.2 , 0.5 , 11.50, -3.00], \n [2.0 , 2.5 , 1.2 , 0.5 , 11.60, -3.01], \n [2.5 , 3.0 , 1.2 , 0.5 , 11.92, -3.27], \n [3.0 , 4.2 , 1.2 , 0.5 , 11.90, -3.74] ]\n table_data = np.array(table_data).T\n table_z_lower = table_data[0]\n table_z_upper = table_data[1]\n table_alpha = table_data[2]\n table_sigma = table_data[3]\n table_lgL_character = table_data[4] # Lsun\n table_lgPhi_character = table_data[5] # Mpc-3 dex-1\n # \n # check z\n LF_zmin = table_z_lower[0]\n LF_zmax = table_z_upper[-1]\n if z < LF_zmin or z > LF_zmax:\n raise ValueError('calc_IR_LF_Gruppioni2013: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))\n # \n # scale to z (using step function... <TODO>)\n Phi = None\n lgPhi = None\n for i in range(len(table_z_upper)):\n if z >= table_z_lower[i] and z <= table_z_upper[i]:\n L_character = 10**(table_lgL_character[i])\n Phi_character = 10**(table_lgPhi_character[i])\n alpha = table_alpha[i]\n sigma = table_sigma[i]\n Phi = Saunders_Function_for_LF(L_grid, L_character, Phi_character, alpha, sigma)\n lgPhi = np.log10(Phi)\n break\n # \n if lgL is None:\n return lgL_grid, lgPhi\n else:\n return lgPhi\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.linspace", "numpy.log10", "numpy.isscalar", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lindsey98/lightly
[ "6a66a38ccd9fc4422f5ebb0a13009abe8266d8e2", "6a66a38ccd9fc4422f5ebb0a13009abe8266d8e2" ]
[ "mytest/test.py", "lightly/active_learning/scorers/classification.py" ]
[ "import torch\nimport torchvision\nimport lightly.models as models\nimport lightly.loss as loss\nimport lightly.data as data\nimport pytorch_lightning as pl\nimport math\nimport os\nimport shutil\nfrom tqdm import tqdm\nimport logging\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1,0\"\n\nexp_name = 'CIFAR10'\nstart_epoch = 0\navg_loss = 0.\navg_output_std = 0.\nepochs = 800\nout_dim = 2048\ninput_size = 32\n\nlogging.basicConfig(filename='mytest_{}.log'.format(exp_name), level=logging.INFO)\nlogger = logging.getLogger('trace')\n\n\n# the collate function applies random transforms to the input images\ncollate_fn = data.ImageCollateFunction(input_size=input_size, \n # require invariance to flips and rotations?\n hf_prob=0.0, # horizontal flip prob\n vf_prob=0.0, # vertical flip prob\n rr_prob=0.0, # (+90 degree) rotation is applied prob\n min_scale=0.0,\n cj_prob=0.7, # color jitter prob\n )\n\n# create a dataset from your image folder\ndataset = data.LightlyDataset(input_dir='../datasets/{}/train/'.format(exp_name))\nprint('Dataset is loaded')\nlogger.info('Dataset is loaded')\n\n# build a PyTorch dataloader\ndataloader = torch.utils.data.DataLoader(\n dataset, # pass the dataset to the dataloader\n batch_size=64, # a large batch size helps with the learning\n shuffle=True, # shuffling is important!\n collate_fn=collate_fn, # apply transformations to the input images\n drop_last=True) # FIXME: drop_last for distributed training, single-gpu training does not need this to be True\n\nlogger.info('Length of data {}'.format(len(dataloader.dataset)))\n\n# use a resnet50 backbone\nresnet = torchvision.models.resnet.resnet18()\nresnet = torch.nn.Sequential(*list(resnet.children())[:-1])\n\n# build the simsiam model\nmodel = models.SimSiam(resnet, num_ftrs=512)\n\n# use a criterion for self-supervised learning\ncriterion = loss.SymNegCosineSimilarityLoss()\n\n# get a PyTorch optimizer\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-0, weight_decay=1e-5)\n\n# push to device\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nmodel = torch.nn.DataParallel(model)\n\n# if resume\nif os.path.exists('../output/{}.pt'.format(exp_name)):\n model.load_state_dict(torch.load('../output/{}.pt'.format(exp_name), map_location=\"cpu\"))\n logger.info('Resume model {}'.format(exp_name))\n \nmodel.to(device)\nprint('Model is initialized and pushed to device')\nlogger.info('Model is initialized and pushed to device')\n\n# Train!\nfor e in range(start_epoch, epochs):\n \n print('Epoch {}'.format(str(e)))\n logger.info('Epoch {}'.format(str(e)))\n \n for (x0, x1), _, _ in tqdm(dataloader):\n\n # move images to the gpu\n x0 = x0.to(device)\n x1 = x1.to(device)\n\n # run the model on both transforms of the images\n # the output of the simsiam model is a y containing the predictions\n # and projections for each input x\n y0, y1 = model(x0, x1)\n\n # backpropagation\n loss = criterion(y0, y1)\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n # calculate the per-dimension standard deviation of the outputs\n # we can use this later to check whether the embeddings are collapsing\n output, _ = y0\n output = output.detach()\n output = torch.nn.functional.normalize(output, dim=1)\n\n output_std = torch.std(output, 0)\n output_std = output_std.mean()\n\n # use moving averages to track the loss and standard deviation\n w = 0.9\n avg_loss = w * avg_loss + (1 - w) * loss.item()\n avg_output_std = w * avg_output_std + (1 - w) * output_std.item()\n \n torch.save(model.state_dict(), 
'../output/{}.pt'.format(exp_name))\n # the level of collapse is large if the standard deviation of the l2\n # normalized output is much smaller than 1 / sqrt(dim)\n collapse_level = max(0., 1 - math.sqrt(out_dim) * avg_output_std)\n # print intermediate results\n print(f'[Epoch {e:3d}] '\n f'Loss = {avg_loss:.2f} | '\n f'Collapse Level: {collapse_level:.2f} / 1.00')\n \n logger.info(f'[Epoch {e:3d}] '\n f'Loss = {avg_loss:.2f} | '\n f'Collapse Level: {collapse_level:.2f} / 1.00')\n \n \n ", "import warnings\nfrom typing import *\n\nimport numpy as np\n\nfrom lightly.active_learning.scorers.scorer import Scorer\n\n\ndef _entropy(probs: np.ndarray, axis: int = 1) -> np.ndarray:\n \"\"\"Computes the entropy of a probability matrix over one array\n\n Args:\n probs:\n A probability matrix of shape (N, M)\n axis:\n The axis the compute the probability over, the output does not have this axis anymore\n\n Exammple:\n if probs.shape = (N, C) and axis = 1 then entropies.shape = (N, )\n\n Returns:\n The entropy of the prediction vectors, shape: probs.shape, but without the specified axis\n \"\"\"\n zeros = np.zeros_like(probs)\n log_probs = np.log2(probs, out=zeros, where=probs > 0)\n entropies = -1 * np.sum(probs * log_probs, axis=axis)\n return entropies\n\ndef _margin_largest_secondlargest(probs: np.ndarray) -> np.ndarray:\n \"\"\"Computes the margin of a probability matrix\n\n Args:\n probs:\n A probability matrix of shape (N, M)\n\n Exammple:\n if probs.shape = (N, C) then margins.shape = (N, )\n\n Returns:\n The margin of the prediction vectors\n \"\"\"\n sorted_probs = np.partition(probs, -2, axis=1)\n margins = sorted_probs[:, -1] - sorted_probs[:, -2]\n return margins\n\n\nclass ScorerClassification(Scorer):\n \"\"\"Class to compute active learning scores from the model_output of a classification task.\n\n Currently supports the following scorers:\n\n The following three uncertainty scores are taken from\n http://burrsettles.com/pub/settles.activelearning.pdf, Section 3.1, page 12f\n and also explained in https://towardsdatascience.com/uncertainty-sampling-cheatsheet-ec57bc067c0b\n They all have in common, that the score is highest if all classes have the\n same confidence and are 0 if the model assigns 100% probability to a single class.\n The differ in the number of class confidences they take into account.\n\n `uncertainty_least_confidence`:\n This score is 1 - the highest confidence prediction. It is high\n when the confidence about the most probable class is low.\n\n `uncertainty_margin`\n This score is 1- the margin between the highest conficence\n and second highest confidence prediction. It is high when the model\n cannot decide between the two most probable classes.\n\n `uncertainty_entropy`:\n This scorer computes the entropy of the prediction. The confidences\n for all classes are considered to compute the entropy of a sample.\n\n Attributes:\n model_output:\n Predictions of shape N x C where N is the number of unlabeled samples\n and C is the number of classes in the classification task. 
Must be\n normalized such that the sum over each row is 1.\n The order of the predictions must be the one specified by\n ActiveLearningAgent.unlabeled_set.\n\n Examples:\n >>> # example with three unlabeled samples\n >>> al_agent.unlabeled_set\n >>> > ['img0.jpg', 'img1.jpg', 'img2.jpg']\n >>> predictions = np.array(\n >>> [\n >>> [0.1, 0.9], # predictions for img0.jpg\n >>> [0.3, 0.7], # predictions for img1.jpg\n >>> [0.8, 0.2], # predictions for img2.jpg\n >>> ] \n >>> )\n >>> np.sum(predictions, axis=1)\n >>> > array([1., 1., 1.])\n >>> scorer = ScorerClassification(predictions)\n\n \"\"\"\n def __init__(self, model_output: Union[np.ndarray, List[List[float]]]):\n if not isinstance(model_output, np.ndarray):\n model_output = np.array(model_output)\n\n validated_model_output = self.ensure_valid_model_output(model_output)\n\n super(ScorerClassification, self).__init__(validated_model_output)\n\n def ensure_valid_model_output(self, model_output: np.ndarray) -> np.ndarray:\n if len(model_output) == 0:\n return model_output\n if len(model_output.shape) != 2:\n raise ValueError(\"ScorerClassification model_output must be a 2-dimensional array\")\n if model_output.shape[1] == 0:\n raise ValueError(\"ScorerClassification model_output must not have an empty dimension 1\")\n if model_output.shape[1] == 1:\n # assuming a binary classification problem with\n # the model_output denoting the probability of the first class\n model_output = np.concatenate([model_output, 1-model_output], axis=1)\n return model_output\n\n @classmethod\n def score_names(cls) -> List[str]:\n \"\"\"Returns the names of the calculated active learning scores\n \"\"\"\n score_names = list(cls(model_output=[[0.5, 0.5]]).calculate_scores().keys())\n return score_names\n\n def calculate_scores(self, normalize_to_0_1: bool = True) -> Dict[str, np.ndarray]:\n \"\"\"Calculates and returns the active learning scores.\n\n Args:\n normalize_to_0_1:\n If this is true, each score is normalized to have a\n theoretical minimum of 0 and a theoretical maximum of 1.\n\n Returns:\n A dictionary mapping from the score name (as string)\n to the scores (as a single-dimensional numpy array).\n \"\"\"\n if len(self.model_output) == 0:\n return {score_name: np.array([]) for score_name in self.score_names()}\n\n scores_with_names = [\n self._get_scores_uncertainty_least_confidence(),\n self._get_scores_uncertainty_margin(),\n self._get_scores_uncertainty_entropy()\n ]\n\n scores = dict()\n for score, score_name in scores_with_names:\n score = np.nan_to_num(score)\n scores[score_name] = score\n\n if normalize_to_0_1:\n scores = self.normalize_scores_0_1(scores)\n\n return scores\n\n def normalize_scores_0_1(self, scores: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n num_classes = self.model_output.shape[1]\n model_output_very_sure = np.zeros(shape=(1,num_classes))\n model_output_very_sure[0, 0] = 1\n model_output_very_unsure = np.ones_like(model_output_very_sure)/num_classes\n\n scores_minimum = ScorerClassification(model_output_very_sure).calculate_scores(normalize_to_0_1=False)\n scores_maximum = ScorerClassification(model_output_very_unsure).calculate_scores(normalize_to_0_1=False)\n\n for score_name in scores.keys():\n interp_xp = [float(scores_minimum[score_name]), float(scores_maximum[score_name])]\n interp_fp = [0, 1]\n scores[score_name] = np.interp(scores[score_name], interp_xp, interp_fp)\n\n return scores\n\n def _get_scores_uncertainty_least_confidence(self):\n scores = 1 - np.max(self.model_output, axis=1)\n return scores, 
\"uncertainty_least_confidence\"\n\n def _get_scores_uncertainty_margin(self):\n scores = 1 - _margin_largest_secondlargest(self.model_output)\n return scores, \"uncertainty_margin\"\n\n def _get_scores_uncertainty_entropy(self):\n scores = _entropy(self.model_output, axis=1)\n return scores, \"uncertainty_entropy\"\n" ]
[ [ "torch.nn.functional.normalize", "torch.utils.data.DataLoader", "torch.std", "torch.cuda.is_available", "torch.nn.DataParallel" ], [ "numpy.partition", "numpy.log2", "numpy.ones_like", "numpy.nan_to_num", "numpy.concatenate", "numpy.max", "numpy.zeros_like", "numpy.interp", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bartolkaruza/pytorch-lightning-bolts
[ "2e903c333c37ea83394c7da2ce826de1b82fb356" ]
[ "pl_bolts/models/self_supervised/simclr/transforms.py" ]
[ "import numpy as np\n\nfrom pl_bolts.utils.warnings import warn_missing_pkg\n\ntry:\n import torchvision.transforms as transforms\nexcept ModuleNotFoundError:\n warn_missing_pkg('torchvision') # pragma: no-cover\n _TORCHVISION_AVAILABLE = False\nelse:\n _TORCHVISION_AVAILABLE = True\n\ntry:\n import cv2\nexcept ModuleNotFoundError:\n warn_missing_pkg('cv2', pypi_name='opencv-python') # pragma: no-cover\n\n\nclass SimCLRTrainDataTransform(object):\n \"\"\"\n Transforms for SimCLR\n\n Transform::\n\n RandomResizedCrop(size=self.input_height)\n RandomHorizontalFlip()\n RandomApply([color_jitter], p=0.8)\n RandomGrayscale(p=0.2)\n GaussianBlur(kernel_size=int(0.1 * self.input_height))\n transforms.ToTensor()\n\n Example::\n\n from pl_bolts.models.self_supervised.simclr.transforms import SimCLRTrainDataTransform\n\n transform = SimCLRTrainDataTransform(input_height=32)\n x = sample()\n (xi, xj) = transform(x)\n \"\"\"\n def __init__(\n self,\n input_height: int = 224,\n gaussian_blur: bool = True,\n jitter_strength: float = 1.,\n normalize=None\n ) -> None:\n\n if not _TORCHVISION_AVAILABLE:\n raise ModuleNotFoundError( # pragma: no-cover\n 'You want to use `transforms` from `torchvision` which is not installed yet.'\n )\n\n self.jitter_strength = jitter_strength\n self.input_height = input_height\n self.gaussian_blur = gaussian_blur\n self.normalize = normalize\n\n self.color_jitter = transforms.ColorJitter(\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.2 * self.jitter_strength\n )\n\n data_transforms = [\n transforms.RandomResizedCrop(size=self.input_height),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([self.color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2)\n ]\n\n if self.gaussian_blur:\n kernel_size = int(0.1 * self.input_height)\n if kernel_size % 2 == 0:\n kernel_size += 1\n\n data_transforms.append(GaussianBlur(kernel_size=kernel_size, p=0.5))\n\n data_transforms = transforms.Compose(data_transforms)\n\n if normalize is None:\n self.final_transform = transforms.ToTensor()\n else:\n self.final_transform = transforms.Compose([transforms.ToTensor(), normalize])\n\n self.train_transform = transforms.Compose([data_transforms, self.final_transform])\n\n # add online train transform of the size of global view\n self.online_transform = transforms.Compose([\n transforms.RandomResizedCrop(self.input_height),\n transforms.RandomHorizontalFlip(),\n self.final_transform\n ])\n\n def __call__(self, sample):\n transform = self.train_transform\n\n xi = transform(sample)\n xj = transform(sample)\n\n return xi, xj, self.online_transform(sample)\n\n\nclass SimCLREvalDataTransform(SimCLRTrainDataTransform):\n \"\"\"\n Transforms for SimCLR\n\n Transform::\n\n Resize(input_height + 10, interpolation=3)\n transforms.CenterCrop(input_height),\n transforms.ToTensor()\n\n Example::\n\n from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform\n\n transform = SimCLREvalDataTransform(input_height=32)\n x = sample()\n (xi, xj) = transform(x)\n \"\"\"\n def __init__(\n self,\n input_height: int = 224,\n gaussian_blur: bool = True,\n jitter_strength: float = 1.,\n normalize=None\n ):\n super().__init__(\n normalize=normalize,\n input_height=input_height,\n gaussian_blur=gaussian_blur,\n jitter_strength=jitter_strength\n )\n\n # replace online transform with eval time transform\n self.online_transform = transforms.Compose([\n transforms.Resize(int(self.input_height + 0.1 * self.input_height)),\n 
transforms.CenterCrop(self.input_height),\n self.final_transform,\n ])\n\n\nclass SimCLRFinetuneTransform(object):\n def __init__(\n self,\n input_height: int = 224,\n jitter_strength: float = 1.,\n normalize=None,\n eval_transform: bool = False\n ) -> None:\n\n self.jitter_strength = jitter_strength\n self.input_height = input_height\n self.normalize = normalize\n\n self.color_jitter = transforms.ColorJitter(\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.2 * self.jitter_strength\n )\n\n if not eval_transform:\n data_transforms = [\n transforms.RandomResizedCrop(size=self.input_height),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([self.color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2)\n ]\n else:\n data_transforms = [\n transforms.Resize(int(self.input_height + 0.1 * self.input_height)),\n transforms.CenterCrop(self.input_height)\n ]\n\n if normalize is None:\n final_transform = transforms.ToTensor()\n else:\n final_transform = transforms.Compose([transforms.ToTensor(), normalize])\n\n data_transforms.append(final_transform)\n self.transform = transforms.Compose(data_transforms)\n\n def __call__(self, sample):\n return self.transform(sample)\n\n\nclass GaussianBlur(object):\n # Implements Gaussian blur as described in the SimCLR paper\n def __init__(self, kernel_size, p=0.5, min=0.1, max=2.0):\n self.min = min\n self.max = max\n\n # kernel size is set to be 10% of the image height/width\n self.kernel_size = kernel_size\n self.p = p\n\n def __call__(self, sample):\n sample = np.array(sample)\n\n # blur the image with a 50% chance\n prob = np.random.random_sample()\n\n if prob < self.p:\n sigma = (self.max - self.min) * np.random.random_sample() + self.min\n sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)\n\n return sample\n" ]
[ [ "numpy.array", "numpy.random.random_sample" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gibbbone/ohmnet
[ "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e" ]
[ "ohmnet/gensimmod/utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This OhmNet code is adapted from:\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nfrom __future__ import with_statement\n\nimport logging\nimport itertools\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from html.entities import name2codepoint as n2cp\nexcept ImportError:\n from htmlentitydefs import name2codepoint as n2cp\ntry:\n import cPickle as _pickle\nexcept ImportError:\n import pickle as _pickle\n\nimport sys\nimport os\n\nimport numpy\nimport scipy.sparse\n\nif sys.version_info[0] >= 3:\n unicode = str\n\nfrom six import iteritems\n\ntry:\n from smart_open import smart_open\nexcept ImportError:\n logger.info(\"smart_open library not found; falling back to local-filesystem-only\")\n\n def make_closing(base, **attrs):\n \"\"\"\n Add support for `with Base(attrs) as fout:` to the base class if it's missing.\n The base class' `close()` method will be called on context exit, to always close the file properly.\n\n This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise\n raise \"AttributeError: GzipFile instance has no attribute '__exit__'\".\n\n \"\"\"\n if not hasattr(base, '__enter__'):\n attrs['__enter__'] = lambda self: self\n if not hasattr(base, '__exit__'):\n attrs['__exit__'] = lambda self, type, value, traceback: self.close()\n return type('Closing' + base.__name__, (base, object), attrs)\n\n def smart_open(fname, mode='rb'):\n _, ext = os.path.splitext(fname)\n if ext == '.bz2':\n from bz2 import BZ2File\n return make_closing(BZ2File)(fname, mode)\n if ext == '.gz':\n from gzip import GzipFile\n return make_closing(GzipFile)(fname, mode)\n return open(fname, mode)\n\n\ndef any2utf8(text, errors='strict', encoding='utf8'):\n \"\"\"Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8.\"\"\"\n if isinstance(text, unicode):\n return text.encode('utf8')\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\n return unicode(text, encoding, errors=errors).encode('utf8')\nto_utf8 = any2utf8\n\n\ndef any2unicode(text, encoding='utf8', errors='strict'):\n \"\"\"Convert a string (bytestring in `encoding` or unicode), to unicode.\"\"\"\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)\nto_unicode = any2unicode\n\n\nclass SaveLoad(object):\n \"\"\"\n Objects which inherit from this class have save/load functions, which un/pickle\n them to disk.\n\n This uses pickle for de/serializing, so objects must not contain\n unpicklable attributes, such as lambda functions etc.\n\n \"\"\"\n @classmethod\n def load(cls, fname, mmap=None):\n \"\"\"\n Load a previously saved object from file (also see `save`).\n\n If the object was saved with large arrays stored separately, you can load\n these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use\n mmap, load large arrays as normal objects.\n\n If the file being loaded is compressed (either '.gz' or '.bz2'), then\n `mmap=None` must be set. 
Load will raise an `IOError` if this condition\n is encountered.\n\n \"\"\"\n logger.info(\"loading %s object from %s\" % (cls.__name__, fname))\n\n compress, subname = SaveLoad._adapt_by_suffix(fname)\n\n obj = unpickle(fname)\n obj._load_specials(fname, mmap, compress, subname)\n return obj\n\n\n def _load_specials(self, fname, mmap, compress, subname):\n \"\"\"\n Loads any attributes that were stored specially, and gives the same\n opportunity to recursively included SaveLoad instances.\n\n \"\"\"\n\n mmap_error = lambda x, y: IOError(\n 'Cannot mmap compressed object %s in file %s. ' % (x, y) +\n 'Use `load(fname, mmap=None)` or uncompress files manually.')\n\n for attrib in getattr(self, '__recursive_saveloads', []):\n cfname = '.'.join((fname, attrib))\n logger.info(\"loading %s recursively from %s.* with mmap=%s\" % (\n attrib, cfname, mmap))\n getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)\n\n for attrib in getattr(self, '__numpys', []):\n logger.info(\"loading %s from %s with mmap=%s\" % (\n attrib, subname(fname, attrib), mmap))\n\n if compress:\n if mmap:\n raise mmap_error(attrib, subname(fname, attrib))\n\n val = numpy.load(subname(fname, attrib))['val']\n else:\n val = numpy.load(subname(fname, attrib), mmap_mode=mmap)\n\n setattr(self, attrib, val)\n\n for attrib in getattr(self, '__scipys', []):\n logger.info(\"loading %s from %s with mmap=%s\" % (\n attrib, subname(fname, attrib), mmap))\n sparse = unpickle(subname(fname, attrib))\n if compress:\n if mmap:\n raise mmap_error(attrib, subname(fname, attrib))\n\n with numpy.load(subname(fname, attrib, 'sparse')) as f:\n sparse.data = f['data']\n sparse.indptr = f['indptr']\n sparse.indices = f['indices']\n else:\n sparse.data = numpy.load(subname(fname, attrib, 'data'), mmap_mode=mmap)\n sparse.indptr = numpy.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)\n sparse.indices = numpy.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)\n\n setattr(self, attrib, sparse)\n\n for attrib in getattr(self, '__ignoreds', []):\n logger.info(\"setting ignored attribute %s to None\" % (attrib))\n setattr(self, attrib, None)\n\n\n @staticmethod\n def _adapt_by_suffix(fname):\n \"\"\"Give appropriate compress setting and filename formula\"\"\"\n if fname.endswith('.gz') or fname.endswith('.bz2'):\n compress = True\n subname = lambda *args: '.'.join(list(args) + ['npz'])\n else:\n compress = False\n subname = lambda *args: '.'.join(list(args) + ['npy'])\n return (compress, subname)\n\n\n def _smart_save(self, fname, separately=None, sep_limit=10 * 1024**2,\n ignore=frozenset(), pickle_protocol=2):\n \"\"\"\n Save the object to file (also see `load`).\n\n If `separately` is None, automatically detect large\n numpy/scipy.sparse arrays in the object being stored, and store\n them into separate files. This avoids pickle memory errors and\n allows mmap'ing large arrays back on load efficiently.\n\n You can also set `separately` manually, in which case it must be\n a list of attribute names to be stored in separate files. The\n automatic check is not performed in this case.\n\n `ignore` is a set of attribute names to *not* serialize (file\n handles, caches etc). 
On subsequent load() these attributes will\n be set to None.\n\n `pickle_protocol` defaults to 2 so the pickled object can be imported\n in both Python 2 and 3.\n\n \"\"\"\n logger.info(\n \"saving %s object under %s, separately %s\" % (\n self.__class__.__name__, fname, separately))\n\n compress, subname = SaveLoad._adapt_by_suffix(fname)\n\n restores = self._save_specials(fname, separately, sep_limit, ignore, pickle_protocol,\n compress, subname)\n try:\n pickle(self, fname, protocol=pickle_protocol)\n finally:\n # restore attribs handled specially\n for obj, asides in restores:\n for attrib, val in iteritems(asides):\n setattr(obj, attrib, val)\n\n\n def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):\n \"\"\"\n Save aside any attributes that need to be handled separately, including\n by recursion any attributes that are themselves SaveLoad instances.\n\n Returns a list of (obj, {attrib: value, ...}) settings that the caller\n should use to restore each object's attributes that were set aside\n during the default pickle().\n\n \"\"\"\n asides = {}\n sparse_matrices = (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)\n if separately is None:\n separately = []\n for attrib, val in iteritems(self.__dict__):\n if isinstance(val, numpy.ndarray) and val.size >= sep_limit:\n separately.append(attrib)\n elif isinstance(val, sparse_matrices) and val.nnz >= sep_limit:\n separately.append(attrib)\n\n # whatever's in `separately` or `ignore` at this point won't get pickled\n for attrib in separately + list(ignore):\n if hasattr(self, attrib):\n asides[attrib] = getattr(self, attrib)\n delattr(self, attrib)\n\n recursive_saveloads = []\n restores = []\n for attrib, val in iteritems(self.__dict__):\n if hasattr(val, '_save_specials'): # better than 'isinstance(val, SaveLoad)' if IPython reloading\n recursive_saveloads.append(attrib)\n cfname = '.'.join((fname,attrib))\n restores.extend(val._save_specials(cfname, None, sep_limit, ignore,\n pickle_protocol, compress, subname))\n\n try:\n numpys, scipys, ignoreds = [], [], []\n for attrib, val in iteritems(asides):\n if isinstance(val, numpy.ndarray) and attrib not in ignore:\n numpys.append(attrib)\n logger.info(\"storing numpy array '%s' to %s\" % (\n attrib, subname(fname, attrib)))\n\n if compress:\n numpy.savez_compressed(subname(fname, attrib), val=numpy.ascontiguousarray(val))\n else:\n numpy.save(subname(fname, attrib), numpy.ascontiguousarray(val))\n\n elif isinstance(val, (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)) and attrib not in ignore:\n scipys.append(attrib)\n logger.info(\"storing scipy.sparse array '%s' under %s\" % (\n attrib, subname(fname, attrib)))\n\n if compress:\n numpy.savez_compressed(subname(fname, attrib, 'sparse'),\n data=val.data,\n indptr=val.indptr,\n indices=val.indices)\n else:\n numpy.save(subname(fname, attrib, 'data'), val.data)\n numpy.save(subname(fname, attrib, 'indptr'), val.indptr)\n numpy.save(subname(fname, attrib, 'indices'), val.indices)\n\n data, indptr, indices = val.data, val.indptr, val.indices\n val.data, val.indptr, val.indices = None, None, None\n\n try:\n # store array-less object\n pickle(val, subname(fname, attrib), protocol=pickle_protocol)\n finally:\n val.data, val.indptr, val.indices = data, indptr, indices\n else:\n logger.info(\"not storing attribute %s\" % (attrib))\n ignoreds.append(attrib)\n\n self.__dict__['__numpys'] = numpys\n self.__dict__['__scipys'] = scipys\n self.__dict__['__ignoreds'] = ignoreds\n 
self.__dict__['__recursive_saveloads'] = recursive_saveloads\n except:\n # restore the attributes if exception-interrupted\n for attrib, val in iteritems(asides):\n setattr(self, attrib, val)\n raise\n return restores + [(self, asides)]\n\n\n def save(self, fname_or_handle, separately=None, sep_limit=10 * 1024**2,\n ignore=frozenset(), pickle_protocol=2):\n \"\"\"\n Save the object to file (also see `load`).\n\n `fname_or_handle` is either a string specifying the file name to\n save to, or an open file-like object which can be written to. If\n the object is a file handle, no special array handling will be\n performed; all attributes will be saved to the same file.\n\n If `separately` is None, automatically detect large\n numpy/scipy.sparse arrays in the object being stored, and store\n them into separate files. This avoids pickle memory errors and\n allows mmap'ing large arrays back on load efficiently.\n\n You can also set `separately` manually, in which case it must be\n a list of attribute names to be stored in separate files. The\n automatic check is not performed in this case.\n\n `ignore` is a set of attribute names to *not* serialize (file\n handles, caches etc). On subsequent load() these attributes will\n be set to None.\n\n `pickle_protocol` defaults to 2 so the pickled object can be imported\n in both Python 2 and 3.\n\n \"\"\"\n try:\n _pickle.dump(self, fname_or_handle, protocol=pickle_protocol)\n logger.info(\"saved %s object\" % self.__class__.__name__)\n except TypeError: # `fname_or_handle` does not have write attribute\n self._smart_save(fname_or_handle, separately, sep_limit, ignore,\n pickle_protocol=pickle_protocol)\n#endclass SaveLoad\n\ndef pickle(obj, fname, protocol=2):\n \"\"\"Pickle object `obj` to file `fname`.\n `protocol` defaults to 2 so pickled objects are compatible across\n Python 2.x and 3.x.\n \"\"\"\n with open(fname, 'wb') as fout: # 'b' for binary, needed on Windows\n _pickle.dump(obj, fout, protocol=protocol)\n\n\ndef unpickle(fname):\n \"\"\"Load pickled object from `fname`\"\"\"\n with open(fname) as f:\n return _pickle.loads(f.read())\n\n\ndef prune_vocab(vocab, min_reduce, trim_rule=None):\n \"\"\"\n Remove all entries from the `vocab` dictionary with count smaller than `min_reduce`.\n\n Modifies `vocab` in place, returns the sum of all counts that were pruned.\n\n \"\"\"\n result = 0\n old_len = len(vocab)\n for w in list(vocab): # make a copy of dict's keys\n if not keep_vocab_item(w, vocab[w], min_reduce, trim_rule): # vocab[w] <= min_reduce:\n result += vocab[w]\n del vocab[w]\n logger.info(\"pruned out %i tokens with count <=%i (before %i, after %i)\",\n old_len - len(vocab), min_reduce, old_len, len(vocab))\n return result\n\n\ndef qsize(queue):\n \"\"\"Return the (approximate) queue size where available; -1 where not (OS X).\"\"\"\n try:\n return queue.qsize()\n except NotImplementedError:\n # OS X doesn't support qsize\n return -1\n\n\nRULE_DEFAULT = 0\nRULE_DISCARD = 1\nRULE_KEEP = 2\n\n\ndef keep_vocab_item(word, count, min_count, trim_rule=None):\n default_res = count >= min_count\n\n if trim_rule is None:\n return default_res\n else:\n rule_res = trim_rule(word, count, min_count)\n if rule_res == RULE_KEEP:\n return True\n elif rule_res == RULE_DISCARD:\n return False\n else:\n return default_res\n\n\ndef chunkize_serial(iterable, chunksize, as_numpy=False):\n \"\"\"\n Return elements from the iterable in `chunksize`-ed lists. 
The last returned\n element may be smaller (if length of collection is not divisible by `chunksize`).\n\n >>> print(list(grouper(range(10), 3)))\n [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]\n\n \"\"\"\n import numpy\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[numpy.array(doc) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()\n\ngrouper = chunkize_serial\n\n\nclass RepeatCorpusNTimes(SaveLoad):\n\n def __init__(self, corpus, n):\n \"\"\"\n Repeat a `corpus` `n` times.\n\n >>> corpus = [[(1, 0.5)], []]\n >>> list(RepeatCorpusNTimes(corpus, 3)) # repeat 3 times\n [[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)], []]\n \"\"\"\n self.corpus = corpus\n self.n = n\n\n def __iter__(self):\n for _ in xrange(self.n):\n for document in self.corpus:\n yield document" ]
[ [ "numpy.ascontiguousarray", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Levi-Armstrong/point_cloud_segmentation
[ "ed0db4f49aa378901860dd2a81897b79e0fe1a66" ]
[ "pcs_detection/src_python/pcs_detection/utils.py" ]
[ "'''\n * @file utils.py\n * @brief Helper functions for viewing images and reading/writing config and label files \n *\n * @author Jake Janssen\n * @date Oct 24, 2019\n * @version TODO\n * @bug No known bugs\n *\n * @copyright Copyright (c) 2019, Southwest Research Institute\n *\n * @par License\n * Software License Agreement (Apache License)\n * @par\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n * http://www.apache.org/licenses/LICENSE-2.0\n * @par\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n '''\n\nimport numpy as np\nimport pandas as pd\nfrom lxml import etree\nimport cv2\nimport os\nimport json\n\ndef get_labels_from_xml(label_path):\n '''\n Reads in labels from an xml file with CVATs format\n and returns a dictionary where the keys are whatever is specified as image name (outer directory and filename)\n and the values are the contours for that image\n '''\n anno = dict()\n root = etree.parse(label_path).getroot()\n \n # name of the outer directory this label corresponds to\n # allows for images in different directories to have the same filename \n dataset_name = label_path.split('/')[-2]\n\n for image_tag in root.iter('image'):\n image = {}\n # initialize all types of labels\n for label_name in root.iter('label'):\n image[label_name.find('name').text] = list()\n\n #extract the meta info from the image\n for key, value in image_tag.items():\n image[key] = value\n\n #keys will be collection folder combined with each images\n #this allows for images in different folders to have the same name\n image['name'] = dataset_name + '/' + image['name']\n\n image['contour'] = list()\n\n # loop through the poly elements in the image\n for poly_tag in image_tag:\n # get the type of poly element - eg polyline, polygon\n poly_elem = {}\n poly_type = poly_tag.tag\n poly_elem[poly_type] = []\n\n for key, value in poly_tag.items():\n poly_elem[key] = value\n \n #Get the contour points\n contour_points = []\n shape = str(poly_elem['points']).split(\";\") #get the shape from the polyline \n for pair in shape:\n x, y = pair.split(\",\")\n contour_points.append([int(float(x)), int(float(y))])\n\n #create a contour set of points in the proper order from the points\n contour = np.array(pd.DataFrame(contour_points),\n np.int32\n )\n image[poly_elem['label']].append([contour,poly_type])\n \n anno[image['name']] = image\n \n return anno\n\ndef dump_validation_config(config):\n '''\n Save the config used to train in the same folder in the model weights.\n This config can later be used to apply the same preprocessing, and model selection\n that was used in training \n '''\n\n config_dump = {}\n for key in config.__dict__:\n if (not key.startswith('__')) and (key != 'logger') :\n try:\n config_dump[key] = config.__getattribute__(key)\n except:\n pass\n save_path = os.path.join(os.path.split(config.WEIGHT_SAVE_PATH)[0],'full_config.json')\n with open(save_path, 'w') as outfile:\n json.dump(config_dump, outfile, indent=4) \n\ndef dump_inference_config(config):\n '''\n Save a config used for inference in the same folder as the weights.\n '''\n wanted_keys = ['MODEL', 'VAL_WEIGHT_PATH', 'BATCH_SIZE', 
'MODE', 'DISPLAY_SCALE_FACTOR', 'CHANNEL', 'PRE_PROCESS', 'CONFIDENCE_THRESHOLD', 'CLASS_NAMES', 'ORIG_DIMS']\n config_dump = {}\n for key in config.__dict__:\n if key in wanted_keys:\n try:\n config_dump[key] = config.__getattribute__(key)\n except:\n pass\n save_path = os.path.join(os.path.split(config.WEIGHT_SAVE_PATH)[0],'inference_config.json')\n print('_____CONFIG______')\n print(config_dump)\n print('_________________')\n with open(save_path, 'w') as outfile:\n json.dump(config_dump, outfile, indent=4) \n\n\ndef resize(image, scale_factor):\n '''\n Used to resize a display image.\n '''\n new_height = int(image.shape[0] * scale_factor)\n new_width = int(image.shape[1] * scale_factor)\n resized_img = cv2.resize(image, (new_width, new_height))\n return resized_img\n\n\ndef minMaxNormalize(chnls):\n '''\n Normalizes images to have pixel values between 0-255\n This function should only be used for displaying \n '''\n #loop over all of the channels\n for i in range(chnls.shape[-1]):\n # calculate min and ptp of each channel. \n min_arr = np.min(chnls[:, :, i][chnls[:, :, i] != 0]) \n ptp_arr = chnls[:, :, i].ptp()\n\n chnls[:, :, i] = (((chnls[:, :, i] - min_arr) / ptp_arr) * 255)\n\n return chnls.astype(np.uint8)\n\ndef histogram(original_image):\n '''\n Displays a histogram showing the pixel value distributions per channel of the image.\n This function should be used to check the being fed into the network.\n Each channel should have a mean of zero and all standard deviations should be within the same magnitude. \n '''\n img_cv = original_image.copy()\n\n org_means = []\n org_stds = []\n\n for i in range(img_cv.shape[-1]):\n org_means.append(round(img_cv[:,:,i].mean(),2))\n org_stds.append(round(img_cv[:,:,i].std(),2))\n\n img_cv = minMaxNormalize(img_cv)\n\n # Histograms of data distribution \n # split channels \n hist_chnls = cv2.split(img_cv)\n\n # histogram parameters\n histSize = 256\n histRange = (0,256)\n accumulate = False\n hist_w = 512\n hist_h = 400\n bin_w = int(round( hist_w/histSize ))\n histImage = np.zeros((hist_h, hist_w, 4), dtype=np.uint8)\n\n hists = [] \n \n # text colors and colors for plot\n t_colors = [(210,0,0), (0,210,0), (0,0,210)]\n colors = [(255,0,0), (0,255,0), (0,0,255) ]\n\n # starting vertical location of text\n text_h = 30\n\n # get data into histogram format\n for ii in range(len(hist_chnls)):\n temp_hist = cv2.calcHist(hist_chnls, [ii], None, [histSize], histRange, accumulate=accumulate)\n cv2.normalize(temp_hist, temp_hist, alpha = 0, beta=hist_h, norm_type=cv2.NORM_MINMAX)\n hists.append(temp_hist)\n\n # add histogram to image \n for jj in range(1, histSize):\n for curr_hist, color in zip(hists,colors):\n cv2.line(histImage, ( bin_w*(jj-1), hist_h - int((curr_hist[jj-1]).round()) ),\n ( bin_w*(jj), hist_h - int((curr_hist[jj]).round()) ),\n color, thickness=1)\n\n # add text and mean/normal distribution lines \n for ii, color, t_color in zip(range(len(hist_chnls)), colors, t_colors):\n hist_std = int(round(hist_chnls[ii].std()))\n hist_mean = int(round(hist_chnls[ii].mean()))\n cv2.circle(histImage, (2*hist_mean, 400 ), 2*hist_std, color, thickness=4)\n display_str = 'Mean: ' + str(org_means[ii]) + ', Std: ' + str(org_stds[ii])\n cv2.putText(histImage , display_str, (10,text_h), cv2.FONT_HERSHEY_SIMPLEX, .4, t_color, 1,cv2.LINE_AA)\n text_h += 20\n\n # display histogram \n cv2.imshow('RGBT Data Distribution', histImage)\n\ndef colorTriLabel(label, colors):\n display_label = np.zeros((label.shape[0], label.shape[1], 3))\n display_label[:,:,0] = 
label[:,:,0] * 255\n display_label[:,:,-1] = label[:,:,-1] * 255\n for ii, color in enumerate(colors):\n display_label[label[:,:,ii+1] == 1] = color\n return display_label.astype(np.uint8)\n\ndef colorPrediction(prediction, orig_img, colors):\n '''\n Creates a three channel bgr image and colors it with the prediciton.\n '''\n prediction_display = orig_img.copy() \n prediction = np.argmax(prediction, axis=-1)\n for jj, color in enumerate(colors):\n prediction_display[:,:][prediction==jj+1] = color\n prediction_display = prediction_display.astype(np.uint8)\n return prediction_display\n\ndef LABtoBGR(image, config):\n '''\n Used to convert the LAB color space back to BGR\n '''\n image += config.PRE_PROCESS['lab']\n image[image==0] = 1e-4\n image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_LAB2BGR)\n image *= 255\n return image\n\ndef get_colors(n):\n '''\n Generates a list of colors\n '''\n colors = [[102,255,153], [255, 102, 204], [102, 204, 255], [51, 102, 153]]\n return colors[0:n]\n\n" ]
[ [ "numpy.argmax", "numpy.zeros", "pandas.DataFrame", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
izumiya-keisuke/mmle
[ "43dbe281ee591a2d7f7cc1e5386ed04651930205" ]
[ "examples/distributed_data_parallel.py" ]
[ "\"\"\"\nCopyright 2021 Keisuke Izumiya\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport torch\nimport torch.distributed as distd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport mmle.distributed as mdistd\nimport mmle.nn as mnn\nfrom mmle.utils.manager import Manager\n\n\nBATCH_SIZE = 400\nDATA_NUM = 40000\nDATA_DIM = 10\nEPOCH_NUM = 50\nLABEL_DIM = 5\nLOG_DIR = \"log\"\nMIDDLE_DIM = 16\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self):\n super().__init__()\n\n self.data = torch.randn(DATA_NUM, DATA_DIM)\n self.label = torch.randn(DATA_NUM, LABEL_DIM)\n\n def __len__(self):\n return DATA_NUM\n\n def __getitem__(self, idx):\n return self.data[idx], self.label[idx]\n\n\ndef main():\n world_size = distd.get_world_size()\n rank = distd.get_rank()\n\n model = nn.Sequential(\n mnn.FC(DATA_DIM, MIDDLE_DIM), mnn.FC(MIDDLE_DIM, LABEL_DIM, bn=False, activ=\"id\")\n )\n model = mdistd.distribute_module(model)\n\n optimizer = optim.Adam(model.parameters())\n\n dataset = Dataset()\n sampler = DistributedSampler(dataset, world_size, rank)\n loader = DataLoader(dataset, BATCH_SIZE, sampler=sampler, drop_last=True)\n\n if rank == 0:\n manager = Manager(LOG_DIR, use_tensorboard=True)\n manager.tensorboard_writer.add_graph(model, dataset[0][0].repeat(BATCH_SIZE, 1))\n step = 0\n\n for epoch in range(EPOCH_NUM):\n model.train()\n for data, label in loader:\n loss = F.mse_loss(model(data), label.to(rank))\n\n mnn.zero_grad(model)\n loss.backward()\n optimizer.step()\n distd.barrier()\n\n if rank == 0:\n step += world_size\n manager.tensorboard_writer.plot(\"loss\", \"train\", loss.item(), step)\n\n if rank == 0:\n print(f\"Finish epoch {epoch}: loss={loss.item():.3f}\")\n distd.barrier()\n\n if rank == 0:\n manager.save({\"model\": model.state_dict()}, \"model\")\n manager.close()\n\n\nif __name__ == \"__main__\":\n mdistd.spawn(main, nprocs=torch.cuda.device_count())\n" ]
[ [ "torch.utils.data.distributed.DistributedSampler", "torch.randn", "torch.cuda.device_count", "torch.utils.data.DataLoader", "torch.distributed.barrier", "torch.distributed.get_rank", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlphaJia/faster-rcnn.pytorch
[ "b79a83c84f083495e2edb1a55a970946cb59add2" ]
[ "lib/setup.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# !/usr/bin/env python\n\nimport glob\nimport os\n\nimport torch\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom torch.utils.cpp_extension import CUDAExtension\nfrom torch.utils.cpp_extension import CUDA_HOME\nfrom torch.utils.cpp_extension import CppExtension\n\nrequirements = [\"torch\", \"torchvision\"]\n\n\ndef get_extensions():\n this_dir = os.path.dirname(os.path.abspath(__file__))\n extensions_dir = os.path.join(this_dir, \"model\", \"csrc\")\n\n main_file = glob.glob(os.path.join(extensions_dir, \"*.cpp\"))\n source_cpu = glob.glob(os.path.join(extensions_dir, \"cpu\", \"*.cpp\"))\n source_cuda = glob.glob(os.path.join(extensions_dir, \"cuda\", \"*.cu\"))\n\n sources = main_file + source_cpu\n extension = CppExtension\n\n extra_compile_args = {\"cxx\": []}\n define_macros = []\n\n if torch.cuda.is_available() and CUDA_HOME is not None:\n extension = CUDAExtension\n sources += source_cuda\n define_macros += [(\"WITH_CUDA\", None)]\n extra_compile_args[\"nvcc\"] = [\n \"-DCUDA_HAS_FP16=1\",\n \"-D__CUDA_NO_HALF_OPERATORS__\",\n \"-D__CUDA_NO_HALF_CONVERSIONS__\",\n \"-D__CUDA_NO_HALF2_OPERATORS__\",\n ]\n\n sources = [os.path.join(extensions_dir, s) for s in sources]\n\n include_dirs = [extensions_dir]\n\n ext_modules = [\n extension(\n \"model._C\",\n sources,\n include_dirs=include_dirs,\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n )\n ]\n\n return ext_modules\n\n\nsetup(\n name=\"faster_rcnn\",\n version=\"0.1\",\n description=\"object detection in pytorch\",\n packages=find_packages(exclude=(\"configs\", \"tests\",)),\n # install_requires=requirements,\n ext_modules=get_extensions(),\n cmdclass={\"build_ext\": torch.utils.cpp_extension.BuildExtension},\n)\n" ]
[ [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
danielaeblancoj/Design-of-experiment-Python
[ "541c00a96fad4a9ef074c4fb4dde9c65a5deb20d" ]
[ "DOE_functions.py" ]
[ "#====================\n# Essential imports\n#====================\nfrom pyDOE import *\nfrom pyDOE_corrected import *\nfrom diversipy import *\nimport pandas as pd\nimport numpy as np\n\n# ===========================================================================================================\n# Function for constructing a DataFrame from a numpy array generated by PyDOE function and individual lists\n# ===========================================================================================================\n\ndef construct_df(x,r):\n df=pd.DataFrame(data=x,dtype='float32')\n for i in df.index:\n for j in range(len(list(df.iloc[i]))):\n df.iloc[i][j]=r[j][int(df.iloc[i][j])]\n return df\n\n# ===================================================================================================\n# Function for constructing a DataFrame from a matrix with floating point numbers between -1 and +1\n# ===================================================================================================\n\ndef construct_df_from_matrix(x,factor_array):\n \"\"\"\n This function constructs a DataFrame out of x and factor_array, both of which are assumed to be numpy arrays.\n It projects the numbers in the x (which is output of a design-of-experiment build) to the factor array ranges.\n Here factor_array is assumed to have only min and max ranges.\n Matrix x is assumed to have numbers ranging from -1 to 1.\n \"\"\"\n \n row_num=x.shape[0] # Number of rows in the matrix x\n col_num=x.shape[1] # Number of columns in the matrix x\n \n empty=np.zeros((row_num,col_num)) \n \n def simple_substitution(idx,factor_list):\n if idx==-1:\n return factor_list[0]\n elif idx==0:\n return factor_list[1]\n elif idx==1:\n return factor_list[2]\n else:\n alpha=np.abs(factor_list[2]-factor_list[0])/2\n if idx<0:\n beta=np.abs(idx)-1\n return factor_list[0]-(beta*alpha)\n else:\n beta=idx-1\n return factor_list[2]+(beta*alpha)\n \n for i in range(row_num):\n for j in range(col_num):\n empty[i,j] = simple_substitution(x[i,j],factor_array[j])\n \n return pd.DataFrame(data=empty)\n\n# =================================================================================================\n# Function for constructing a DataFrame from a matrix with floating point numbers between 0 and 1\n# =================================================================================================\n\ndef construct_df_from_random_matrix(x,factor_array):\n \"\"\"\n This function constructs a DataFrame out of matrix x and factor_array, both of which are assumed to be numpy arrays.\n It projects the numbers in the x (which is output of a design-of-experiment build) to the factor array ranges.\n Here factor_array is assumed to have only min and max ranges.\n Matrix x is assumed to have numbers ranging from 0 to 1 only.\n \"\"\"\n \n row_num=x.shape[0] # Number of rows in the matrix x\n col_num=x.shape[1] # Number of columns in the matrix x\n \n empty=np.zeros((row_num,col_num)) \n \n def simple_substitution(idx,factor_list):\n alpha=np.abs(factor_list[1]-factor_list[0])\n beta=idx\n return factor_list[0]+(beta*alpha)\n \n for i in range(row_num):\n for j in range(col_num):\n empty[i,j] = simple_substitution(x[i,j],factor_array[j])\n \n return pd.DataFrame(data=empty)\n\n# ======================================================================================\n# Function for building full factorial DataFrame from a dictionary of process variables\n# ======================================================================================\n\ndef 
build_full_fact(factor_level_ranges):\n \"\"\"\n Builds a full factorial design dataframe from a dictionary of factor/level ranges\n Example of the process variable dictionary:\n {'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0]}\n \"\"\"\n \n factor_lvl_count=[]\n factor_lists=[]\n \n for key in factor_level_ranges:\n factor_lvl_count.append(len(factor_level_ranges[key]))\n factor_lists.append(factor_level_ranges[key])\n \n x = fullfact_corrected(factor_lvl_count)\n df=construct_df(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n \n return df\n\n# ==================================================================================================================================================\n# Function for building 2-level fractional factorial DataFrame from a dictionary and a generator string\n# ================================================================================================================================================================\n\ndef build_frac_fact(factor_level_ranges,gen_string):\n \"\"\"\n Builds a full factorial design dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n\tExample of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n\t\n\tThis function requires a little more knowledge of how the confounding will be allowed. \n\tThis means that some factor effects get muddled with other interaction effects, so it’s harder to distinguish between them).\n\t\n\tLet’s assume that we just can’t afford (for whatever reason) the number of runs in a full-factorial design. We can systematically decide on a fraction of the full-factorial by allowing some of the factor main effects to be confounded with other factor interaction effects. \n\tThis is done by defining an alias structure that defines, symbolically, these interactions. These alias structures are written like “C = AB” or “I = ABC”, or “AB = CD”, etc. \n\tThese define how one column is related to the others.\n\t\n\tEXAMPLE\n ------------\n For example, the alias “C = AB” or “I = ABC” indicate that there are three factors (A, B, and C) and that the main effect of factor C is confounded with the interaction effect of the product AB, and by extension, A is confounded with BC and B is confounded with AC. \n\tA full- factorial design with these three factors results in a design matrix with 8 runs, but we will assume that we can only afford 4 of those runs. \n\tTo create this fractional design, we need a matrix with three columns, one for A, B, and C, only now where the levels in the C column is created by the product of the A and B columns.\n \"\"\"\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. 
Assigning the end point to the high level.\")\n \n if factor_count!=len(gen_string.split(' ')):\n print(\"Length of the generator string for the fractional factorial build does not match the length of the process variables dictionary\")\n return None\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = fracfact(gen_string)\n \n def index_change(x):\n if x==-1:\n return 0\n else:\n return x\n vfunc=np.vectorize(index_change)\n x=vfunc(x)\n \n df=construct_df(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n \n return df\n\n# =====================================================================================\n# Function for building Plackett-Burman designs from a dictionary of process variables\n# =====================================================================================\n\ndef build_plackett_burman(factor_level_ranges):\n \"\"\"\n Builds a Plackett-Burman dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n\t\n\tPlackett–Burman designs are experimental designs presented in 1946 by Robin L. Plackett and J. P. Burman while working in the British Ministry of Supply.(Their goal was to find experimental designs for investigating the dependence of some measured quantity on a number of independent variables (factors), each taking L levels, in such a way as to minimize the variance of the estimates of these dependencies using a limited number of experiments. \n\t\n Interactions between the factors were considered negligible. The solution to this problem is to find an experimental design where each combination of levels for any pair of factors appears the same number of times, throughout all the experimental runs (refer to table). \n\tA complete factorial design would satisfy this criterion, but the idea was to find smaller designs.\n\t\n\tThese designs are unique in that the number of trial conditions (rows) expands by multiples of four (e.g. 4, 8, 12, etc.). \n\tThe max number of columns allowed before a design increases the number of rows is always one less than the next higher multiple of four.\n \"\"\"\n \n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. 
Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = pbdesign(factor_count)\n \n def index_change(x):\n if x==-1:\n return 0\n else:\n return x\n vfunc=np.vectorize(index_change)\n x=vfunc(x)\n \n df=construct_df(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n \n return df\n\n# ===================================================================================\n# Function for building Sukharev Grid designs from a dictionary of process variables\n# ===================================================================================\n\ndef build_sukharev(factor_level_ranges,num_samples=None):\n \"\"\"\n Builds a Sukharev-grid hypercube design dataframe from a dictionary of factor/level ranges.\n Number of samples raised to the power of (1/dimension), where dimension is the number of variables, must be an integer.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n\t\n\tSpecial property of this grid is that points are not placed on the boundaries of the hypercube, but at centroids of the subcells constituted by individual samples. \n\tThis design offers optimal results for the covering radius regarding distances based on the max-norm.\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n check=num_samples**((1/factor_count))\n if (check-int(check)>1e-5):\n num_samples=(int(check)+1)**(factor_count)\n print(\"\\nNumber of samples not adequate to fill a Sukharev grid. Increasing sample size to: \",num_samples)\n \n x = sukharev_grid(num_points=num_samples,dimension=factor_count)\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# ===================================================================================\n# Function for building Box-Behnken designs from a dictionary of process variables\n# ===================================================================================\n\ndef build_box_behnken(factor_level_ranges,center=1):\n \"\"\"\n Builds a Box-Behnken design dataframe from a dictionary of factor/level ranges.\n Note 3 levels of factors are necessary. If not given, the function will automatically create 3 levels by linear mid-section method.\n Example of the dictionary:\n {'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0,1.1]}\n\t\n\tIn statistics, Box–Behnken designs are experimental designs for response surface methodology, devised by George E. P. Box and Donald Behnken in 1960, to achieve the following goals:\n\t\t* Each factor, or independent variable, is placed at one of three equally spaced values, usually coded as −1, 0, +1. 
(At least three levels are needed for the following goal.)\n\t\t* The design should be sufficient to fit a quadratic model, that is, one containing squared terms, products of two factors, linear terms and an intercept.\n\t\t* The ratio of the number of experimental points to the number of coefficients in the quadratic model should be reasonable (in fact, their designs kept it in the range of 1.5 to 2.6).*estimation variance should more or less depend only on the distance from the centre (this is achieved exactly for the designs with 4 and 7 factors), and should not vary too much inside the smallest (hyper)cube containing the experimental points.\n\t\"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])==2:\n factor_level_ranges[key].append((factor_level_ranges[key][0]+factor_level_ranges[key][1])/2)\n factor_level_ranges[key].sort()\n print(f\"{key} had only two end points. Creating a mid-point by averaging them\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = bbdesign_corrected(factor_count,center=center)\n x=x+1 #Adjusting the index up by 1\n\n df=construct_df(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n \n return df\n\n# =====================================================================================================\n# Function for building central-composite (Box-Wilson) designs from a dictionary of process variables\n# ===================================================================================================== \n\ndef build_central_composite(factor_level_ranges,center=(2,2),alpha='o',face='ccc'):\n \"\"\"\n Builds a central-composite design dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n\t\n\tIn statistics, a central composite design is an experimental design, useful in response surface methodology, for building a second order (quadratic) model for the response variable without needing to use a complete three-level factorial experiment.\n\tThe design consists of three distinct sets of experimental runs:\n\t\t* A factorial (perhaps fractional) design in the factors studied, each having two levels;\n\t\t* A set of center points, experimental runs whose values of each factor are the medians of the values used in the factorial portion. This point is often replicated in order to improve the precision of the experiment;\n\t\t* A set of axial points, experimental runs identical to the centre points except for one factor, which will take on values both below and above the median of the two factorial levels, and typically both outside their range. All factors are varied in this way.\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. 
Assigning the end point to the high level.\")\n \n \n # Creates the mid-points by averaging the low and high levels\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])==2:\n factor_level_ranges[key].append((factor_level_ranges[key][0]+factor_level_ranges[key][1])/2)\n factor_level_ranges[key].sort()\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = ccdesign(factor_count,center=center,alpha=alpha,face=face)\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# ====================================================================================\n# Function for building simple Latin Hypercube from a dictionary of process variables\n# ====================================================================================\n\ndef build_lhs(factor_level_ranges, num_samples=None, prob_distribution=None):\n \"\"\"\n Builds a Latin Hypercube design dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n prob_distribution: Analytical probability distribution to be applied over the randomized sampling. \n\tTakes strings like: 'Normal', 'Poisson', 'Exponential', 'Beta', 'Gamma'\n\n\tLatin hypercube sampling (LHS) is a form of stratified sampling that can be applied to multiple variables. The method commonly used to reduce the number or runs necessary for a Monte Carlo simulation to achieve a reasonably accurate random distribution. LHS can be incorporated into an existing Monte Carlo model fairly easily, and work with variables following any analytical probability distribution.\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = lhs(n=factor_count,samples=num_samples)\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# ============================================================================================\n# Function for building space-filling Latin Hypercube from a dictionary of process variables\n# ============================================================================================\n\ndef build_space_filling_lhs(factor_level_ranges, num_samples=None):\n \"\"\"\n Builds a space-filling Latin Hypercube design dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. 
Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = transform_spread_out(lhd_matrix(num_points=num_samples,dimension=factor_count)) # create latin hypercube design\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# =====================================================================================================\n# Function for building designs with random _k-means_ clusters from a dictionary of process variables\n# =====================================================================================================\n\ndef build_random_k_means(factor_level_ranges, num_samples=None):\n \"\"\"\n This function aims to produce a centroidal Voronoi tesselation of the unit random hypercube and generate k-means clusters.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = random_k_means(num_points=num_samples,dimension=factor_count) # create latin hypercube design\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# =============================================================================================\n# Function for building maximin reconstruction matrix from a dictionary of process variables\n# =============================================================================================\n\ndef build_maximin(factor_level_ranges, num_samples=None):\n \"\"\"\n Builds a maximin reconstructed design dataframe from a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n\t\n\tThis algorithm carries out a user-specified number of iterations to maximize the minimal distance of a point in the set to \n\t\t* other points in the set, \n\t\t* existing (fixed) points, \n\t\t* the boundary of the hypercube.\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. 
Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = maximin_reconstruction(num_points=num_samples,dimension=factor_count) # create latin hypercube design\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# ========================================================================================\n# Function for building Halton matrix based design from a dictionary of process variables\n# ========================================================================================\n\ndef build_halton(factor_level_ranges, num_samples=None):\n \"\"\"\n Builds a quasirandom dataframe from a dictionary of factor/level ranges using prime numbers as seed.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n\n Quasirandom sequence using the default initialization with first n prime numbers equal to the number of factors/variables.\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = halton(num_points=num_samples,dimension=factor_count) # create Halton matrix design\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n\n# ==========================================================================================\n# Function for building uniform random design matrix from a dictionary of process variables\n# ==========================================================================================\n\ndef build_uniform_random (factor_level_ranges, num_samples=None):\n \"\"\"\n Builds a design dataframe with samples drawn from uniform random distribution based on a dictionary of factor/level ranges.\n Only min and max values of the range are required.\n Example of the dictionary:\n {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}\n num_samples: Number of samples to be generated\n \"\"\"\n for key in factor_level_ranges:\n if len(factor_level_ranges[key])!=2:\n factor_level_ranges[key][1]=factor_level_ranges[key][-1]\n factor_level_ranges[key]=factor_level_ranges[key][:2]\n print(f\"{key} had more than two levels. Assigning the end point to the high level.\")\n \n factor_count=len(factor_level_ranges)\n factor_lists=[]\n \n if num_samples==None:\n num_samples=factor_count\n \n for key in factor_level_ranges:\n factor_lists.append(factor_level_ranges[key])\n \n x = random_uniform(num_points=num_samples,dimension=factor_count) # create Halton matrix design\n factor_lists=np.array(factor_lists)\n \n df = construct_df_from_random_matrix(x,factor_lists)\n df.columns=factor_level_ranges.keys()\n return df\n" ]
[ [ "numpy.abs", "pandas.DataFrame", "numpy.vectorize", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
kidakoji/ImgRecogRecipe
[ "14b9dec4d581c0109539f96072d883ec4f9d64a1" ]
[ "docker-python3-flask-ml-app/app/keras_mnist/kerasPredict.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n#\n#\nimport os\n\nimport numpy as np\nfrom keras import backend as Keras\nfrom keras.models import load_model\n\n# -----------------------------------------------------------------------------\n#\nKeras.clear_session()\n# 学習済みモデル\nkeras_mnist_model = load_model(\n os.path.abspath(os.path.dirname(__file__)) + '/keras-mnist-model.h5')\n\nkeras_mnist_model._make_predict_function()\nkeras_mnist_model.summary()\nprint('Keras MNIST model is loaded.')\n\n\ndef result(input_data):\n input_data = np.expand_dims(input_data, axis=0)\n input_data = input_data.reshape(input_data.shape[0], 28, 28, 1)\n result = np.argmax(keras_mnist_model.predict(input_data))\n return int(result)\n" ]
[ [ "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
avielfedida/DecisiveML
[ "cf0feceeda0fc4abd4af6cc766c7c14ee655b576", "cf0feceeda0fc4abd4af6cc766c7c14ee655b576" ]
[ "decisiveml/montecarlo.py", "tests/test_helpers.py" ]
[ "import pandas as pd\nimport random\nimport statistics\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# MonkeyPatch Python 3.6 choices into random 3.5.5\nimport bisect as _bisect\nimport itertools as _itertools\n\n\ndef choices(population, weights=None, cum_weights=None, k=1):\n \"\"\"Return a k sized list of population elements chosen with\nreplacement.\n If the relative weights or cumulative weights are not specified,\n the selections are made with equal probability.\n \"\"\"\n if cum_weights is None:\n if weights is None:\n total = len(population)\n return [population[int(random.random() * total)] for i in range(k)]\n cum_weights = list(_itertools.accumulate(weights))\n elif weights is not None:\n raise TypeError(\"Cannot specify both weights and cumulative weights\")\n if len(cum_weights) != len(population):\n raise ValueError(\"The number of weights does not match the population\")\n bisect = _bisect.bisect\n total = cum_weights[-1]\n hi = len(cum_weights) - 1\n return [population[bisect(cum_weights, random() * total, 0, hi)] for i in range(k)]\n\n\nrandom.choices = choices\n\n\nclass ExcessiveBaseEquity(Exception):\n pass\n\n\nclass MonteCarlo(object):\n def __init__(self, trades_list):\n self.trades_list = trades_list\n self.num_trades_total = len(self.trades_list)\n self.num_trades_per_year = None\n self.ruin_equity = None\n self.runs = None\n\n self._MONTECARLO_RUNS = 2500\n logger.info(\n \"Initialize \\t| Trades: {} \\t| MC Runs: {}\".format(\n self.num_trades_total, self._MONTECARLO_RUNS\n )\n )\n\n def settings(self, ruin_equity, start_date, end_date):\n self._set_ruin_equity(ruin_equity)\n self._set_trades_per_year(start_date, end_date)\n logger.info(\n \"Settings \\t| Ruin: {} \\t| Trades Per Year: {}\".format(\n self.ruin_equity, self.num_trades_per_year\n )\n )\n\n def _set_ruin_equity(self, ruin_equity):\n self.ruin_equity = ruin_equity\n\n def _set_trades_per_year(self, start_date, end_date):\n td = end_date - start_date\n self.num_trades_per_year = int(self.num_trades_total * 365 / td.days)\n logger.debug(\"TimeDelta: {} {} trades/yr\".format(td, self.num_trades_per_year))\n\n def _random_trade(self, starting_equity):\n assert self.num_trades_per_year\n assert self.ruin_equity\n\n trades = random.choices(self.trades_list, k=self.num_trades_per_year)\n # logger.debug(\"{} {}\".format(len(trades), trades))\n\n # Check for ruin at any point in the trades list\n is_ruined = 0\n equity = starting_equity\n for trade in trades:\n equity = equity + trade\n if equity < self.ruin_equity:\n is_ruined = 1\n break\n\n stats = {\n \"profit\": sum(trades),\n \"returns_pct\": int(\n 100 * ((starting_equity + sum(trades)) / starting_equity - 1)\n ),\n \"drawdown_pct\": self._drawdown(starting_equity, trades),\n \"is_ruined\": is_ruined,\n \"is_profitable\": 1 if sum(trades) >= 0 else 0,\n }\n try:\n stats[\"returns_per_drawdown\"] = stats[\"returns_pct\"] / stats[\"drawdown_pct\"]\n except ZeroDivisionError:\n stats[\"returns_per_drawdown\"] = 0\n logger.debug(stats)\n return stats\n\n def _drawdown(self, starting_equity, trades):\n \"\"\"Returns the maximum drawdown in a set of trades\"\"\"\n equity = starting_equity\n hwm = starting_equity\n max_drawdown_pct = 0\n drawdown_pct = 0\n\n for trade in trades:\n equity = equity + trade\n\n # Set High Water Mark\n if equity > hwm:\n hwm = equity\n\n # Look for drawdown\n if equity < hwm:\n drawdown_pct = 100 * (1 - (equity / hwm))\n if drawdown_pct > max_drawdown_pct:\n max_drawdown_pct = drawdown_pct\n\n # logger.debug(\"{} 
eq:{} \\t hwm:{} \\t dd:{} mdd:{}\".format(trade, equity, hwm, drawdown_pct, max_drawdown_pct))\n return max_drawdown_pct\n\n def _median_stats_run(self, starting_equity):\n montecarlo = {}\n median_montecarlo = {}\n\n # runs\n for _ in range(self._MONTECARLO_RUNS):\n\n # make the list for every key\n stats = self._random_trade(starting_equity)\n for k, v in stats.items():\n if k not in montecarlo.keys():\n montecarlo[k] = []\n montecarlo[k].append(v)\n\n # run statistics on all the lists of every key\n for k, v in montecarlo.items():\n # ignore non-median stats\n if k != \"is_ruined\" or k != \"is_profitable\":\n median_montecarlo[k] = statistics.median(montecarlo[k])\n\n logger.debug((montecarlo[\"profit\"]))\n logger.debug((montecarlo[\"is_ruined\"]))\n logger.debug(sum(montecarlo[\"is_ruined\"]))\n median_montecarlo[\"is_ruined\"] = (\n 100 * sum(montecarlo[\"is_ruined\"]) / self._MONTECARLO_RUNS\n )\n median_montecarlo[\"is_profitable\"] = (\n 100 * sum(montecarlo[\"is_profitable\"]) / self._MONTECARLO_RUNS\n )\n median_montecarlo[\"equity\"] = starting_equity\n\n # calculate risk of ruin\n logger.debug(\"Median {}: {}\".format(starting_equity, median_montecarlo))\n\n return median_montecarlo\n\n def run(self, base_equity, steps=11):\n \"\"\"Create the results for the MonteCarlo, adding equity to the\n base_equity\n\n Args:\n base_equity (int): starting equity to add to\n steps (:obj:`int`, optional). Default is 11 runs.\n\n Returns:\n pd.DataFrame: results for each run with various starting equities\n\n Example:\n >>> mc = dvm.MonteCarlo(trade_list)\n >>> start_date = trade_list.index[0].to_pydatetime()\n >>> end_date = trade_list.index[-1].to_pydatetime()\n >>> mc.settings(ruin_equity=5000, start_date=start_date, end_date=end_date)\n >>> results = mc.run(base_equity=starting_equity)\n\n \"\"\"\n step_size = int(base_equity / 4)\n end_eq = base_equity + step_size * steps\n starting_equities_list = range(base_equity, end_eq, step_size)\n results = self._run_equity_list(starting_equities_list)\n df = pd.DataFrame(results)\n return df\n\n def _run_equity_list(self, starting_equities_list):\n runs = []\n for starting_equity in starting_equities_list:\n runs.append(self._median_stats_run(starting_equity))\n self.runs = runs\n return runs\n\n def best_run(self, target_risk_of_ruin_pct=10):\n assert self.runs\n for run in self.runs:\n if run[\"is_ruined\"] < target_risk_of_ruin_pct:\n return run\n return None\n\n def recommendation(self, start_date, end_date):\n \"\"\"Create a recommendation from the best run\n\n Args:\n start_date (datetime): trade start date\n end_date (datetime): trade end date\n\n Returns:\n pd.Series: recommendation categories\n\n Example:\n >>> mc = dvm.MonteCarlo(trade_list)\n >>> start_date = trade_list.index[0].to_pydatetime()\n >>> end_date = trade_list.index[-1].to_pydatetime()\n >>> mc.settings(ruin_equity=5000, start_date=start_date, end_date=end_date)\n >>> results = mc.run(base_equity=starting_equity)\n >>> my_rec = mc.recommendation(start_date, end_date)\n \"\"\"\n\n # Get the recommended values\n best = self.best_run()\n my_rec = pd.Series(best)\n\n if my_rec.empty:\n raise ExcessiveBaseEquity(f\"No best run found\")\n\n # Determine result\n if my_rec[\"is_ruined\"] > 10 or my_rec[\"returns_per_drawdown\"] < 2.0:\n logger.info(\"MonteCarlo Risk Assessment: FAILED\")\n my_rec[\"is_pass\"] = False\n else:\n logger.info(\"MonteCarlo Risk Assessment: PASSED\")\n my_rec[\"is_pass\"] = True\n\n # Add additional calculations\n my_rec[\"start_date\"] = start_date\n 
my_rec[\"end_date\"] = end_date\n my_rec[\"months\"] = (end_date - start_date).days / 30\n my_rec[\"avg_monthly_profit\"] = my_rec[\"profit\"] / my_rec[\"months\"]\n\n return my_rec\n", "#!/usr/bin/env python3\nimport unittest\nfrom nose.tools import eq_\nimport pandas as pd\nimport os\nimport decisiveml as dml\n\nTEST_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestTradingHolidays(unittest.TestCase):\n def setUp(self):\n self.short_holiday = pd.read_csv(\n os.path.join(\n TEST_DIR, \"test_data/holidays/Holidays_Daily_2019-01-02_2020-02-20.csv\"\n ),\n index_col=\"Date\",\n )\n\n def test_trading_holidays_in_range(self):\n \"\"\"Test that we can remove our own holidays\"\"\"\n holidays = dml.trading_holidays_in_range(start=\"2018-12-01\", end=\"2018-12-31\")\n eq_(len(holidays), 2)\n eq_(holidays[0], \"2018-12-25\")\n eq_(holidays[1], \"2019-01-01\")\n eq_(\"2018-12-05\" not in holidays, True)\n\n def test_trading_holidays_in_range_with_short_holiday(self):\n \"\"\"Test using HOLIDAY data and fix missing dates in pandas_market_calendar\"\"\"\n df = self.short_holiday\n holidays = dml.trading_holidays_in_range(start=df.index[0], end=df.index[-1])\n eq_(\"2020-01-20\" in holidays, True)\n eq_(\"2020-02-17\" in holidays, True)\n\n def test_tradingday_offset_short(self):\n \"\"\"Test that we can offset a business day through a holiday\"\"\"\n offsets = dml.tradingday_offset(\n start=\"2017-04-10\",\n end=\"2017-04-20\",\n dates=[\"2017-04-17\"],\n offsets=-1,\n roll=\"backward\",\n )\n eq_(offsets[0], pd.to_datetime(\"2017-04-13\"))\n\n def test_tradingday_offset_long(self):\n \"\"\"Test that we can offset a business day through a holiday\"\"\"\n offsets = dml.tradingday_offset(\n start=\"2017-01-01\",\n end=\"2017-05-01\",\n dates=[\"2017-01-01\", \"2017-04-17\"],\n offsets=-1,\n roll=\"backward\",\n )\n eq_(offsets[0], pd.to_datetime(\"2016-12-29\"))\n eq_(offsets[1], pd.to_datetime(\"2017-04-13\"))\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ], [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
deeplearningunb/NextValue
[ "50c04ea55fa3141009d1f018197e02344fdda327" ]
[ "src/App.py" ]
[ "import tkinter as tk\nfrom os import listdir\nfrom os.path import isfile, join\nfrom pages.StartPage import StartPage\nfrom pages.ConfigurationPage import ConfigurationPage\nfrom pages.TrainingPage import TrainingPage\nfrom pages.ResultPage import ResultPage\nfrom pages.ChooseDatePage import ChooseDatePage\nfrom pages.ChooseIntervalPage import ChooseIntervalPage\nfrom Layer import (\n Layer,\n DEFAULT_DAYS,\n DEFAULT_OPTIMIZER,\n DEFAULT_LOSS,\n DEFAULT_EPOCHS,\n DEFAULT_BATCH_SIZE,\n DEFAULT_LAYERS\n)\nimport pandas as pd\nimport numpy as np\nimport math\nimport page_list\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\nimport datetime\n\nDATA_PATH = \"../Prices\"\n\nclass App(tk.Tk):\n def __init__(self, master=None):\n tk.Tk.__init__(self)\n\n self.shared_data = {\n \"days\": tk.StringVar(),\n \"optimizer\": tk.StringVar(),\n \"loss\": tk.StringVar(),\n \"epochs\": tk.StringVar(),\n \"batch\": tk.StringVar(),\n \"layers\": DEFAULT_LAYERS,\n \"cryptocurrency\": \"Bitcoin\",\n \"cryptocurrency_list\": [f for f in listdir(DATA_PATH) if (isfile(join(DATA_PATH, f)) and f.endswith(\".csv\"))],\n }\n self.shared_data[\"days\"].set(DEFAULT_DAYS)\n self.shared_data[\"optimizer\"].set(DEFAULT_OPTIMIZER)\n self.shared_data[\"loss\"].set(DEFAULT_LOSS)\n self.shared_data[\"epochs\"].set(DEFAULT_EPOCHS)\n self.shared_data[\"batch\"].set(DEFAULT_BATCH_SIZE)\n\n self.NUMBER_OF_CRYPTOCURRENCIES = len(self.shared_data[\"cryptocurrency_list\"])\n self.shared_data[\"cryptocurrency_list\"].sort()\n\n self.process_data(self.shared_data[\"cryptocurrency_list\"])\n\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight = 1)\n container.grid_columnconfigure(0, weight = 1)\n\n self.frames = {}\n for F in (StartPage, ConfigurationPage, TrainingPage, ResultPage, ChooseDatePage, ChooseIntervalPage):\n frame = F(container, self)\n self.frames[F] = frame\n frame.grid(row = 0, column = 0, sticky =\"nsew\")\n\n self.show_frame(page_list.START_PAGE)\n\n def show_frame(self, c):\n if c == page_list.START_PAGE:\n frame = self.frames[StartPage]\n elif c == page_list.CONFIGURATION_PAGE:\n frame = self.frames[ConfigurationPage]\n elif c == page_list.TRAINING_PAGE:\n frame = self.frames[TrainingPage]\n elif c == page_list.RESULT_PAGE:\n frame = self.frames[ResultPage]\n elif c == page_list.CHOOSE_DATE_PAGE:\n frame = self.frames[ChooseDatePage]\n elif c == page_list.CHOOSE_INTERVAL_PAGE:\n frame = self.frames[ChooseIntervalPage]\n else:\n return\n\n frame.tkraise()\n \n def generate_initial_values(self):\n initial_values = {}\n\n for i in range(self.dataset.shape[0]):\n initial_values[self.dataset.iloc[i, 0]] = self.dataset.iloc[i, 2:].values\n\n return initial_values\n\n def process_data(self, cryptocurrency_list):\n cryptocurrency_list.sort()\n TRAINING_RATE = 0.95\n \n self.dataset = pd.read_csv(DATA_PATH + \"/Bitcoin.csv\")\n self.dataset = self.dataset.iloc[:, 1:3]\n\n for i in range(0, self.NUMBER_OF_CRYPTOCURRENCIES):\n file = pd.read_csv(DATA_PATH + \"/\" + cryptocurrency_list[i])\n file = file.iloc[:, 1:3]\n self.dataset = pd.merge(self.dataset, file, on='Date', how='outer')\n\n self.dataset = self.dataset.replace(np.nan, 0)\n\n self.initial_values = self.generate_initial_values()\n\n self.dataset = self.dataset.iloc[:, 2:self.NUMBER_OF_CRYPTOCURRENCIES+2]\n\n NUMBER_OF_ROWS = self.dataset.shape[0]\n 
self.TRAINING_SET_SIZE = math.ceil(NUMBER_OF_ROWS*TRAINING_RATE)\n self.TEST_SET_SIZE = NUMBER_OF_ROWS - self.TRAINING_SET_SIZE\n\n self.test_set = self.dataset.iloc[self.TRAINING_SET_SIZE:, : ].values\n\n self.sc = MinMaxScaler(feature_range = (0, 1))\n\n def build_rnn(self):\n self.PREVIOUS_DAYS = int(self.shared_data[\"days\"].get())\n self.values = self.initial_values.copy()\n\n training_set = self.dataset.iloc[:self.TRAINING_SET_SIZE, : ].values\n training_set_scaled = self.sc.fit_transform(training_set)\n\n self.X_train = []\n self.y_train = []\n for i in range(self.PREVIOUS_DAYS, self.TRAINING_SET_SIZE):\n self.X_train.append(training_set_scaled[i-self.PREVIOUS_DAYS:i, 0:self.NUMBER_OF_CRYPTOCURRENCIES])\n self.y_train.append(training_set_scaled[i, 0:self.NUMBER_OF_CRYPTOCURRENCIES])\n\n self.X_train, self.y_train = np.array(self.X_train), np.array(self.y_train)\n\n self.X_train = self.X_train.reshape((self.X_train.shape[0], self.X_train.shape[1], self.NUMBER_OF_CRYPTOCURRENCIES))\n\n layer_list = self.shared_data[\"layers\"]\n NUMBER_OF_LAYERS = len(layer_list)\n\n regressor = Sequential()\n\n if NUMBER_OF_LAYERS == 1:\n layer = layer_list[0]\n units = layer.units\n dropout = layer.dropout\n\n regressor.add(LSTM(units = units, input_shape = (self.X_train.shape[1], self.NUMBER_OF_CRYPTOCURRENCIES)))\n\n if dropout > 0:\n regressor.add(Dropout(dropout))\n else:\n first_layer = layer_list[0]\n units = first_layer.units\n dropout = first_layer.dropout\n\n regressor.add(LSTM(units = units, return_sequences = True, input_shape = (self.X_train.shape[1], self.NUMBER_OF_CRYPTOCURRENCIES)))\n\n if dropout > 0:\n regressor.add(Dropout(dropout))\n\n for i in range(1, NUMBER_OF_LAYERS-1):\n layer = layer_list[i]\n units = layer.units\n dropout = layer.dropout\n\n regressor.add(LSTM(units = units, return_sequences = True))\n\n if dropout > 0:\n regressor.add(Dropout(dropout))\n\n last_layer = layer_list[-1]\n units = last_layer.units\n dropout = last_layer.dropout\n\n regressor.add(LSTM(units = units))\n\n if dropout > 0:\n regressor.add(Dropout(dropout))\n\n regressor.add(Dense(units = self.NUMBER_OF_CRYPTOCURRENCIES))\n regressor.compile(optimizer = self.shared_data[\"optimizer\"].get(), loss = self.shared_data[\"loss\"].get())\n\n self.rnn = regressor\n \n def get_test_predict(self):\n inputs = self.dataset[self.TRAINING_SET_SIZE-self.PREVIOUS_DAYS:].values\n inputs = inputs.reshape((inputs.shape[0], inputs.shape[1]))\n inputs = self.sc.transform(inputs)\n\n self.X_test = []\n for i in range(self.PREVIOUS_DAYS, self.PREVIOUS_DAYS+self.TEST_SET_SIZE):\n self.X_test.append(inputs[i-self.PREVIOUS_DAYS:i, 0:self.NUMBER_OF_CRYPTOCURRENCIES])\n\n self.X_test = np.array(self.X_test)\n self.X_test = np.reshape(self.X_test, (self.X_test.shape[0], self.X_test.shape[1], self.NUMBER_OF_CRYPTOCURRENCIES))\n\n predict = self.rnn.predict(self.X_test)\n predict = self.sc.inverse_transform(predict)\n\n return predict\n \n def predict_day(self, date):\n if date in self.values:\n return self.values[date]\n \n delta = datetime.timedelta(days=self.PREVIOUS_DAYS)\n end_date = datetime.date.fromisoformat(date)\n start_date = end_date - delta\n delta = datetime.timedelta(days=1)\n\n values = []\n while start_date < end_date:\n d = start_date.strftime('%Y-%m-%d')\n inputs = self.predict_day(d)\n inputs = inputs.reshape(1, -1)\n inputs = self.sc.transform(inputs)\n values.append(inputs)\n start_date += delta\n \n values = [values]\n values = np.array(values)\n values = np.reshape(values, (values.shape[0], 
values.shape[1], self.NUMBER_OF_CRYPTOCURRENCIES))\n\n predict = self.rnn.predict(values)\n predict = self.sc.inverse_transform(predict)\n\n self.values[date] = predict[0]\n return predict[0]\n\n def predict(self, cryptocurrency, date, end=None):\n cryptocurrency_list = [c[:-4] for c in self.shared_data[\"cryptocurrency_list\"]]\n cryptocurrency_index = cryptocurrency_list.index(cryptocurrency)\n\n if end == None:\n predict = self.predict_day(date)\n return predict[cryptocurrency_index]\n else:\n start_date = datetime.date.fromisoformat(date)\n end_date = datetime.date.fromisoformat(end)\n delta = datetime.timedelta(days=1)\n predict_list = []\n\n while start_date <= end_date:\n d = start_date.strftime('%Y-%m-%d')\n predict = self.predict_day(d)\n predict_list.append(predict[cryptocurrency_index])\n start_date += delta\n\n return predict_list" ]
[ [ "pandas.merge", "pandas.read_csv", "numpy.reshape", "numpy.array", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
brian220/Sketch2PointCloud
[ "17e8657ffc6605804ab4f1da89f446ea4d37665c" ]
[ "models/networks_graphx_refine_no_img_encoder.py" ]
[ "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport torchvision.models\nimport os\n\nimport utils.network_utils\nfrom utils.pointnet2_utils import PointNetSetAbstraction,PointNetFeaturePropagation\n\nimport cuda.emd.emd_module as emd\n\n# Set the path for pretrain weight\nos.environ['TORCH_HOME'] = '/media/caig/FECA2C89CA2C406F/sketch3D/pretrain_models'\n\nConv = nn.Conv2d\n\ndef wrapper(func, *args, **kwargs):\n class Wrapper(nn.Module):\n def __init__(self):\n super().__init__()\n self.func = func\n\n def forward(self, input):\n return self.func(input, *args, **kwargs)\n\n return Wrapper()\n\n\nclass TransformPC(nn.Module):\n \"\"\"\n Transform point cloud to camera coordinate\n\n Input:\n xyz: float tensor, (BS,N_PTS,3); input point cloud\n values assumed to be in (-1,1)\n az: float tensor, (BS); azimuthal angle of camera in radians\n el: float tensor, (BS); elevation of camera in radians\n \n Output:\n xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera\n co-ordinates\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.n_pts = cfg.CONST.NUM_POINTS\n \n def forward(self, xyz, az, el):\n batch_size = xyz.size(0)\n cam_xyz = self.world2cam(xyz, az, el, batch_size, N_PTS=self.n_pts)\n return cam_xyz\n\n def world2cam(self, xyz, az, el, batch_size, N_PTS=1024):\n # y ---> x\n rotmat_az=[\n [torch.cos(az),torch.sin(az),torch.zeros_like(az)],\n [-torch.sin(az),torch.cos(az),torch.zeros_like(az)],\n [torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)]\n ]\n rotmat_az = [ torch.stack(x) for x in rotmat_az ]\n \n # z ---> x, in dataloader, az = original az - 90 degree, which means here is actually x ----> -z \n rotmat_el=[\n [torch.cos(el),torch.zeros_like(az), torch.sin(el)],\n [torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)],\n [-torch.sin(el),torch.zeros_like(az), torch.cos(el)]\n ]\n rotmat_el = [ torch.stack(x) for x in rotmat_el ]\n \n rotmat_az = torch.stack(rotmat_az, 0) # [3,3,B]\n rotmat_el = torch.stack(rotmat_el, 0) # [3,3,B]\n rotmat_az = rotmat_az.permute(2, 0, 1) # [B,3,3]\n rotmat_el = rotmat_el.permute(2, 0, 1) # [B,3,3]\n rotmat = torch.matmul(rotmat_el, rotmat_az)\n\n # Transformation(t)\n # Distance of object from camera - fixed to 2\n d = 2.\n # Calculate translation params\n tx, ty, tz = [0, 0, d]\n \n tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size, 1) # [B,3]\n tr_mat = torch.unsqueeze(tr_mat,2) # [B,3,1]\n tr_mat = tr_mat.permute(0, 2, 1) # [B,1,3]\n tr_mat = tr_mat.repeat(1, N_PTS, 1) # [B,N_PTS,3]\n tr_mat = utils.network_utils.var_or_cuda(tr_mat) # [B,N_PTS,3]\n\n xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1)\n\n return xyz_out.permute(0, 2, 1)\n\n\nclass FeatureProjection(nn.Module):\n \"\"\"\n Project the pointcloud to 2d image and get the corresponding image features at\n the project location\n \n Input:\n img_feats: multi-scale image features \n pc: input point clouds (in camera coordinate) [B, N, 3]\n\n Output:\n pc_feats_trans: pointcloud xyz + multi-view image features (by feature ptojection)\n\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.concat = wrapper(torch.cat, dim=-1)\n\n def forward(self, img_feats, pc):\n pc_feats = []\n pc_feats += [self.get_projection(img_feat, pc) for img_feat in img_feats]\n pc_feats_trans = self.concat(pc_feats)\n return pc_feats_trans\n\n def _project(self, img_feats, xs, ys):\n x, y = xs.flatten(), ys.flatten()\n idb = 
torch.arange(img_feats.shape[0], device=img_feats.device)\n idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()\n\n x1, y1 = torch.floor(x), torch.floor(y)\n x2, y2 = torch.ceil(x), torch.ceil(y)\n q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)\n q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)\n q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)\n q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)\n\n weights = ((x2 - x) * (y2 - y)).unsqueeze(1)\n q11 *= weights\n\n weights = ((x - x1) * (y2 - y)).unsqueeze(1)\n q21 *= weights\n\n weights = ((x2 - x) * (y - y1)).unsqueeze(1)\n q12 *= weights\n\n weights = ((x - x1) * (y - y1)).unsqueeze(1)\n q22 *= weights\n out = q11 + q12 + q21 + q22\n return out.view(img_feats.shape[0], -1, img_feats.shape[1])\n\n def get_projection(self, img_feat, pc):\n _, _, h_, w_ = tuple(img_feat.shape)\n X, Y, Z = pc[..., 0], pc[..., 1], pc[..., 2]\n w = (420.*X/abs(Z) + (111.5))\n h = (420.*Y/abs(Z) + (111.5))\n w = torch.clamp(w, 0., 223.)\n h = torch.clamp(h, 0., 223.)\n \n x = w / (223. / (w_ - 1.))\n y = h / (223. / (h_ - 1.))\n feats = self._project(img_feat, x, y)\n return feats\n\n\nclass PointNet2(nn.Module):\n \"\"\"\n Point cloud segmentation (set abstraction + feature propagation) in pointnet++\n \n Input:\n xyz: input points position [B, N, 3]\n\n output:\n point_feature: per-point features encode by pointnet [B, 128, N]\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n \n self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.1, nsample=64, in_channel=3, mlp=[64, 64, 128], group_all=False)\n self.sa2 = PointNetSetAbstraction(npoint=384, radius=0.2, nsample=64, in_channel=128 + 3, mlp=[128, 128, 256], group_all=False)\n self.sa3 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=256 + 3, mlp=[256, 256, 512], group_all=False)\n self.sa4 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[512, 512, 1024], group_all=True)\n \n self.fp4 = PointNetFeaturePropagation(in_channel=512 + 1024, mlp=[512, 512])\n self.fp3 = PointNetFeaturePropagation(in_channel=256 + 512 , mlp=[512, 256])\n self.fp2 = PointNetFeaturePropagation(in_channel=128 + 256 , mlp=[256, 128])\n self.fp1 = PointNetFeaturePropagation(in_channel=0 + 128 , mlp=[128, 128, 128])\n \n def forward(self, xyz):\n xyz = xyz.transpose(2, 1) # [B, C, N]\n \n l0_xyz = xyz\n l0_points = None\n\n l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)\n l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)\n l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)\n l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)\n\n l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)\n l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)\n l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)\n l0_points = self.fp1(l0_xyz, l1_xyz, l0_points, l1_points)\n \n return l0_points\n\n\nclass LinearDisplacementNet(nn.Module):\n \"\"\"\n Predict the displacement from pointcloud features and image features\n\n Input:\n pc_features: poincloud features from pointnet2 [B, D, N]\n proj_features: image features from feature projection [B, N, D']\n noises: noises vector [B, N, n_length]\n\n Output:\n displacement: perpoint displacement [B, C, N]\n\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n\n self.conv1 = nn.Conv1d(1120, 960, 1)\n self.bn1 = nn.BatchNorm1d(960)\n self.conv2 = nn.Conv1d(960, 512, 1)\n self.bn2 = nn.BatchNorm1d(512)\n self.conv3 = 
nn.Conv1d(512, 256, 1)\n self.bn3 = nn.BatchNorm1d(256)\n self.conv4 = nn.Conv1d(256, 128, 1)\n self.bn4 = nn.BatchNorm1d(128)\n self.conv5 = nn.Conv1d(128, 64, 1)\n self.bn5 = nn.BatchNorm1d(64)\n self.conv6 = nn.Conv1d(64, 3, 1)\n\n def forward(self, transform_xyz, proj_features, pc_features, noises):\n noises = noises.transpose(2, 1) # [B, n_length, N]\n noises = utils.network_utils.var_or_cuda(noises)\n \n proj_features = proj_features.transpose(2, 1) # [B, D', N]\n proj_features = utils.network_utils.var_or_cuda(proj_features)\n \n # concat the img features after each point features\n refine_features = torch.cat((pc_features, proj_features, noises), 1) # [B, D+D'+n_length, N]\n \n refine_features = F.relu(self.bn1(self.conv1(refine_features)))\n refine_features = F.relu(self.bn2(self.conv2(refine_features)))\n refine_features = F.relu(self.bn3(self.conv3(refine_features)))\n refine_features = F.relu(self.bn4(self.conv4(refine_features)))\n refine_features = F.relu(self.bn5(self.conv5(refine_features)))\n displacements = self.conv6(refine_features)\n\n displacements = F.sigmoid(displacements) * self.cfg.REFINE.RANGE_MAX * 2 - self.cfg.REFINE.RANGE_MAX\n \n return displacements\n\n\nclass GRAPHX_REFINE_MODEL(nn.Module):\n \"\"\"\n Refine the point cloud based on the input image\n\n Input:\n xyz: point cloud from reconstruction model\n\n Ouput:\n update_pc: updated point cloud\n \"\"\"\n\n def __init__(self, cfg, in_channels, optimizer=None):\n super().__init__()\n self.cfg = cfg\n \n # Refinement\n self.transform_pc = TransformPC(cfg)\n self.feature_projection = FeatureProjection(cfg)\n self.pc_encode = PointNet2(cfg)\n self.displacement_net = LinearDisplacementNet(cfg)\n \n self.optimizer = None if optimizer is None else optimizer(self.parameters())\n \n # emd loss\n self.emd_dist = emd.emdModule()\n\n if torch.cuda.is_available():\n self.transform_pc = torch.nn.DataParallel(self.transform_pc, device_ids=cfg.CONST.DEVICE).cuda()\n self.feature_projection = torch.nn.DataParallel(self.feature_projection, device_ids=cfg.CONST.DEVICE).cuda()\n self.pc_encode = torch.nn.DataParallel(self.pc_encode, device_ids=cfg.CONST.DEVICE).cuda()\n self.displacement_net = torch.nn.DataParallel(self.displacement_net, device_ids=cfg.CONST.DEVICE).cuda()\n self.emd_dist = torch.nn.DataParallel(self.emd_dist, device_ids=cfg.CONST.DEVICE).cuda()\n self.cuda()\n \n def train_step(self, img_features, xyz, gt_pc, view_az, view_el):\n '''\n Input:\n img_features\n init pc: [B, N, 3]\n gt pc: [B, N, 3]\n view_az: [B]\n view_el: [B]\n\n Output:\n loss\n pred_pc: [B, N, 3]\n '''\n refine_pc = self.refine(img_features, xyz, view_az, view_el)\n # compute reconstruction loss\n emd_loss, _ = self.emd_dist(\n refine_pc, gt_pc, eps=0.005, iters=50\n )\n rec_loss = torch.sqrt(emd_loss).mean(1).mean()\n\n self.refiner_backward(rec_loss)\n\n rec_loss_np = rec_loss.detach().item()\n\n return rec_loss_np*1000\n\n def valid_step(self, img_features, xyz, gt_pc, view_az, view_el):\n # refine the point cloud\n refine_pc = self.refine(img_features, xyz, view_az, view_el)\n # compute reconstruction loss\n emd_loss, _ = self.emd_dist(\n refine_pc, gt_pc, eps=0.005, iters=50\n )\n rec_loss = torch.sqrt(emd_loss).mean(1).mean()\n\n return rec_loss*1000, pred_pc\n\n def refine(self, img_features, xyz, view_az, view_el):\n # img_features = self.img_enc(img)\n transform_xyz = self.transform_pc(xyz, view_az, view_el)\n proj_features = self.feature_projection(img_features, transform_xyz)\n pc_features = self.pc_encode(transform_xyz)\n 
noises = torch.normal(mean=0.0, std=1, size=(self.cfg.CONST.BATCH_SIZE, self.cfg.CONST.NUM_POINTS, self.cfg.REFINE.NOISE_LENGTH))\n displacements = self.displacement_net(transform_xyz, proj_features, pc_features, noises)\n displacements = displacements.transpose(2, 1)\n refine_pc = xyz + displacements\n\n return refine_pc\n\n def refiner_backward(self, rec_loss):\n self.train(True)\n self.optimizer.zero_grad()\n rec_loss.backward()\n self.optimizer.step()\n\n" ]
[ [ "torch.cat", "torch.sin", "torch.cuda.is_available", "torch.clamp", "torch.sqrt", "torch.tensor", "torch.nn.functional.sigmoid", "torch.arange", "torch.ones_like", "torch.cos", "torch.normal", "torch.nn.BatchNorm1d", "torch.floor", "torch.zeros_like", "torch.unsqueeze", "torch.nn.Conv1d", "torch.stack", "torch.ceil", "torch.matmul", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
escribano89/cartpole-REINFORCE
[ "e2d79ce721a50feea783c368a66c6f80631cdf26" ]
[ "policy.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\nclass Policy(nn.Module):\n def __init__(self, s_size=4, h_size=8, a_size=2):\n super(Policy, self).__init__()\n \n self.fc1 = nn.Linear(s_size, h_size)\n self.fc2 = nn.Linear(h_size, a_size)\n\n def forward(self, x):\n x = F.selu(self.fc1(x))\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n \n def act(self, state, device):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n probs = self.forward(state).cpu()\n # >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))\n # >>> m.sample() # equal probability of 0, 1, 2, 3\n m = Categorical(probs)\n action = m.sample()\n return action.item(), m.log_prob(action)" ]
[ [ "torch.nn.Linear", "torch.nn.functional.softmax", "torch.distributions.Categorical", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dpton/tensorflow-attention-rnn
[ "a1b8884640ca49d6fdebc7df2167e4353364a243" ]
[ "utils/reprocessing.py" ]
[ "# _*_ coding:utf-8 _*_\n# !/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport logging\nimport nltk\nfrom nltk.corpus import stopwords\nfrom gensim.models.wrappers import FastText\nfrom gensim.models import Word2Vec\nimport random\nimport threading\nimport tensorflow as tf\n\n\n\nimport re\nimport unicodedata\n\nfrom ftfy import fix_text\nfrom unidecode import unidecode\n\nfrom textacy.compat import unicode_\nfrom textacy.constants import (CURRENCIES, URL_REGEX, SHORT_URL_REGEX, EMAIL_REGEX,\n PHONE_REGEX, NUMBERS_REGEX, CURRENCY_REGEX,\n LINEBREAK_REGEX, NONBREAKING_SPACE_REGEX,\n PUNCT_TRANSLATE_UNICODE,\n PUNCT_TRANSLATE_BYTES)\n\n\nUNK = u'_UNK'\nGO = u'_GO'\nEOS = u'_EOS'\nPAD = u'_PAD'\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\nSPECIAL_WORDS = [PAD, GO, EOS, UNK]\ndecode_max_length = 25\n\ndef fix_bad_unicode(text, normalization='NFC'):\n \"\"\"\n Fix unicode text that's \"broken\" using `ftfy <http://ftfy.readthedocs.org/>`_;\n this includes mojibake, HTML entities and other code cruft,\n and non-standard forms for display purposes.\n\n Args:\n text (str): raw text\n normalization ({'NFC', 'NFKC', 'NFD', 'NFKD'}): if 'NFC',\n combines characters and diacritics written using separate code points,\n e.g. converting \"e\" plus an acute accent modifier into \"é\"; unicode\n can be converted to NFC form without any change in its meaning!\n if 'NFKC', additional normalizations are applied that can change\n the meanings of characters, e.g. ellipsis characters will be replaced\n with three periods\n\n Returns:\n str\n \"\"\"\n return fix_text(text, normalization=normalization)\n\n\ndef transliterate_unicode(text):\n \"\"\"\n Try to represent unicode data in ascii characters similar to what a human\n with a US keyboard would choose using unidecode <https://pypi.python.org/pypi/Unidecode>\n \"\"\"\n return unidecode(text)\n\n\ndef normalize_whitespace(text):\n \"\"\"\n Given ``text`` str, replace one or more spacings with a single space, and one\n or more linebreaks with a single newline. Also strip leading/trailing whitespace.\n \"\"\"\n return NONBREAKING_SPACE_REGEX.sub(' ', LINEBREAK_REGEX.sub(r'\\n', text)).strip()\n\n\ndef unpack_contractions(text):\n \"\"\"\n Replace *English* contractions in ``text`` str with their unshortened forms.\n N.B. 
The \"'d\" and \"'s\" forms are ambiguous (had/would, is/has/possessive),\n so are left as-is.\n \"\"\"\n # standard\n text = re.sub(\n r\"(\\b)([Aa]re|[Cc]ould|[Dd]id|[Dd]oes|[Dd]o|[Hh]ad|[Hh]as|[Hh]ave|[Ii]s|[Mm]ight|[Mm]ust|[Ss]hould|[Ww]ere|[Ww]ould)n't\", r\"\\1\\2 not\", text)\n text = re.sub(\n r\"(\\b)([Hh]e|[Ii]|[Ss]he|[Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Yy]ou)'ll\", r\"\\1\\2 will\", text)\n text = re.sub(\n r\"(\\b)([Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Yy]ou)'re\", r\"\\1\\2 are\", text)\n text = re.sub(\n r\"(\\b)([Ii]|[Ss]hould|[Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Ww]ould|[Yy]ou)'ve\", r\"\\1\\2 have\", text)\n # non-standard\n text = re.sub(r\"(\\b)([Cc]a)n't\", r\"\\1\\2n not\", text)\n text = re.sub(r\"(\\b)([Ii])'m\", r\"\\1\\2 am\", text)\n text = re.sub(r\"(\\b)([Ll]et)'s\", r\"\\1\\2 us\", text)\n text = re.sub(r\"(\\b)([Ww])on't\", r\"\\1\\2ill not\", text)\n text = re.sub(r\"(\\b)([Ss])han't\", r\"\\1\\2hall not\", text)\n text = re.sub(r\"(\\b)([Yy])(?:'all|a'll)\", r\"\\1\\2ou all\", text)\n return text\n\n\ndef replace_urls(text, replace_with='<url>'):\n \"\"\"Replace all URLs in ``text`` str with ``replace_with`` str.\"\"\"\n return URL_REGEX.sub(replace_with, SHORT_URL_REGEX.sub(replace_with, text))\n\n\ndef replace_emails(text, replace_with='<email>'):\n \"\"\"Replace all emails in ``text`` str with ``replace_with`` str.\"\"\"\n return EMAIL_REGEX.sub(replace_with, text)\n\n\ndef replace_phone_numbers(text, replace_with='<phone>'):\n \"\"\"Replace all phone numbers in ``text`` str with ``replace_with`` str.\"\"\"\n return PHONE_REGEX.sub(replace_with, text)\n\n\ndef replace_numbers(text, replace_with='<number>'):\n \"\"\"Replace all numbers in ``text`` str with ``replace_with`` str.\"\"\"\n return NUMBERS_REGEX.sub(replace_with, text)\n\n\ndef replace_currency_symbols(text, replace_with='<currency>'):\n \"\"\"\n Replace all currency symbols in ``text`` str with string specified by ``replace_with`` str.\n\n Args:\n text (str): raw text\n replace_with (str): if None (default), replace symbols with\n their standard 3-letter abbreviations (e.g. '$' with 'USD', '£' with 'GBP');\n otherwise, pass in a string with which to replace all symbols\n (e.g. \"*CURRENCY*\")\n\n Returns:\n str\n \"\"\"\n if replace_with is None:\n for k, v in CURRENCIES.items():\n text = text.replace(k, v)\n return text\n else:\n return CURRENCY_REGEX.sub(replace_with, text)\n\n\ndef remove_punct(text, marks=None):\n \"\"\"\n Remove punctuation from ``text`` by replacing all instances of ``marks``\n with an empty string.\n\n Args:\n text (str): raw text\n marks (str): If specified, remove only the characters in this string,\n e.g. ``marks=',;:'`` removes commas, semi-colons, and colons.\n Otherwise, all punctuation marks are removed.\n\n Returns:\n str\n\n .. note:: When ``marks=None``, Python's built-in :meth:`str.translate()` is\n used to remove punctuation; otherwise,, a regular expression is used\n instead. 
The former's performance is about 5-10x faster.\n \"\"\"\n if marks:\n return re.sub('[{}]+'.format(re.escape(marks)), '', text, flags=re.UNICODE)\n else:\n if isinstance(text, unicode_):\n return text.translate(PUNCT_TRANSLATE_UNICODE)\n else:\n return text.translate(None, PUNCT_TRANSLATE_BYTES)\n\n\ndef remove_accents(text, method='unicode'):\n \"\"\"\n Remove accents from any accented unicode characters in ``text`` str, either by\n transforming them into ascii equivalents or removing them entirely.\n\n Args:\n text (str): raw text\n method ({'unicode', 'ascii'}): if 'unicode', remove accented\n char for any unicode symbol with a direct ASCII equivalent; if 'ascii',\n remove accented char for any unicode symbol\n\n NB: the 'ascii' method is notably faster than 'unicode', but less good\n\n Returns:\n str\n\n Raises:\n ValueError: if ``method`` is not in {'unicode', 'ascii'}\n \"\"\"\n if method == 'unicode':\n return ''.join(c for c in unicodedata.normalize('NFKD', text)\n if not unicodedata.combining(c))\n elif method == 'ascii':\n return unicodedata.normalize('NFKD', text).encode('ascii', errors='ignore').decode('ascii')\n else:\n msg = '`method` must be either \"unicode\" and \"ascii\", not {}'.format(\n method)\n raise ValueError(msg)\n\n\ndef replace_hashtag(text, replace_with_hashtag='<hashtag>'):\n text = ' '.join(\n re.sub(\"(#[A-Za-z0-9]+)\", replace_with_hashtag, text).split())\n return text\n\n\ndef replace_nametag(text, replace_with_nametag='<nametag>'):\n text = ' '.join(\n re.sub(\"(@[A-Za-z0-9]+)\", replace_with_nametag, text).split())\n return text\n\n\ndef preprocess_text(text, fix_unicode=False, lowercase=False, transliterate=False,\n no_urls=False, no_emails=False, no_phone_numbers=False,\n no_numbers=False, no_currency_symbols=False, no_punct=False,\n no_contractions=False, no_accents=False, no_hashtag=False, no_nametag=False):\n \"\"\"\n This is a modified version of the function \"textacy.preprocess_text\".\n Normalize various aspects of a raw text doc before parsing it with Spacy.\n A convenience function for applying all other preprocessing functions in one go.\n\n Args:\n text (str): raw text to preprocess\n fix_unicode (bool): if True, fix \"broken\" unicode such as\n mojibake and garbled HTML entities\n lowercase (bool): if True, all text is lower-cased\n transliterate (bool): if True, convert non-ascii characters\n into their closest ascii equivalents\n no_urls (bool): if True, replace all URL strings with '*URL*'\n no_emails (bool): if True, replace all email strings with '*EMAIL*'\n no_phone_numbers (bool): if True, replace all phone number strings\n with '<phone>'\n no_numbers (bool): if True, replace all number-like strings\n with '<number>'\n no_currency_symbols (bool): if True, replace all currency symbols\n with their standard 3-letter abbreviations\n no_punct (bool): if True, remove all punctuation (replace with\n empty string)\n no_contractions (bool): if True, replace *English* contractions\n with their unshortened forms\n no_accents (bool): if True, replace all accented characters\n with unaccented versions; NB: if `transliterate` is True, this option\n is redundant\n not_hashtag (bool): if True, replace all hashtag (twitter, facebook)\n\n Returns:\n str: input ``text`` processed according to function args\n\n .. 
warning:: These changes may negatively affect subsequent NLP analysis\n performed on the text, so choose carefully, and preprocess at your own\n risk!\n \"\"\"\n if fix_unicode is True:\n text = fix_bad_unicode(text, normalization='NFC')\n if transliterate is True:\n text = transliterate_unicode(text)\n if no_urls is True:\n text = replace_urls(text)\n if no_emails is True:\n text = replace_emails(text)\n if no_phone_numbers is True:\n text = replace_phone_numbers(text)\n if no_numbers is True:\n text = replace_numbers(text)\n if no_currency_symbols is True:\n text = replace_currency_symbols(text)\n if no_contractions is True:\n text = unpack_contractions(text)\n if no_accents is True:\n text = remove_accents(text, method='unicode')\n if no_punct is True:\n text = remove_punct(text)\n if lowercase is True:\n text = text.lower()\n if no_hashtag is True:\n text = replace_hashtag(text)\n if no_nametag is True:\n text = replace_nametag(text)\n # always normalize whitespace; treat linebreaks separately from spacing\n text = normalize_whitespace(text)\n\n return text\n\n\ndef text_normalize(string, convert2digit=True):\n text = preprocess_text(text=string, fix_unicode=False, lowercase=True, transliterate=False,\n no_urls=True, no_emails=True, no_phone_numbers=True,\n no_numbers=True, no_currency_symbols=True, no_punct=False,\n no_contractions=True, no_accents=True, not_hashtag=True)\n if convert2digit:\n return text2num(text)\n else:\n return text\n\n\ndef remove_stopwords(word_list):\n filtered_words = [\n word for word in word_list if word not in stopwords.words('english')]\n return filtered_words\n\n\ndef tokenize(string):\n text = text_normalize(string)\n return nltk.word_tokenize(text, language='english')\n\n\nclass wordEmbedding(object):\n '''\n This class wraps the two popular models using for word embedding, FastText and Word2Vec\n '''\n\n def __init__(self, model_path, model_type='fasttext', **kwarg):\n if model_type == \"fasttext\":\n self._model = FastText.load_fasttext_format(model_path)\n elif model_type == \"word2vec\":\n self._model = Word2Vec.load_word2vec_format(model_path)\n else:\n raise NotImplementedError(\"other model is not supported\")\n\n def sentence_to_index(self, sentence):\n list_of_index = [self._model.wv.vocab[\n word].index for word in tokenize(sentence)]\n return list_of_index\n\n def get_embedding_matrix(self):\n return self._model.syn0\n\ndef create_queue(sess = None, coord = None, encode_data = None,\n decode_data = None, capacity = 1024, batch_size = 32, scope = None):\n\n encode = tf.placeholder(tf.int32, shape=[None], name=\"encode\")\n decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name=\"decode\")\n weight = tf.placeholder(tf.float32, shape=[decode_max_length + 1], name=\"weight\")\n queue = tf.PaddingFIFOQueue(capacity = capacity,\n dtypes = [tf.int32, tf.int32, tf.float32],\n shapes = [[None], [decode_max_length + 2], [decode_max_length + 1]],\n name = 'FIFOQueue')\n enqueue_op = queue.enqueue([encode, decode, weight])\n\n\n def _iterator():\n assert len(encode_data) == len(decode_data)\n data = list(zip(encode_data, decode_data))\n random.shuffle(data)\n encode, decode = [list(t) for t in zip(*data)]\n\n for i in range(len(data)):\n# if len(encode[i]) > encode_max_length - 1 or len(decode[i]) > decode_max_length - 1:\n# raise 'the sentence is longer than max_length'\n #_encode = encode[i][:encode_max_length]\n #_encode = _encode + [PAD_ID] * (encode_max_length - len(_encode))\n _encode = encode[i]\n _decode = 
decode[i][:decode_max_length]\n \n \n \n _decode_padding_size = decode_max_length - len(_decode)\n _weight = [1.0] * (len(_decode) + 1) + [0.0] * _decode_padding_size\n _decode = [GO_ID] + _decode + [EOS_ID] + [PAD_ID] * _decode_padding_size\n \n yield _encode, _decode, _weight#, _encode_length, _decode_length\n def basic_enqueue(sess, encode_input, decode_input = None):\n# if len(encode_input) > encode_max_length:\n# encode_input = encode_input[:encode_max_length]\n# _encode = encode_input + [PAD_ID] * (encode_max_length - len(encode_input))\n _encode = encode_input\n if decode_input is None:\n _decode = [GO_ID] + [PAD_ID] * (decode_max_length + 1)\n _weight = [1.0] * (decode_max_length + 1)\n else:\n _decode_padding_size = decode_max_length - len(decode_input)\n _decode = [GO_ID] + decode_input + [EOS_ID] + [PAD_ID] * _decode_padding_size\n _weight = [1.0] * (len(decode_input) + 1) + [0.0] * _decode_padding_size\n feed_dict = {\n encode: _encode,\n decode: _decode,\n weight: _weight\n }\n # Push all the training examples to the queue\n sess.run(enqueue_op, feed_dict=feed_dict)\n def _enqueue(sess, coord):\n try:\n while not coord.should_stop():\n for _encode, _decode, _weight in _iterator():\n feed_dict = {\n encode: _encode,\n decode: _decode,\n weight: _weight,\n }\n # Push all the training examples to the queue\n sess.run(enqueue_op, feed_dict=feed_dict)\n except tf.errors.CancelledError:\n coord.request_stop()\n #Start thread enqueue data\n # if encode_data is None or decode_data is None:\n # return queue, None, basic_enqueue\n enqueue_threads = []\n ## enqueue asynchronously\n for i in range(num_threads):\n enqueue_thread = threading.Thread(target=_enqueue, args=(sess, coord))\n enqueue_thread.setDaemon(True)\n enqueue_threads.append(enqueue_thread)\n return queue, enqueue_threads, basic_enqueue\n\nif __name__ == '__main__':\n print(tokenize(\"http://google.com.vn I love the cat @Peter with 69USD\"))\n" ]
[ [ "tensorflow.placeholder", "tensorflow.PaddingFIFOQueue" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
zzm422/automl
[ "38d6623e0983d6ee1d16bb5b1db5c6b2ccbb3ecf" ]
[ "efficientdet/inference.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Inference related utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# gtype import\nfrom __future__ import print_function\n\nimport copy\nimport functools\nimport os\nimport time\n\nfrom absl import logging\nimport numpy as np\nfrom PIL import Image\nimport tensorflow.compat.v1 as tf\nfrom typing import Text, Dict, Any, List, Tuple, Union\nimport yaml\n\nimport anchors\nimport dataloader\nimport det_model_fn\nimport hparams_config\nimport utils\nfrom visualize import vis_utils\n\ncoco_id_mapping = {\n 1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane',\n 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light',\n 11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench',\n 16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow',\n 22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack',\n 28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee',\n 35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite',\n 39: 'baseball bat', 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard',\n 43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork',\n 49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana', 53: 'apple',\n 54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot', 58: 'hot dog',\n 59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch',\n 64: 'potted plant', 65: 'bed', 67: 'dining table', 70: 'toilet', 72: 'tv',\n 73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone',\n 78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator',\n 84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear',\n 89: 'hair drier', 90: 'toothbrush',\n} # pyformat: disable\n\n\ndef image_preprocess(image, image_size: Union[int, Tuple[int, int]]):\n \"\"\"Preprocess image for inference.\n\n Args:\n image: input image, can be a tensor or a numpy arary.\n image_size: single integer of image size for square image or tuple of two\n integers, in the format of (image_height, image_width).\n\n Returns:\n (image, scale): a tuple of processed image and its scale.\n \"\"\"\n input_processor = dataloader.DetectionInputProcessor(image, image_size)\n input_processor.normalize_image()\n input_processor.set_scale_factors_to_output_size()\n image = input_processor.resize_and_crop_image()\n image_scale = input_processor.image_scale_to_original\n return image, image_scale\n\n\[email protected]_graph\ndef batch_image_files_decode(image_files):\n raw_images = tf.TensorArray(tf.uint8, size=0, dynamic_size=True)\n for i in tf.range(tf.shape(image_files)[0]):\n image = tf.io.decode_image(image_files[i])\n image.set_shape([None, None, None])\n raw_images = raw_images.write(i, image)\n return raw_images.stack()\n\n\ndef 
batch_image_preprocess(raw_images,\n image_size: Union[int, Tuple[int, int]],\n batch_size: int = None):\n \"\"\"Preprocess batched images for inference.\n\n Args:\n raw_images: a list of images, each image can be a tensor or a numpy arary.\n image_size: single integer of image size for square image or tuple of two\n integers, in the format of (image_height, image_width).\n batch_size: if None, use map_fn to deal with dynamic batch size.\n\n Returns:\n (image, scale): a tuple of processed images and scales.\n \"\"\"\n if not batch_size:\n # map_fn is a little bit slower due to some extra overhead.\n map_fn = functools.partial(image_preprocess, image_size=image_size)\n images, scales = tf.map_fn(\n map_fn, raw_images, dtype=(tf.float32, tf.float32), back_prop=False)\n return (images, scales)\n\n # If batch size is known, use a simple loop.\n scales, images = [], []\n for i in range(batch_size):\n image, scale = image_preprocess(raw_images[i], image_size)\n scales.append(scale)\n images.append(image)\n images = tf.stack(images)\n scales = tf.stack(scales)\n return (images, scales)\n\n\ndef build_inputs(image_path_pattern: Text, image_size: Union[int, Tuple[int,\n int]]):\n \"\"\"Read and preprocess input images.\n\n Args:\n image_path_pattern: a path to indicate a single or multiple files.\n image_size: single integer of image size for square image or tuple of two\n integers, in the format of (image_height, image_width).\n\n Returns:\n (raw_images, images, scales): raw images, processed images, and scales.\n\n Raises:\n ValueError if image_path_pattern doesn't match any file.\n \"\"\"\n raw_images, images, scales = [], [], []\n for f in tf.io.gfile.glob(image_path_pattern):\n image = Image.open(f)\n raw_images.append(image)\n image, scale = image_preprocess(image, image_size)\n images.append(image)\n scales.append(scale)\n if not images:\n raise ValueError(\n 'Cannot find any images for pattern {}'.format(image_path_pattern))\n return raw_images, tf.stack(images), tf.stack(scales)\n\n\ndef build_model(model_name: Text, inputs: tf.Tensor, **kwargs):\n \"\"\"Build model for a given model name.\n\n Args:\n model_name: the name of the model.\n inputs: an image tensor or a numpy array.\n **kwargs: extra parameters for model builder.\n\n Returns:\n (cls_outputs, box_outputs): the outputs for class and box predictions.\n Each is a dictionary with key as feature level and value as predictions.\n \"\"\"\n model_arch = det_model_fn.get_model_arch(model_name)\n cls_outputs, box_outputs = utils.build_model_with_precision(\n kwargs.get('precision', None), model_arch, inputs, model_name, **kwargs)\n if kwargs.get('precision', None):\n # Post-processing has multiple places with hard-coded float32.\n # TODO(tanmingxing): Remove them once post-process can adpat to dtypes.\n cls_outputs = {k: tf.cast(v, tf.float32) for k, v in cls_outputs.items()}\n box_outputs = {k: tf.cast(v, tf.float32) for k, v in box_outputs.items()}\n return cls_outputs, box_outputs\n\n\ndef restore_ckpt(sess, ckpt_path, ema_decay=0.9998, export_ckpt=None):\n \"\"\"Restore variables from a given checkpoint.\n\n Args:\n sess: a tf session for restoring or exporting models.\n ckpt_path: the path of the checkpoint. Can be a file path or a folder path.\n ema_decay: ema decay rate. 
If None or zero or negative value, disable ema.\n export_ckpt: whether to export the restored model.\n \"\"\"\n sess.run(tf.global_variables_initializer())\n if tf.io.gfile.isdir(ckpt_path):\n ckpt_path = tf.train.latest_checkpoint(ckpt_path)\n if ema_decay > 0:\n ema = tf.train.ExponentialMovingAverage(decay=ema_decay)\n ema_vars = utils.get_ema_vars()\n var_dict = ema.variables_to_restore(ema_vars)\n ema_assign_op = ema.apply(ema_vars)\n else:\n var_dict = utils.get_ema_vars()\n ema_assign_op = None\n tf.train.get_or_create_global_step()\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(var_dict, max_to_keep=1)\n saver.restore(sess, ckpt_path)\n\n if export_ckpt:\n print('export model to {}'.format(export_ckpt))\n if ema_assign_op is not None:\n sess.run(ema_assign_op)\n saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)\n saver.save(sess, export_ckpt)\n\n\ndef det_post_process_combined(params, cls_outputs, box_outputs, scales,\n min_score_thresh, max_boxes_to_draw):\n \"\"\"A combined version of det_post_process with dynamic batch size support.\"\"\"\n batch_size = tf.shape(list(cls_outputs.values())[0])[0]\n cls_outputs_all = []\n box_outputs_all = []\n # Concatenates class and box of all levels into one tensor.\n for level in range(params['min_level'], params['max_level'] + 1):\n if params['data_format'] == 'channels_first':\n cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])\n box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])\n\n cls_outputs_all.append(tf.reshape(\n cls_outputs[level],\n [batch_size, -1, params['num_classes']]))\n box_outputs_all.append(tf.reshape(\n box_outputs[level], [batch_size, -1, 4]))\n cls_outputs_all = tf.concat(cls_outputs_all, 1)\n box_outputs_all = tf.concat(box_outputs_all, 1)\n\n # Create anchor_label for picking top-k predictions.\n eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],\n params['num_scales'], params['aspect_ratios'],\n params['anchor_scale'], params['image_size'])\n anchor_boxes = eval_anchors.boxes\n scores = tf.math.sigmoid(cls_outputs_all)\n # apply bounding box regression to anchors\n boxes = anchors.decode_box_outputs_tf(box_outputs_all, anchor_boxes)\n boxes = tf.expand_dims(boxes, axis=2)\n scales = tf.expand_dims(scales, axis=-1)\n nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (\n tf.image.combined_non_max_suppression(\n boxes,\n scores,\n max_boxes_to_draw,\n max_boxes_to_draw,\n score_threshold=min_score_thresh,\n clip_boxes=False))\n del valid_detections # to be used in futue.\n\n image_ids = tf.cast(\n tf.tile(\n tf.expand_dims(tf.range(batch_size), axis=1), [1, max_boxes_to_draw]),\n dtype=tf.float32)\n y = nmsed_boxes[..., 0] * scales\n x = nmsed_boxes[..., 1] * scales\n height = nmsed_boxes[..., 2] * scales - y\n width = nmsed_boxes[..., 3] * scales - x\n detection_list = [\n # Format: (image_ids, y, x, height, width, score, class)\n image_ids, y, x, height, width, nmsed_scores,\n tf.cast(nmsed_classes + 1, tf.float32)\n ]\n detections = tf.stack(detection_list, axis=2, name='detections')\n return detections\n\n\ndef det_post_process(params: Dict[Any, Any], cls_outputs: Dict[int, tf.Tensor],\n box_outputs: Dict[int, tf.Tensor], scales: List[float],\n min_score_thresh, max_boxes_to_draw):\n \"\"\"Post preprocessing the box/class predictions.\n\n Args:\n params: a parameter dictionary that includes `min_level`, `max_level`,\n `batch_size`, and `num_classes`.\n cls_outputs: an OrderDict with keys representing levels and 
values\n representing logits in [batch_size, height, width, num_anchors].\n box_outputs: an OrderDict with keys representing levels and values\n representing box regression targets in [batch_size, height, width,\n num_anchors * 4].\n scales: a list of float values indicating image scale.\n min_score_thresh: A float representing the threshold for deciding when to\n remove boxes based on score.\n max_boxes_to_draw: Max number of boxes to draw.\n\n Returns:\n detections_batch: a batch of detection results. Each detection is a tensor\n with each row representing [image_id, x, y, width, height, score, class].\n \"\"\"\n if not params['batch_size']:\n # Use combined version for dynamic batch size.\n return det_post_process_combined(params, cls_outputs, box_outputs, scales,\n min_score_thresh, max_boxes_to_draw)\n\n # TODO(tanmingxing): refactor the code to make it more explicity.\n outputs = {\n 'cls_outputs_all': [None],\n 'box_outputs_all': [None],\n 'indices_all': [None],\n 'classes_all': [None]\n }\n det_model_fn.add_metric_fn_inputs(params, cls_outputs, box_outputs, outputs,\n -1)\n\n # Create anchor_label for picking top-k predictions.\n eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],\n params['num_scales'], params['aspect_ratios'],\n params['anchor_scale'], params['image_size'])\n anchor_labeler = anchors.AnchorLabeler(eval_anchors, params['num_classes'])\n\n # Add all detections for each input image.\n detections_batch = []\n for index in range(params['batch_size']):\n cls_outputs_per_sample = outputs['cls_outputs_all'][index]\n box_outputs_per_sample = outputs['box_outputs_all'][index]\n indices_per_sample = outputs['indices_all'][index]\n classes_per_sample = outputs['classes_all'][index]\n detections = anchor_labeler.generate_detections(\n cls_outputs_per_sample,\n box_outputs_per_sample,\n indices_per_sample,\n classes_per_sample,\n image_id=[index],\n image_scale=[scales[index]],\n min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw,\n disable_pyfun=params.get('disable_pyfun'))\n if params['batch_size'] > 1:\n # pad to fixed length if batch size > 1.\n padding_size = max_boxes_to_draw - tf.shape(detections)[0]\n detections = tf.pad(detections, [[0, padding_size], [0, 0]])\n detections_batch.append(detections)\n return tf.stack(detections_batch, name='detections')\n\n\ndef visualize_image(image,\n boxes,\n classes,\n scores,\n id_mapping,\n min_score_thresh=anchors.MIN_SCORE_THRESH,\n max_boxes_to_draw=anchors.MAX_DETECTIONS_PER_IMAGE,\n line_thickness=2,\n **kwargs):\n \"\"\"Visualizes a given image.\n\n Args:\n image: a image with shape [H, W, C].\n boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].\n classes: a class prediction with shape [N].\n scores: A list of float value with shape [N].\n id_mapping: a dictionary from class id to name.\n min_score_thresh: minimal score for showing. 
If claass probability is below\n this threshold, then the object will not show up.\n max_boxes_to_draw: maximum bounding box to draw.\n line_thickness: how thick is the bounding box line.\n **kwargs: extra parameters.\n\n Returns:\n output_image: an output image with annotated boxes and classes.\n \"\"\"\n category_index = {k: {'id': k, 'name': id_mapping[k]} for k in id_mapping}\n img = np.array(image)\n vis_utils.visualize_boxes_and_labels_on_image_array(\n img,\n boxes,\n classes,\n scores,\n category_index,\n min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw,\n line_thickness=line_thickness,\n **kwargs)\n return img\n\n\ndef parse_label_id_mapping(\n label_id_mapping: Union[Text, Dict[int, Text]] = None) -> Dict[int, Text]:\n \"\"\"Parse label id mapping from a string or a yaml file.\n\n The label_id_mapping is a dict that maps class id to its name, such as:\n\n {\n 1: \"person\",\n 2: \"dog\"\n }\n\n Args:\n label_id_mapping:\n\n Returns:\n A dictionary with key as integer id and value as a string of name.\n \"\"\"\n if label_id_mapping is None:\n return coco_id_mapping\n\n if isinstance(label_id_mapping, dict):\n label_id_dict = label_id_mapping\n elif isinstance(label_id_mapping, str):\n with tf.io.gfile.GFile(label_id_mapping) as f:\n label_id_dict = yaml.load(f, Loader=yaml.FullLoader)\n else:\n raise TypeError('label_id_mapping must be a dict or a yaml filename, '\n 'containing a mapping from class ids to class names.')\n\n return label_id_dict\n\n\ndef visualize_image_prediction(image,\n prediction,\n disable_pyfun=True,\n label_id_mapping=None,\n **kwargs):\n \"\"\"Viusalize detections on a given image.\n\n Args:\n image: Image content in shape of [height, width, 3].\n prediction: a list of vector, with each vector has the format of [image_id,\n y, x, height, width, score, class].\n disable_pyfun: disable pyfunc for faster post processing.\n label_id_mapping: a map from label id to name.\n **kwargs: extra parameters for vistualization, such as min_score_thresh,\n max_boxes_to_draw, and line_thickness.\n\n Returns:\n a list of annotated images.\n \"\"\"\n boxes = prediction[:, 1:5]\n classes = prediction[:, 6].astype(int)\n scores = prediction[:, 5]\n\n if not disable_pyfun:\n # convert [x, y, width, height] to [y, x, height, width]\n boxes[:, [0, 1, 2, 3]] = boxes[:, [1, 0, 3, 2]]\n\n label_id_mapping = label_id_mapping or coco_id_mapping\n boxes[:, 2:4] += boxes[:, 0:2]\n return visualize_image(image, boxes, classes, scores, label_id_mapping,\n **kwargs)\n\n\nclass ServingDriver(object):\n \"\"\"A driver for serving single or batch images.\n\n This driver supports serving with image files or arrays, with configurable\n batch size.\n\n Example 1. Serving streaming image contents:\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)\n driver.build()\n for m in image_iterator():\n predictions = driver.serve_files([m])\n driver.visualize(m, predictions[0])\n # m is the new image with annotated boxes.\n\n Example 2. 
Serving batch image contents:\n\n imgs = []\n for f in ['/tmp/1.jpg', '/tmp/2.jpg']:\n imgs.append(np.array(Image.open(f)))\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))\n driver.build()\n predictions = driver.serve_images(imgs)\n for i in range(len(imgs)):\n driver.visualize(imgs[i], predictions[i])\n\n Example 3: another way is to use SavedModel:\n\n # step1: export a model.\n driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')\n driver.build()\n driver.export('/tmp/saved_model_path')\n\n # step2: Serve a model.\n with tf.Session() as sess:\n tf.saved_model.load(sess, ['serve'], self.saved_model_dir)\n raw_images = []\n for f in tf.io.gfile.glob('/tmp/images/*.jpg'):\n raw_images.append(np.array(PIL.Image.open(f)))\n detections = sess.run('detections:0', {'image_arrays:0': raw_images})\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0')\n driver.visualize(raw_images[0], detections[0])\n PIL.Image.fromarray(raw_images[0]).save(output_image_path)\n \"\"\"\n\n def __init__(self,\n model_name: Text,\n ckpt_path: Text,\n batch_size: int = 1,\n use_xla: bool = False,\n min_score_thresh: float = None,\n max_boxes_to_draw: float = None,\n line_thickness: int = None,\n model_params: Dict[Text, Any] = None):\n \"\"\"Initialize the inference driver.\n\n Args:\n model_name: target model name, such as efficientdet-d0.\n ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.\n batch_size: batch size for inference.\n use_xla: Whether run with xla optimization.\n min_score_thresh: minimal score threshold for filtering predictions.\n max_boxes_to_draw: the maximum number of boxes per image.\n line_thickness: the line thickness for drawing boxes.\n model_params: model parameters for overriding the config.\n \"\"\"\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.batch_size = batch_size\n\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_id_mapping = parse_label_id_mapping(\n self.params.get('label_id_mapping', None))\n\n self.signitures = None\n self.sess = None\n self.disable_pyfun = True\n self.use_xla = use_xla\n\n self.min_score_thresh = min_score_thresh or anchors.MIN_SCORE_THRESH\n self.max_boxes_to_draw = (\n max_boxes_to_draw or anchors.MAX_DETECTIONS_PER_IMAGE)\n self.line_thickness = line_thickness\n\n def __del__(self):\n if self.sess:\n self.sess.close()\n\n def _build_session(self):\n sess_config = tf.ConfigProto()\n if self.use_xla:\n sess_config.graph_options.optimizer_options.global_jit_level = (\n tf.OptimizerOptions.ON_2)\n return tf.Session(config=sess_config)\n\n def build(self, params_override=None):\n \"\"\"Build model and restore checkpoints.\"\"\"\n params = copy.deepcopy(self.params)\n if params_override:\n params.update(params_override)\n\n if not self.sess:\n self.sess = self._build_session()\n with self.sess.graph.as_default():\n image_files = tf.placeholder(tf.string, name='image_files', shape=[None])\n raw_images = batch_image_files_decode(image_files)\n raw_images = tf.identity(raw_images, name='image_arrays')\n images, scales = batch_image_preprocess(raw_images, params['image_size'],\n self.batch_size)\n if params['data_format'] == 'channels_first':\n images = tf.transpose(images, [0, 3, 1, 2])\n class_outputs, box_outputs = build_model(self.model_name, images,\n **params)\n params.update(\n 
dict(batch_size=self.batch_size, disable_pyfun=self.disable_pyfun))\n detections = det_post_process(params, class_outputs, box_outputs, scales,\n self.min_score_thresh,\n self.max_boxes_to_draw)\n\n restore_ckpt(\n self.sess,\n self.ckpt_path,\n ema_decay=self.params['moving_average_decay'],\n export_ckpt=None)\n\n self.signitures = {\n 'image_files': image_files,\n 'image_arrays': raw_images,\n 'prediction': detections,\n }\n return self.signitures\n\n def visualize(self, image, prediction, **kwargs):\n \"\"\"Visualize prediction on image.\"\"\"\n return visualize_image_prediction(\n image,\n prediction,\n disable_pyfun=self.disable_pyfun,\n label_id_mapping=self.label_id_mapping,\n **kwargs)\n\n def serve_files(self, image_files: List[Text]):\n \"\"\"Serve a list of input image files.\n\n Args:\n image_files: a list of image files with shape [1] and type string.\n\n Returns:\n A list of detections.\n \"\"\"\n if not self.sess:\n self.build()\n predictions = self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_files']: image_files})\n return predictions\n\n def benchmark(self, image_arrays, trace_filename=None):\n \"\"\"Benchmark inference latency/throughput.\n\n Args:\n image_arrays: a list of images in numpy array format.\n trace_filename: If None, specify the filename for saving trace.\n \"\"\"\n if not self.sess:\n self.build()\n\n # init session\n self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_arrays']: image_arrays})\n\n start = time.perf_counter()\n for _ in range(10):\n self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_arrays']: image_arrays})\n end = time.perf_counter()\n inference_time = (end - start) / 10\n\n print('Per batch inference time: ', inference_time)\n print('FPS: ', self.batch_size / inference_time)\n\n if trace_filename:\n run_options = tf.RunOptions()\n run_options.trace_level = tf.RunOptions.FULL_TRACE\n run_metadata = tf.RunMetadata()\n self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_arrays']: image_arrays},\n options=run_options,\n run_metadata=run_metadata)\n with tf.io.gfile.GFile(trace_filename, 'w') as trace_file:\n from tensorflow.python.client import timeline # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n trace_file.write(trace.generate_chrome_trace_format(show_memory=True))\n\n def serve_images(self, image_arrays):\n \"\"\"Serve a list of image arrays.\n\n Args:\n image_arrays: A list of image content with each image has shape [height,\n width, 3] and uint8 type.\n\n Returns:\n A list of detections.\n \"\"\"\n if not self.sess:\n self.build()\n predictions = self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_arrays']: image_arrays})\n return predictions\n\n def load(self, saved_model_dir_or_frozen_graph: Text):\n \"\"\"Load the model using saved model or a frozen graph.\"\"\"\n if not self.sess:\n self.sess = self._build_session()\n self.signitures = {\n 'image_files': 'image_files:0',\n 'image_arrays': 'image_arrays:0',\n 'prediction': 'detections:0',\n }\n\n # Load saved model if it is a folder.\n if tf.io.gfile.isdir(saved_model_dir_or_frozen_graph):\n return tf.saved_model.load(self.sess, ['serve'],\n saved_model_dir_or_frozen_graph)\n\n # Load a frozen graph.\n graph_def = tf.GraphDef()\n with tf.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:\n graph_def.ParseFromString(f.read())\n return 
tf.import_graph_def(graph_def, name='')\n\n def freeze(self):\n \"\"\"Freeze the graph.\"\"\"\n output_names = [self.signitures['prediction'].op.name]\n graphdef = tf.graph_util.convert_variables_to_constants(\n self.sess, self.sess.graph_def, output_names)\n return graphdef\n\n def export(self, output_dir):\n \"\"\"Export a saved model.\"\"\"\n signitures = self.signitures\n signature_def_map = {\n 'serving_default':\n tf.saved_model.predict_signature_def(\n {signitures['image_arrays'].name: signitures['image_arrays']},\n {signitures['prediction'].name: signitures['prediction']}),\n 'serving_base64':\n tf.saved_model.predict_signature_def(\n {signitures['image_files'].name: signitures['image_files']},\n {signitures['prediction'].name: signitures['prediction']}),\n }\n b = tf.saved_model.Builder(output_dir)\n b.add_meta_graph_and_variables(\n self.sess,\n tags=['serve'],\n signature_def_map=signature_def_map,\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True)\n b.save()\n logging.info('Model saved at %s', output_dir)\n\n # also save freeze pb file.\n graphdef = self.freeze()\n pb_path = os.path.join(output_dir, self.model_name + '_frozen.pb')\n tf.io.gfile.GFile(pb_path, 'wb').write(graphdef.SerializeToString())\n logging.info('Free graph saved at %s', pb_path)\n\n\nclass InferenceDriver(object):\n \"\"\"A driver for doing batch inference.\n\n Example usage:\n\n driver = inference.InferenceDriver('efficientdet-d0', '/tmp/efficientdet-d0')\n driver.inference('/tmp/*.jpg', '/tmp/outputdir')\n\n \"\"\"\n\n def __init__(self,\n model_name: Text,\n ckpt_path: Text,\n model_params: Dict[Text, Any] = None):\n \"\"\"Initialize the inference driver.\n\n Args:\n model_name: target model name, such as efficientdet-d0.\n ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.\n model_params: model parameters for overriding the config.\n \"\"\"\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_id_mapping = parse_label_id_mapping(\n self.params.get('label_id_mapping', None))\n\n self.disable_pyfun = True\n\n def inference(self, image_path_pattern: Text, output_dir: Text, **kwargs):\n \"\"\"Read and preprocess input images.\n\n Args:\n image_path_pattern: Image file pattern such as /tmp/img*.jpg\n output_dir: the directory for output images. 
Output images will be named\n as 0.jpg, 1.jpg, ....\n **kwargs: extra parameters for for vistualization, such as\n min_score_thresh, max_boxes_to_draw, and line_thickness.\n\n Returns:\n Annotated image.\n \"\"\"\n params = copy.deepcopy(self.params)\n with tf.Session() as sess:\n # Buid inputs and preprocessing.\n raw_images, images, scales = build_inputs(image_path_pattern,\n params['image_size'])\n if params['data_format'] == 'channels_first':\n images = tf.transpose(images, [0, 3, 1, 2])\n # Build model.\n class_outputs, box_outputs = build_model(self.model_name, images,\n **self.params)\n restore_ckpt(\n sess,\n self.ckpt_path,\n ema_decay=self.params['moving_average_decay'],\n export_ckpt=None)\n\n # for postprocessing.\n params.update(\n dict(batch_size=len(raw_images), disable_pyfun=self.disable_pyfun))\n\n # Build postprocessing.\n detections_batch = det_post_process(\n params,\n class_outputs,\n box_outputs,\n scales,\n min_score_thresh=kwargs.get('min_score_thresh',\n anchors.MIN_SCORE_THRESH),\n max_boxes_to_draw=kwargs.get('max_boxes_to_draw',\n anchors.MAX_DETECTIONS_PER_IMAGE))\n predictions = sess.run(detections_batch)\n # Visualize results.\n for i, prediction in enumerate(predictions):\n img = visualize_image_prediction(\n raw_images[i],\n prediction,\n disable_pyfun=self.disable_pyfun,\n label_id_mapping=self.label_id_mapping,\n **kwargs)\n output_image_path = os.path.join(output_dir, str(i) + '.jpg')\n Image.fromarray(img).save(output_image_path)\n logging.info('writing file to %s', output_image_path)\n\n return predictions\n" ]
[ [ "tensorflow.compat.v1.math.sigmoid", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.train.ExponentialMovingAverage", "tensorflow.compat.v1.io.gfile.glob", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.train.Saver", "tensorflow.python.client.timeline.Timeline", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.GraphDef", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.image.combined_non_max_suppression", "tensorflow.compat.v1.import_graph_def", "tensorflow.compat.v1.graph_util.convert_variables_to_constants", "tensorflow.compat.v1.map_fn", "tensorflow.compat.v1.io.gfile.GFile", "tensorflow.compat.v1.RunOptions", "tensorflow.compat.v1.io.gfile.isdir", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.saved_model.Builder", "tensorflow.compat.v1.saved_model.predict_signature_def", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.train.latest_checkpoint", "numpy.array", "tensorflow.compat.v1.RunMetadata", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.TensorArray", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.range", "tensorflow.compat.v1.saved_model.load", "tensorflow.compat.v1.io.decode_image", "tensorflow.compat.v1.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marcelodiaz558/BasicSR
[ "1d5138ed567e966965fd1540838d27e6f5082b70" ]
[ "basicsr/ops/upfirdn2d/upfirdn2d.py" ]
[ "# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501\n\nimport os\nimport torch\nfrom torch.autograd import Function\nfrom torch.nn import functional as F\n\nBASICSR_JIT = os.getenv('BASICSR_JIT')\nif BASICSR_JIT == 'True':\n from torch.utils.cpp_extension import load\n module_path = os.path.dirname(__file__)\n upfirdn2d_ext = load(\n 'upfirdn2d',\n sources=[\n os.path.join(module_path, 'src', 'upfirdn2d.cpp'),\n os.path.join(module_path, 'src', 'upfirdn2d_kernel.cu'),\n ],\n )\nelse:\n try:\n from . import upfirdn2d_ext\n except ImportError:\n pass\n # avoid annoying print output\n # print(f'Cannot import deform_conv_ext. Error: {error}. You may need to: \\n '\n # '1. compile with BASICSR_EXT=True. or\\n '\n # '2. set BASICSR_JIT=True during running')\n\n\nclass UpFirDn2dBackward(Function):\n\n @staticmethod\n def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size):\n\n up_x, up_y = up\n down_x, down_y = down\n g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad\n\n grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)\n\n grad_input = upfirdn2d_ext.upfirdn2d(\n grad_output,\n grad_kernel,\n down_x,\n down_y,\n up_x,\n up_y,\n g_pad_x0,\n g_pad_x1,\n g_pad_y0,\n g_pad_y1,\n )\n grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])\n\n ctx.save_for_backward(kernel)\n\n pad_x0, pad_x1, pad_y0, pad_y1 = pad\n\n ctx.up_x = up_x\n ctx.up_y = up_y\n ctx.down_x = down_x\n ctx.down_y = down_y\n ctx.pad_x0 = pad_x0\n ctx.pad_x1 = pad_x1\n ctx.pad_y0 = pad_y0\n ctx.pad_y1 = pad_y1\n ctx.in_size = in_size\n ctx.out_size = out_size\n\n return grad_input\n\n @staticmethod\n def backward(ctx, gradgrad_input):\n kernel, = ctx.saved_tensors\n\n gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)\n\n gradgrad_out = upfirdn2d_ext.upfirdn2d(\n gradgrad_input,\n kernel,\n ctx.up_x,\n ctx.up_y,\n ctx.down_x,\n ctx.down_y,\n ctx.pad_x0,\n ctx.pad_x1,\n ctx.pad_y0,\n ctx.pad_y1,\n )\n # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],\n # ctx.out_size[1], ctx.in_size[3])\n gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1])\n\n return gradgrad_out, None, None, None, None, None, None, None, None\n\n\nclass UpFirDn2d(Function):\n\n @staticmethod\n def forward(ctx, input, kernel, up, down, pad):\n up_x, up_y = up\n down_x, down_y = down\n pad_x0, pad_x1, pad_y0, pad_y1 = pad\n\n kernel_h, kernel_w = kernel.shape\n batch, channel, in_h, in_w = input.shape\n ctx.in_size = input.shape\n\n input = input.reshape(-1, in_h, in_w, 1)\n\n ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))\n\n out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1\n out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1\n ctx.out_size = (out_h, out_w)\n\n ctx.up = (up_x, up_y)\n ctx.down = (down_x, down_y)\n ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)\n\n g_pad_x0 = kernel_w - pad_x0 - 1\n g_pad_y0 = kernel_h - pad_y0 - 1\n g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1\n g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1\n\n ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)\n\n out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1)\n # out = out.view(major, out_h, out_w, minor)\n out = out.view(-1, channel, out_h, out_w)\n\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n kernel, grad_kernel = ctx.saved_tensors\n\n 
grad_input = UpFirDn2dBackward.apply(\n grad_output,\n kernel,\n grad_kernel,\n ctx.up,\n ctx.down,\n ctx.pad,\n ctx.g_pad,\n ctx.in_size,\n ctx.out_size,\n )\n\n return grad_input, None, None, None, None\n\n\ndef upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):\n if input.device.type == 'cpu':\n out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])\n else:\n out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]))\n\n return out\n\n\ndef upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1):\n _, channel, in_h, in_w = input.shape\n input = input.reshape(-1, in_h, in_w, 1)\n\n _, in_h, in_w, minor = input.shape\n kernel_h, kernel_w = kernel.shape\n\n out = input.view(-1, in_h, 1, in_w, 1, minor)\n out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])\n out = out.view(-1, in_h * up_y, in_w * up_x, minor)\n\n out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])\n out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ]\n\n out = out.permute(0, 3, 1, 2)\n out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])\n w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)\n out = F.conv2d(out, w)\n out = out.reshape(\n -1,\n minor,\n in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,\n in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,\n )\n out = out.permute(0, 2, 3, 1)\n out = out[:, ::down_y, ::down_x, :]\n\n out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1\n out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1\n\n return out.view(-1, channel, out_h, out_w)\n" ]
[ [ "torch.flip", "torch.nn.functional.conv2d", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tdiethe/MXFusion
[ "fbdba79ca85cb5a9760722ffd932b9ec4c401745", "fbdba79ca85cb5a9760722ffd932b9ec4c401745" ]
[ "testing/distributions/gp/cond_gp_test.py", "testing/distributions/gp/kernel_test.py" ]
[ "import pytest\nimport mxnet as mx\nimport numpy as np\nfrom mxfusion.models import Model\nfrom mxfusion.components.variables.runtime_variable import is_sampled_array, get_num_samples\nfrom mxfusion.components.distributions import ConditionalGaussianProcess\nfrom mxfusion.components.distributions.gp.kernels import RBF\nfrom mxfusion.components.variables import Variable\nfrom mxfusion.util.testutils import prepare_mxnet_array\nfrom mxfusion.util.testutils import MockMXNetRandomGenerator\nfrom scipy.stats import multivariate_normal\nimport matplotlib\nmatplotlib.use('Agg')\nimport GPy\n\n\[email protected](\"set_seed\")\nclass TestConditionalGaussianProcessDistribution(object):\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(3,5,1), True, 3),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 3),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(8,2), False, np.random.rand(3,8,1), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, np.random.rand(3,5,1), True, 3),\n (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 1),\n ])\n def test_log_pdf(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples,\n rv, rv_isSamples, num_samples):\n from scipy.linalg.lapack import dtrtrs\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)\n Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)\n rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype)\n rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype)\n rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype)\n rv_shape = rv.shape[1:] if rv_isSamples else rv.shape\n\n rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)\n X_var = Variable(shape=(5,2))\n X_cond_var = Variable(shape=(8,2))\n Y_cond_var = Variable(shape=(8,1))\n gp = ConditionalGaussianProcess.define_variable(X=X_var, X_cond=X_cond_var, Y_cond=Y_cond_var, kernel=rbf, shape=rv_shape, dtype=dtype).factor\n\n variables = {gp.X.uuid: X_mx, gp.X_cond.uuid: X_cond_mx, gp.Y_cond.uuid: Y_cond_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.random_variable.uuid: rv_mx}\n log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy()\n\n log_pdf_np = []\n for i in range(num_samples):\n X_i = X[i] if X_isSamples else X\n X_cond_i = X_cond[i] if X_cond_isSamples else X_cond\n Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond\n lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale\n variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance\n rv_i = rv[i] if rv_isSamples else rv\n rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)\n rbf_np.lengthscale = lengthscale_i\n rbf_np.variance = variance_i\n K_np = rbf_np.K(X_i)\n Kc_np = rbf_np.K(X_cond_i, 
X_i)\n Kcc_np = rbf_np.K(X_cond_i)\n\n L = np.linalg.cholesky(Kcc_np)\n LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]\n LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]\n\n mu = LinvKxt.T.dot(LInvY)\n cov = K_np - LinvKxt.T.dot(LinvKxt)\n log_pdf_np.append(multivariate_normal.logpdf(rv_i[:,0], mean=mu[:,0], cov=cov))\n log_pdf_np = np.array(log_pdf_np)\n isSamples_any = any([X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples, rv_isSamples])\n assert np.issubdtype(log_pdf_rt.dtype, dtype)\n assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any\n if isSamples_any:\n assert get_num_samples(mx.nd, log_pdf_rt) == num_samples\n assert np.allclose(log_pdf_np, log_pdf_rt)\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv_shape, num_samples\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,8,2), True, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,8,2), True, np.random.rand(3,8,1), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, (5,1), 3),\n (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 1),\n ])\n def test_draw_samples(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples,\n rv_shape, num_samples):\n from scipy.linalg.lapack import dtrtrs\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)\n Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)\n rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype)\n rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype)\n\n rand = np.random.randn(num_samples, *rv_shape)\n rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype))\n\n rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)\n X_var = Variable(shape=(5,2))\n X_cond_var = Variable(shape=(8,2))\n Y_cond_var = Variable(shape=(8,1))\n gp = ConditionalGaussianProcess.define_variable(X=X_var, X_cond=X_cond_var, Y_cond=Y_cond_var, kernel=rbf, shape=rv_shape, dtype=dtype, rand_gen=rand_gen).factor\n\n variables = {gp.X.uuid: X_mx, gp.X_cond.uuid: X_cond_mx, gp.Y_cond.uuid: Y_cond_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx}\n samples_rt = gp.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples).asnumpy()\n\n samples_np = []\n for i in range(num_samples):\n X_i = X[i] if X_isSamples else X\n X_cond_i = X_cond[i] if X_cond_isSamples else X_cond\n Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond\n lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale\n variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance\n rand_i = rand[i]\n rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)\n rbf_np.lengthscale = lengthscale_i\n rbf_np.variance = variance_i\n K_np = rbf_np.K(X_i)\n Kc_np = rbf_np.K(X_cond_i, X_i)\n Kcc_np = rbf_np.K(X_cond_i)\n\n L = np.linalg.cholesky(Kcc_np)\n 
LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]\n LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]\n\n mu = LinvKxt.T.dot(LInvY)\n cov = K_np - LinvKxt.T.dot(LinvKxt)\n L_cov_np = np.linalg.cholesky(cov)\n sample_np = mu + L_cov_np.dot(rand_i)\n samples_np.append(sample_np)\n samples_np = np.array(samples_np)\n assert np.issubdtype(samples_rt.dtype, dtype)\n assert get_num_samples(mx.nd, samples_rt) == num_samples\n print(samples_np, samples_rt)\n assert np.allclose(samples_np, samples_rt)\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(3,5,1), True, 3),\n ])\n def test_clone_cond_gp(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples,\n rv, rv_isSamples, num_samples):\n from scipy.linalg.lapack import dtrtrs\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)\n Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)\n rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype)\n rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype)\n rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype)\n rv_shape = rv.shape[1:] if rv_isSamples else rv.shape\n\n rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)\n m = Model()\n m.X_var = Variable(shape=(5,2))\n m.X_cond_var = Variable(shape=(8,2))\n m.Y_cond_var = Variable(shape=(8,1))\n m.Y = ConditionalGaussianProcess.define_variable(X=m.X_var, X_cond=m.X_cond_var, Y_cond=m.Y_cond_var, kernel=rbf, shape=rv_shape, dtype=dtype)\n\n gp = m.clone()[0].Y.factor\n\n variables = {gp.X.uuid: X_mx, gp.X_cond.uuid: X_cond_mx, gp.Y_cond.uuid: Y_cond_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.random_variable.uuid: rv_mx}\n log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy()\n\n log_pdf_np = []\n for i in range(num_samples):\n X_i = X[i] if X_isSamples else X\n X_cond_i = X_cond[i] if X_cond_isSamples else X_cond\n Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond\n lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale\n variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance\n rv_i = rv[i] if rv_isSamples else rv\n rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)\n rbf_np.lengthscale = lengthscale_i\n rbf_np.variance = variance_i\n K_np = rbf_np.K(X_i)\n Kc_np = rbf_np.K(X_cond_i, X_i)\n Kcc_np = rbf_np.K(X_cond_i)\n\n L = np.linalg.cholesky(Kcc_np)\n LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]\n LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]\n\n mu = LinvKxt.T.dot(LInvY)\n cov = K_np - LinvKxt.T.dot(LinvKxt)\n log_pdf_np.append(multivariate_normal.logpdf(rv_i[:,0], mean=mu[:,0], cov=cov))\n log_pdf_np = np.array(log_pdf_np)\n isSamples_any = any([X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples, rv_isSamples])\n assert np.issubdtype(log_pdf_rt.dtype, dtype)\n assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any\n if isSamples_any:\n assert get_num_samples(mx.nd, log_pdf_rt) == num_samples\n assert np.allclose(log_pdf_np, log_pdf_rt)\n", "import 
pytest\nimport mxnet as mx\nimport numpy as np\nfrom mxfusion.components.variables import Variable\nfrom mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\nfrom mxfusion.components.distributions.gp.kernels import RBF, Linear, Bias, White\nfrom mxfusion.util.testutils import numpy_array_reshape, prepare_mxnet_array\n\n# These test cases depends on GPy. Put them in try/except.\ntry:\n import GPy\n\n def gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params, num_samples, dtype, mf_kernel_create, gpy_kernel_create):\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)\n kern = mf_kernel_create().replicate_self()\n kernel_params_mx = {kern.name + '_' + k:\n prepare_mxnet_array(v[0], v[1], dtype) for k, v in\n kernel_params.items()}\n K_XX_mx = kern.K(mx.nd, X=X_mx, **kernel_params_mx)\n K_XX2_mx = kern.K(mx.nd, X=X_mx, X2=X2_mx, **kernel_params_mx)\n Kdiag_mx = kern.Kdiag(mx.nd, X=X_mx, **kernel_params_mx)\n\n kern_gpy = gpy_kernel_create()\n K_XX_gpy, K_XX2_gpy, Kdiag_gpy = [], [], []\n for i in range(num_samples):\n X_i = X[i] if X_isSamples else X\n X2_i = X2[i] if X2_isSamples else X2\n kernel_params_gpy = {k: v[0][i] if v[1] else v[0] for k, v in\n kernel_params.items()}\n for k, v in kernel_params_gpy.items():\n setattr(kern_gpy, k, v)\n K_XX_gpy.append(np.expand_dims(kern_gpy.K(X_i), axis=0))\n K_XX2_gpy.append(np.expand_dims(kern_gpy.K(X_i, X2_i), axis=0))\n Kdiag_gpy.append(np.expand_dims(kern_gpy.Kdiag(X_i), axis=0))\n K_XX_gpy = np.vstack(K_XX_gpy)\n K_XX2_gpy = np.vstack(K_XX2_gpy)\n Kdiag_gpy = np.vstack(Kdiag_gpy)\n\n assert np.issubdtype(K_XX_mx.dtype, dtype)\n assert np.issubdtype(K_XX2_mx.dtype, dtype)\n assert np.issubdtype(Kdiag_mx.dtype, dtype)\n assert np.allclose(K_XX_gpy, K_XX_mx.asnumpy())\n assert np.allclose(K_XX2_gpy, K_XX2_mx.asnumpy())\n assert np.allclose(Kdiag_gpy, Kdiag_mx.asnumpy())\n\n\n def gpy_comb_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params, num_samples, dtype, mf_kernel_create, gpy_kernel_create):\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)\n kern = mf_kernel_create().replicate_self()\n kernel_params_mx = {kern.name + '_' + k + '_' + k2:\n prepare_mxnet_array(v2[0], v2[1], dtype) for k, v in\n kernel_params.items() for k2, v2 in v.items()}\n K_XX_mx = kern.K(mx.nd, X=X_mx, **kernel_params_mx)\n K_XX2_mx = kern.K(mx.nd, X=X_mx, X2=X2_mx, **kernel_params_mx)\n Kdiag_mx = kern.Kdiag(mx.nd, X=X_mx, **kernel_params_mx)\n\n kern_gpy = gpy_kernel_create()\n K_XX_gpy, K_XX2_gpy, Kdiag_gpy = [], [], []\n for i in range(num_samples):\n X_i = X[i] if X_isSamples else X\n X2_i = X2[i] if X2_isSamples else X2\n for k, v in kernel_params.items():\n kern_1 = getattr(kern_gpy, k)\n for k2, v2 in v.items():\n setattr(kern_1, k2, v2[0][i] if v2[1] else v2[0])\n K_XX_gpy.append(np.expand_dims(kern_gpy.K(X_i), axis=0))\n K_XX2_gpy.append(np.expand_dims(kern_gpy.K(X_i, X2_i), axis=0))\n Kdiag_gpy.append(np.expand_dims(kern_gpy.Kdiag(X_i), axis=0))\n K_XX_gpy = np.vstack(K_XX_gpy)\n K_XX2_gpy = np.vstack(K_XX2_gpy)\n Kdiag_gpy = np.vstack(Kdiag_gpy)\n\n assert np.issubdtype(K_XX_mx.dtype, dtype)\n assert np.issubdtype(K_XX2_mx.dtype, dtype)\n assert np.issubdtype(Kdiag_mx.dtype, dtype)\n assert np.allclose(K_XX_gpy, K_XX_mx.asnumpy())\n assert np.allclose(K_XX2_gpy, K_XX2_mx.asnumpy())\n assert np.allclose(Kdiag_gpy, Kdiag_mx.asnumpy())\n\n\n 
@pytest.mark.usefixtures(\"set_seed\")\n class TestGPKernels(object):\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, lengthscale, lengthscale_isSamples, variance, variance_isSamples, num_samples, input_dim, ARD\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 1, 2, True),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, np.random.rand(1)+1e-4, False, 3, 2, True),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(3,1)+1e-4, True, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, False),\n ])\n def test_kernel_as_MXFusionFunction(self, dtype, X, X_isSamples, X2,\n X2_isSamples, lengthscale, lengthscale_isSamples, variance,\n variance_isSamples, num_samples, input_dim, ARD):\n\n X_mx = prepare_mxnet_array(X, X_isSamples, dtype)\n X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)\n var_mx = prepare_mxnet_array(variance, variance_isSamples, dtype)\n l_mx = prepare_mxnet_array(lengthscale, lengthscale_isSamples,\n dtype)\n\n X_mf = Variable(shape=X.shape)\n l_mf = Variable(shape=lengthscale.shape)\n var_mf = Variable(shape=variance.shape)\n rbf = RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)\n eval = rbf(X_mf, rbf_lengthscale=l_mf, rbf_variance=var_mf).factor\n variables = {eval.X.uuid: X_mx, eval.rbf_lengthscale.uuid: l_mx, eval.rbf_variance.uuid: var_mx}\n res_eval = eval.eval(F=mx.nd, variables=variables)\n kernel_params = rbf.fetch_parameters(variables)\n res_direct = rbf.K(F=mx.nd, X=X_mx, **kernel_params)\n assert np.allclose(res_eval.asnumpy(), res_direct.asnumpy())\n\n X_mf = Variable(shape=X.shape)\n X2_mf = Variable(shape=X2.shape)\n l_mf = Variable(shape=lengthscale.shape)\n var_mf = Variable(shape=variance.shape)\n rbf = RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)\n eval = rbf(X_mf, X2_mf, rbf_lengthscale=l_mf, rbf_variance=var_mf).factor\n variables = {eval.X.uuid: X_mx, eval.X2.uuid: X2_mx, eval.rbf_lengthscale.uuid: l_mx, eval.rbf_variance.uuid: var_mx}\n res_eval = eval.eval(F=mx.nd, variables=variables)\n kernel_params = rbf.fetch_parameters(variables)\n res_direct = rbf.K(F=mx.nd, X=X_mx, X2=X2_mx, **kernel_params)\n assert np.allclose(res_eval.asnumpy(), res_direct.asnumpy())\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, lengthscale, lengthscale_isSamples, variance, variance_isSamples, num_samples, input_dim, ARD\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 1, 2, True),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, np.random.rand(1)+1e-4, False, 3, 2, True),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(3,1)+1e-4, True, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, False),\n ])\n def 
test_RBF_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,\n lengthscale, lengthscale_isSamples, variance,\n variance_isSamples, num_samples, input_dim, ARD):\n def create_rbf():\n return RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)\n\n def create_gpy_rbf():\n return GPy.kern.RBF(input_dim=input_dim, ARD=ARD)\n\n kernel_params = {'lengthscale': (lengthscale, lengthscale_isSamples),\n 'variance': (variance, variance_isSamples)}\n\n gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_rbf, create_gpy_rbf)\n\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, variances, variances_isSamples, num_samples, input_dim, ARD\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, 1, 2, True),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, 3, 2, True),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2, False),\n ])\n def test_Linear_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,\n variances, variances_isSamples, num_samples, input_dim,\n ARD):\n def create_linear():\n return Linear(input_dim, ARD, 1., 'linear', None, dtype)\n\n def create_gpy_linear():\n return GPy.kern.Linear(input_dim=input_dim, ARD=ARD)\n\n kernel_params = {'variances': (variances, variances_isSamples)}\n\n gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_linear, create_gpy_linear)\n\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, variance, variance_isSamples, num_samples, input_dim\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(1)+1e-4, False, 1, 2),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,1)+1e-4, True, 3, 2),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2)\n ])\n def test_Bias_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,\n variance, variance_isSamples, num_samples, input_dim):\n def create_bias():\n return Bias(input_dim, 1., 'bias', None, dtype)\n\n def create_gpy_bias():\n return GPy.kern.Bias(input_dim=input_dim)\n\n kernel_params = {'variance': (variance, variance_isSamples)}\n\n gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_bias, create_gpy_bias)\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, variance, variance_isSamples, num_samples, input_dim\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(1)+1e-4, False, 1, 2),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,1)+1e-4, True, 3, 2),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2)\n ])\n def test_White_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,\n variance, variance_isSamples, num_samples, input_dim):\n def create_white():\n return White(input_dim, 1., 'bias', None, dtype)\n\n def create_gpy_white():\n return GPy.kern.White(input_dim=input_dim)\n\n kernel_params = {'variance': (variance, variance_isSamples)}\n\n gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_white, create_gpy_white)\n\n @pytest.mark.parametrize(\"dtype, X, 
X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim\", [\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 1, 2),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 3, 2),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 3, 2),\n (np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(3,2)+1e-4, True, 3, 2),\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(3,2)+1e-4, True, np.random.rand(3,1)+1e-4, True, np.random.rand(3,2)+1e-4, True, 3, 2)\n ])\n def test_add_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim):\n def create_rbf_plus_linear():\n return RBF(input_dim, True, 1., 1., 'rbf', None, dtype) + Linear(input_dim, True, 1, 'linear', None, dtype)\n\n def create_gpy_rbf_plus_linear():\n return GPy.kern.RBF(input_dim=input_dim, ARD=True) + GPy.kern.Linear(input_dim=input_dim, ARD=True)\n\n kernel_params = {'rbf': {'lengthscale': (rbf_lengthscale, rbf_lengthscale_isSamples), 'variance': (rbf_variance, rbf_variance_isSamples)},\n 'linear': {'variances': (linear_variances, linear_variances_isSamples)}\n }\n\n gpy_comb_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_rbf_plus_linear,\n create_gpy_rbf_plus_linear)\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim\", [\n (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(3,2)+1e-4, True, np.random.rand(3,1)+1e-4, True, np.random.rand(3,2)+1e-4, True, 3, 2)\n ])\n def test_adding_add_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim):\n def create_rbf_plus_linear():\n return RBF(input_dim, True, 1., 1., 'rbf', None, dtype) + (RBF(input_dim, True, 1., 1., 'rbf', None, dtype) + Linear(input_dim, True, 1, 'linear', None, dtype))\n\n def create_gpy_rbf_plus_linear():\n return GPy.kern.RBF(input_dim=input_dim, ARD=True, name='rbf') + GPy.kern.RBF(input_dim=input_dim, ARD=True, name='rbf0') + GPy.kern.Linear(input_dim=input_dim, ARD=True)\n\n kernel_params = {'rbf': {'lengthscale': (rbf_lengthscale, rbf_lengthscale_isSamples), 'variance': (rbf_variance, rbf_variance_isSamples)},\n 'rbf0': {'lengthscale': (rbf_lengthscale, rbf_lengthscale_isSamples), 'variance': (rbf_variance, rbf_variance_isSamples)},\n 'linear': {'variances': (linear_variances, linear_variances_isSamples)}\n }\n\n gpy_comb_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_rbf_plus_linear,\n create_gpy_rbf_plus_linear)\n\n @pytest.mark.parametrize(\"dtype, X, X_isSamples, X2, X2_isSamples, 
rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim\", [\n (np.float64, np.random.rand(5,6), False, np.random.rand(4,6), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(3)+1e-4, False, 1, 6),\n (np.float64, np.random.rand(3,5,6), True, np.random.rand(3,4,6), True, np.random.rand(3,2)+1e-4, True, np.random.rand(3,1)+1e-4, True, np.random.rand(3,3)+1e-4, True, 3, 6)\n ])\n def test_kernel_active_dims(self, dtype, X, X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim):\n def create_rbf_plus_linear():\n return RBF(2, True, 1., 1., 'rbf', [2,3], dtype) + Linear(3, True, 1, 'linear', [4, 1, 5], dtype)\n\n def create_gpy_rbf_plus_linear():\n return GPy.kern.RBF(input_dim=2, ARD=True, active_dims=[2,3]) + GPy.kern.Linear(input_dim=3, ARD=True, active_dims=[4,1,5])\n\n kernel_params = {'rbf': {'lengthscale': (rbf_lengthscale, rbf_lengthscale_isSamples), 'variance': (rbf_variance, rbf_variance_isSamples)},\n 'linear': {'variances': (linear_variances, linear_variances_isSamples)}\n }\n\n gpy_comb_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,\n num_samples, dtype, create_rbf_plus_linear,\n create_gpy_rbf_plus_linear)\nexcept ImportError:\n pass\n" ]
[ [ "numpy.allclose", "matplotlib.use", "numpy.issubdtype", "scipy.linalg.lapack.dtrtrs", "numpy.random.randn", "scipy.stats.multivariate_normal.logpdf", "numpy.random.rand", "numpy.linalg.cholesky", "numpy.array" ], [ "numpy.issubdtype", "numpy.random.rand", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
irfankhan10/pytket-extensions
[ "8ab33c1dcff91dfc50d471fbc160277e82d2492b", "8ab33c1dcff91dfc50d471fbc160277e82d2492b" ]
[ "modules/pytket-aqt/tests/convert_test.py", "modules/pytket-pyquil/pytket/extensions/pyquil/backends/forest.py" ]
[ "# Copyright 2020-2021 Cambridge Quantum Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, List\nimport json\nimport os\nimport numpy as np\nimport pytest\nfrom pytket.circuit import Circuit, OpType # type: ignore\nfrom pytket.extensions.aqt.backends.aqt import _translate_aqt, AQTBackend, _aqt_rebase\n\nskip_remote_tests: bool = os.getenv(\"PYTKET_RUN_REMOTE_TESTS\") is None\nREASON = \"PYTKET_RUN_REMOTE_TESTS not set (requires configuration of AQT access token)\"\n\n\ndef tk_to_aqt(circ: Circuit) -> Tuple[List[List], str]:\n \"\"\"Convert a circuit to AQT list representation\"\"\"\n c = circ.copy()\n AQTBackend(device_name=\"sim/noise-model-1\").default_compilation_pass().apply(c)\n return _translate_aqt(c)\n\n\[email protected](skip_remote_tests, reason=REASON)\ndef test_convert() -> None:\n circ = Circuit(4, 4)\n circ.H(0).CX(0, 1)\n circ.add_gate(OpType.noop, [1])\n circ.CRz(0.5, 1, 2)\n circ.add_barrier([2])\n circ.ZZPhase(0.3, 2, 3).CX(3, 0).Tdg(1)\n circ.Measure(0, 0)\n circ.Measure(1, 2)\n circ.Measure(2, 3)\n circ.Measure(3, 1)\n\n circ_aqt = tk_to_aqt(circ)\n assert json.loads(circ_aqt[1]) == [0, 3, 1, 2]\n assert all(gate[0] in [\"X\", \"Y\", \"MS\"] for gate in circ_aqt[0])\n\n\ndef test_rebase_CX() -> None:\n circ = Circuit(2)\n circ.CX(0, 1)\n orig_circ = circ.copy()\n\n _aqt_rebase().apply(circ)\n\n u1 = orig_circ.get_unitary()\n u2 = circ.get_unitary()\n\n assert np.allclose(u1, u2)\n\n\ndef test_rebase_singleq() -> None:\n circ = Circuit(1)\n # some arbitrary unitary\n circ.add_gate(OpType.U3, [0.01231, 0.848, 38.200], [0])\n orig_circ = circ.copy()\n\n _aqt_rebase().apply(circ)\n\n u1 = orig_circ.get_unitary()\n u2 = circ.get_unitary()\n\n assert np.allclose(u1, u2)\n\n\ndef test_rebase_large() -> None:\n circ = Circuit(3)\n # some arbitrary unitary\n circ.Rx(0.21, 0).Rz(0.12, 1).Rz(8.2, 2).X(2).CX(0, 1).CX(1, 2).Rz(0.44, 1).Rx(\n 0.43, 0\n )\n orig_circ = circ.copy()\n\n _aqt_rebase().apply(circ)\n\n u1 = orig_circ.get_unitary()\n u2 = circ.get_unitary()\n\n assert np.allclose(u1, u2)\n", "# Copyright 2019-2021 Cambridge Quantum Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom typing import cast, Iterable, List, Optional, Sequence, Union\nfrom uuid import uuid4\nfrom logging import warning\n\nimport numpy as np\nfrom pyquil import get_qc\nfrom pyquil.api import QuantumComputer, WavefunctionSimulator\nfrom pyquil.gates import I\nfrom pyquil.paulis import ID, PauliSum, PauliTerm\nfrom pyquil.quilatom 
import Qubit as Qubit_\n\nfrom pytket.circuit import Circuit, OpType, Qubit # type: ignore\nfrom pytket.backends import (\n Backend,\n CircuitNotRunError,\n CircuitStatus,\n ResultHandle,\n StatusEnum,\n)\nfrom pytket.backends.backend import KwargTypes\nfrom pytket.backends.backendinfo import BackendInfo\nfrom pytket.backends.backendresult import BackendResult\nfrom pytket.backends.resulthandle import _ResultIdTuple\nfrom pytket.extensions.pyquil._metadata import __extension_version__\nfrom pytket.passes import ( # type: ignore\n BasePass,\n EulerAngleReduction,\n CXMappingPass,\n RebaseQuil,\n SequencePass,\n SynthesiseTket,\n DecomposeBoxes,\n FullPeepholeOptimise,\n CliffordSimp,\n FlattenRegisters,\n SimplifyInitial,\n)\nfrom pytket.pauli import QubitPauliString # type: ignore\nfrom pytket.predicates import ( # type: ignore\n NoSymbolsPredicate,\n ConnectivityPredicate,\n GateSetPredicate,\n NoClassicalControlPredicate,\n NoFastFeedforwardPredicate,\n NoMidMeasurePredicate,\n DefaultRegisterPredicate,\n Predicate,\n)\nfrom pytket.extensions.pyquil.pyquil_convert import (\n process_characterisation,\n get_avg_characterisation,\n tk_to_pyquil,\n)\nfrom pytket.routing import NoiseAwarePlacement, Architecture # type: ignore\nfrom pytket.utils import prepare_circuit\nfrom pytket.utils.operators import QubitPauliOperator\nfrom pytket.utils.outcomearray import OutcomeArray\n\n\nclass PyQuilJobStatusUnavailable(Exception):\n \"\"\"Raised when trying to retrieve unknown job status.\"\"\"\n\n def __init__(self) -> None:\n super().__init__(\"The job status cannot be retrieved.\")\n\n\n_STATUS_MAP = {\n \"done\": StatusEnum.COMPLETED,\n \"running\": StatusEnum.RUNNING,\n \"loaded\": StatusEnum.SUBMITTED,\n \"connected\": StatusEnum.SUBMITTED,\n}\n\n\ndef _default_q_index(q: Qubit) -> int:\n if q.reg_name != \"q\" or len(q.index) != 1:\n raise ValueError(\"Non-default qubit register\")\n return int(q.index[0])\n\n\nclass ForestBackend(Backend):\n _supports_shots = True\n _supports_counts = True\n _supports_contextual_optimisation = True\n _persistent_handles = True\n _GATE_SET = {OpType.CZ, OpType.Rx, OpType.Rz, OpType.Measure, OpType.Barrier}\n\n def __init__(self, qc_name: str, simulator: bool = True):\n \"\"\"Backend for running circuits on a Rigetti QCS device or simulating with the\n QVM.\n\n :param qc_name: The name of the particular QuantumComputer to use. See the\n pyQuil docs for more details.\n :type qc_name: str\n :param simulator: Simulate the device with the QVM (True), or run on the QCS\n (False). 
Defaults to True.\n :type simulator: bool, optional\n \"\"\"\n super().__init__()\n self._qc: QuantumComputer = get_qc(qc_name, as_qvm=simulator)\n\n char_dict: dict = process_characterisation(self._qc)\n arch = char_dict.get(\"Architecture\", Architecture([]))\n node_errors = char_dict.get(\"NodeErrors\")\n link_errors = char_dict.get(\"EdgeErrors\")\n averaged_errors = get_avg_characterisation(char_dict)\n self._backend_info = BackendInfo(\n type(self).__name__,\n qc_name,\n __extension_version__,\n arch,\n self._GATE_SET,\n all_node_gate_errors=node_errors,\n all_edge_gate_errors=link_errors,\n averaged_node_gate_errors=averaged_errors[\"node_errors\"],\n averaged_edge_gate_errors=averaged_errors[\"link_errors\"],\n )\n\n @property\n def required_predicates(self) -> List[Predicate]:\n return [\n NoClassicalControlPredicate(),\n NoFastFeedforwardPredicate(),\n NoMidMeasurePredicate(),\n GateSetPredicate(self.backend_info.gate_set),\n ConnectivityPredicate(self.backend_info.architecture),\n ]\n\n def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass:\n assert optimisation_level in range(3)\n passlist = [\n DecomposeBoxes(),\n FlattenRegisters(),\n ]\n if optimisation_level == 1:\n passlist.append(SynthesiseTket())\n elif optimisation_level == 2:\n passlist.append(FullPeepholeOptimise())\n passlist.append(\n CXMappingPass(\n self.backend_info.architecture,\n NoiseAwarePlacement(\n self._backend_info.architecture,\n self._backend_info.averaged_node_gate_errors,\n self._backend_info.averaged_edge_gate_errors,\n ),\n directed_cx=False,\n delay_measures=True,\n )\n )\n if optimisation_level == 2:\n passlist.append(CliffordSimp(False))\n if optimisation_level > 0:\n passlist.append(SynthesiseTket())\n passlist.append(RebaseQuil())\n if optimisation_level > 0:\n passlist.extend(\n [\n EulerAngleReduction(OpType.Rx, OpType.Rz),\n SimplifyInitial(\n allow_classical=False, create_all_qubits=True, xcirc=_xcirc\n ),\n ]\n )\n return SequencePass(passlist)\n\n @property\n def _result_id_type(self) -> _ResultIdTuple:\n return (int, str)\n\n def process_circuits(\n self,\n circuits: Sequence[Circuit],\n n_shots: Union[None, int, Sequence[Optional[int]]] = None,\n valid_check: bool = True,\n **kwargs: KwargTypes,\n ) -> List[ResultHandle]:\n \"\"\"\n See :py:meth:`pytket.backends.Backend.process_circuits`.\n Supported kwargs: `seed`.\n \"\"\"\n circuits = list(circuits)\n n_shots_list = Backend._get_n_shots_as_list(\n n_shots, len(circuits), optional=False\n )\n\n if valid_check:\n self._check_all_circuits(circuits)\n\n postprocess = kwargs.get(\"postprocess\", False)\n\n handle_list = []\n for circuit, n_shots in zip(circuits, n_shots_list):\n if postprocess:\n c0, ppcirc = prepare_circuit(circuit, allow_classical=False)\n ppcirc_rep = ppcirc.to_dict()\n else:\n c0, ppcirc_rep = circuit, None\n p, bits = tk_to_pyquil(c0, return_used_bits=True)\n p.wrap_in_numshots_loop(n_shots)\n ex = self._qc.compiler.native_quil_to_executable(p)\n qam = self._qc.qam\n qam.random_seed = kwargs.get(\"seed\") # type: ignore\n pyquil_handle = qam.execute(ex)\n handle = ResultHandle(uuid4().int, json.dumps(ppcirc_rep))\n measures = circuit.n_gates_of_type(OpType.Measure)\n if measures == 0:\n self._cache[handle] = {\n \"handle\": pyquil_handle,\n \"c_bits\": sorted(bits),\n \"result\": self.empty_result(circuit, n_shots=n_shots),\n }\n else:\n self._cache[handle] = {\"handle\": pyquil_handle, \"c_bits\": sorted(bits)}\n handle_list.append(handle)\n return handle_list\n\n def circuit_status(self, 
handle: ResultHandle) -> CircuitStatus:\n \"\"\"\n Return a CircuitStatus reporting the status of the circuit execution\n corresponding to the ResultHandle.\n\n This will throw an PyQuilJobStatusUnavailable exception if the results\n have not been retrieved yet, as pyQuil does not currently support asynchronous\n job status queries.\n\n :param handle: The handle to the submitted job.\n :type handle: ResultHandle\n :returns: The status of the submitted job.\n :raises PyQuilJobStatusUnavailable: Cannot retrieve job status.\n :raises CircuitNotRunError: The handle does not correspond to a valid job.\n \"\"\"\n if handle in self._cache and \"result\" in self._cache[handle]:\n return CircuitStatus(StatusEnum.COMPLETED)\n if handle in self._cache:\n # retrieving status is not supported yet\n # see https://github.com/rigetti/pyquil/issues/1370\n raise PyQuilJobStatusUnavailable()\n raise CircuitNotRunError(handle)\n\n def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResult:\n \"\"\"\n See :py:meth:`pytket.backends.Backend.get_result`.\n Supported kwargs: none.\n \"\"\"\n try:\n return super().get_result(handle)\n except CircuitNotRunError:\n if handle not in self._cache:\n raise CircuitNotRunError(handle)\n\n pyquil_handle = self._cache[handle][\"handle\"]\n raw_shots = self._qc.qam.get_result(pyquil_handle).readout_data[\"ro\"]\n if raw_shots is None:\n raise ValueError(\"Could not read job results in memory\")\n shots = OutcomeArray.from_readouts(raw_shots.tolist())\n ppcirc_rep = json.loads(cast(str, handle[1]))\n ppcirc = Circuit.from_dict(ppcirc_rep) if ppcirc_rep is not None else None\n res = BackendResult(\n shots=shots, c_bits=self._cache[handle][\"c_bits\"], ppcirc=ppcirc\n )\n self._cache[handle].update({\"result\": res})\n return res\n\n @property\n def backend_info(self) -> BackendInfo:\n return self._backend_info\n\n\nclass ForestStateBackend(Backend):\n _supports_state = True\n _supports_expectation = True\n _expectation_allows_nonhermitian = False\n _persistent_handles = False\n _GATE_SET = {\n OpType.X,\n OpType.Y,\n OpType.Z,\n OpType.H,\n OpType.S,\n OpType.T,\n OpType.Rx,\n OpType.Ry,\n OpType.Rz,\n OpType.CZ,\n OpType.CX,\n OpType.CCX,\n OpType.CU1,\n OpType.U1,\n OpType.SWAP,\n }\n\n def __init__(self) -> None:\n \"\"\"Backend for running simulations on the Rigetti QVM Wavefunction Simulator.\"\"\"\n super().__init__()\n self._sim = WavefunctionSimulator()\n\n @property\n def required_predicates(self) -> List[Predicate]:\n return [\n NoClassicalControlPredicate(),\n NoFastFeedforwardPredicate(),\n NoMidMeasurePredicate(),\n NoSymbolsPredicate(),\n GateSetPredicate(self._GATE_SET),\n DefaultRegisterPredicate(),\n ]\n\n def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass:\n assert optimisation_level in range(3)\n passlist = [DecomposeBoxes(), FlattenRegisters()]\n if optimisation_level == 1:\n passlist.append(SynthesiseTket())\n elif optimisation_level == 2:\n passlist.append(FullPeepholeOptimise())\n passlist.append(RebaseQuil())\n if optimisation_level > 0:\n passlist.append(EulerAngleReduction(OpType.Rx, OpType.Rz))\n return SequencePass(passlist)\n\n @property\n def _result_id_type(self) -> _ResultIdTuple:\n return (int,)\n\n def process_circuits(\n self,\n circuits: Iterable[Circuit],\n n_shots: Optional[Union[int, Sequence[int]]] = None,\n valid_check: bool = True,\n **kwargs: KwargTypes,\n ) -> List[ResultHandle]:\n handle_list = []\n if valid_check:\n self._check_all_circuits(circuits)\n for circuit in circuits:\n p = 
tk_to_pyquil(circuit)\n for qb in circuit.qubits:\n # Qubits with no gates will not be included in the Program\n # Add identities to ensure all qubits are present and dimension\n # is as expected\n p += I(Qubit_(qb.index[0]))\n handle = ResultHandle(uuid4().int)\n state = np.array(self._sim.wavefunction(p).amplitudes)\n try:\n phase = float(circuit.phase)\n coeff = np.exp(phase * np.pi * 1j)\n state *= coeff\n except ValueError:\n warning(\n \"Global phase is dependent on a symbolic parameter, so cannot \"\n \"adjust for phase\"\n )\n implicit_perm = circuit.implicit_qubit_permutation()\n res_qubits = [\n implicit_perm[qb] for qb in sorted(circuit.qubits, reverse=True)\n ]\n res = BackendResult(q_bits=res_qubits, state=state)\n self._cache[handle] = {\"result\": res}\n handle_list.append(handle)\n return handle_list\n\n def circuit_status(self, handle: ResultHandle) -> CircuitStatus:\n if handle in self._cache:\n return CircuitStatus(StatusEnum.COMPLETED)\n raise CircuitNotRunError(handle)\n\n def _gen_PauliTerm(self, term: QubitPauliString, coeff: complex = 1.0) -> PauliTerm:\n pauli_term = ID() * coeff\n for q, p in term.to_dict().items():\n pauli_term *= PauliTerm(p.name, _default_q_index(q))\n return pauli_term # type: ignore\n\n def get_pauli_expectation_value(\n self, state_circuit: Circuit, pauli: QubitPauliString\n ) -> complex:\n \"\"\"Calculates the expectation value of the given circuit using the built-in QVM\n functionality\n\n :param state_circuit: Circuit that generates the desired state\n :math:`\\\\left|\\\\psi\\\\right>`.\n :type state_circuit: Circuit\n :param pauli: Pauli operator\n :type pauli: QubitPauliString\n :return: :math:`\\\\left<\\\\psi | P | \\\\psi \\\\right>`\n :rtype: complex\n \"\"\"\n prog = tk_to_pyquil(state_circuit)\n pauli_term = self._gen_PauliTerm(pauli)\n return complex(self._sim.expectation(prog, [pauli_term]))\n\n def get_operator_expectation_value(\n self, state_circuit: Circuit, operator: QubitPauliOperator\n ) -> complex:\n \"\"\"Calculates the expectation value of the given circuit with respect to the\n operator using the built-in QVM functionality\n\n :param state_circuit: Circuit that generates the desired state\n :math:`\\\\left|\\\\psi\\\\right>`.\n :type state_circuit: Circuit\n :param operator: Operator :math:`H`.\n :type operator: QubitPauliOperator\n :return: :math:`\\\\left<\\\\psi | H | \\\\psi \\\\right>`\n :rtype: complex\n \"\"\"\n prog = tk_to_pyquil(state_circuit)\n pauli_sum = PauliSum(\n [self._gen_PauliTerm(term, coeff) for term, coeff in operator._dict.items()]\n )\n return complex(self._sim.expectation(prog, pauli_sum))\n\n\n_xcirc = Circuit(1).Rx(1, 0)\n_xcirc.add_phase(0.5)\n" ]
[ [ "numpy.allclose" ], [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ipuch/Humanoid2D
[ "3afd3926b9fb2ddc39be9bef99b89f864f41dcb7", "3afd3926b9fb2ddc39be9bef99b89f864f41dcb7" ]
[ "examples/walk_integration.py", "examples/walk10Dof.py" ]
[ "import biorbd\nimport numpy as np\n\nfrom bioptim import OdeSolver, CostType, RigidBodyDynamics\nfrom bioptim import Solver, DefectType\n\nfrom humanoid_2d import Humanoid2D, Integration, add_custom_plots, HumanoidOcp, HumanoidOcpMultiPhase\n\n\ndef torque_driven_dynamics(model: biorbd.Model, states: np.array, controls: np.array, params: np.array):\n q = states[: model.nbQ()]\n qdot = states[model.nbQ() :]\n tau = controls\n qddot = model.ForwardDynamicsConstraintsDirect(q, qdot, tau).to_array()\n return np.hstack((qdot, qddot))\n\n\ndef main():\n n_shooting = 30\n ode_solver = OdeSolver.RK4()\n # ode_solver = OdeSolver.COLLOCATION()\n time = 0.3\n n_threads = 8\n # for human in Humanoid2D:\n human = Humanoid2D.HUMANOID_3DOF\n model_path = human\n print(human)\n # --- Solve the program --- #\n humanoid = HumanoidOcpMultiPhase(\n biorbd_model_path=model_path.value,\n phase_time=time,\n n_shooting=n_shooting,\n ode_solver=ode_solver,\n rigidbody_dynamics=RigidBodyDynamics.ODE,\n n_threads=n_threads,\n nb_phases=1,\n )\n\n add_custom_plots(humanoid.ocp)\n humanoid.ocp.add_plot_penalty(CostType.ALL)\n # humanoid.ocp.print()\n\n solv = Solver.IPOPT(show_online_optim=False, show_options=dict(show_bounds=True))\n solv.set_maximum_iterations(1000)\n solv.set_linear_solver(\"ma57\")\n solv.set_print_level(5)\n sol = humanoid.ocp.solve(solv)\n\n # --- Show results --- #\n print(sol.status)\n sol.print_cost()\n\n from bioptim import Shooting, SolutionIntegrator\n\n sol.integrate(\n shooting_type=Shooting.SINGLE_CONTINUOUS,\n keep_intermediate_points=False,\n merge_phases=False,\n continuous=True,\n integrator=SolutionIntegrator.SCIPY_DOP853,\n )\n print(sol.states[\"q\"])\n\n integration = Integration(\n ocp=humanoid.ocp, solution=sol, state_keys=[\"q\", \"qdot\"], control_keys=[\"tau\"], function=torque_driven_dynamics\n )\n\n out = integration.integrate(\n shooting_type=Shooting.SINGLE_CONTINUOUS,\n keep_intermediate_points=False,\n merge_phases=False,\n continuous=True,\n integrator=SolutionIntegrator.SCIPY_DOP853,\n )\n print(out.states[\"q\"])\n\n print(sol.states[\"q\"] - out.states[\"q\"])\n\n import matplotlib.pyplot as plt\n\n plt.figure(1)\n plt.plot(sol.states[\"q\"][0, :])\n plt.plot(out.states[\"q\"][0, :])\n plt.show()\n # plot in red\n\n # ça plante pas à vérifier ;)\n\n\nif __name__ == \"__main__\":\n main()\n", "import numpy as np\nimport biorbd_casadi as biorbd\n\nfrom bioptim import (\n OptimalControlProgram,\n DynamicsFcn,\n DynamicsList,\n Bounds,\n QAndQDotBounds,\n InitialGuess,\n ObjectiveFcn,\n ObjectiveList,\n ConstraintList,\n ConstraintFcn,\n InterpolationType,\n Node,\n BoundsList,\n OdeSolver,\n Solver,\n CostType,\n PhaseTransitionList,\n PhaseTransitionFcn,\n)\n\n\ndef prepare_ocp(\n biorbd_model_path: str,\n final_time: float,\n n_shooting: int,\n ode_solver: OdeSolver = OdeSolver.RK4(),\n use_sx: bool = False,\n n_threads: int = 1,\n implicit_dynamics: bool = False,\n) -> OptimalControlProgram:\n \"\"\"\n The initialization of an ocp\n\n Parameters\n ----------\n biorbd_model_path: str\n The path to the biorbd model\n final_time: float\n The time in second required to perform the task\n n_shooting: int\n The number of shooting points to define int the direct multiple shooting program\n ode_solver: OdeSolver = OdeSolver.RK4()\n Which type of OdeSolver to use\n use_sx: bool\n If the SX variable should be used instead of MX (can be extensive on RAM)\n n_threads: int\n The number of threads to use in the paralleling (1 = no parallel computing)\n implicit_dynamics: 
bool\n implicit\n Returns\n -------\n The OptimalControlProgram ready to be solved\n \"\"\"\n\n model = biorbd.Model(biorbd_model_path)\n n_q = model.nbQ()\n n_qdot = model.nbQdot()\n n_tau = model.nbGeneralizedTorque()\n tau_min, tau_max, tau_init = -400, 400, 0\n\n # --- Dynamics --- #\n dynamics = DynamicsList()\n dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=True, phase=0)\n\n # --- Objective function --- #\n objective_functions = ObjectiveList()\n objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key=\"tau\", phase=0)\n\n # torso stability\n objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, phase=0, index=[0, 1, 2], weight=0.01)\n # head stability\n objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, derivative=True, phase=0, index=3, weight=0.01)\n objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, key=\"qdot\", phase=0, index=3, weight=0.01)\n\n # keep velocity CoM around 1.5 m/s\n objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.START, weight=1000)\n objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.END, weight=1000)\n\n # instead of phase transition\n objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_COM_VELOCITY, index=2, weight=0.1)\n\n # --- Constraints --- #\n constraints = ConstraintList()\n # Contact force in Z are positive\n constraints.add(\n ConstraintFcn.TRACK_CONTACT_FORCES, min_bound=0, max_bound=np.inf, node=Node.ALL, contact_index=1, phase=0\n ) # FP0 > 0 en Z\n\n # contact node at zero position and zero speed\n constraints.add(ConstraintFcn.TRACK_MARKERS, node=Node.START, marker_index=\"RFoot\", phase=0)\n constraints.add(ConstraintFcn.TRACK_MARKERS_VELOCITY, node=Node.START, marker_index=\"RFoot\", phase=0)\n\n # first and last step constraints\n constraints.add(\n ConstraintFcn.TRACK_MARKERS, target=np.array([0, -0.4, 0]), node=Node.START, marker_index=\"LFoot\", phase=0\n )\n # Ensure lift of foot\n constraints.add(\n ConstraintFcn.TRACK_MARKERS,\n index=2,\n min_bound=0.05,\n max_bound=np.inf,\n node=Node.MID,\n marker_index=\"LFoot\",\n phase=0,\n )\n constraints.add(\n ConstraintFcn.TRACK_MARKERS, target=np.array([0, 0.4, 0]), node=Node.END, marker_index=\"LFoot\", phase=0\n )\n\n phase_transitions = PhaseTransitionList()\n phase_transitions.add(PhaseTransitionFcn.CYCLIC, index=[0, 1, 2, 3], weight=1000) # key=\"q\"\n # phase_transitions.add(custom_phase_transition, phase_pre_idx=2, coef=0.5)\n\n x_bounds = BoundsList()\n x_bounds.add(bounds=QAndQDotBounds(model))\n\n x_bounds[0][n_q + 3, 0] = 0 # head velocity zero at the beginning\n x_bounds[0][n_q + 3, -1] = 0 # head velocity zero at the end\n x_bounds[0].max[2, :] = 0 # torso bended forward\n x_bounds[0].min[n_q - 2 : n_q, 0] = -np.pi / 8 # driving knees\n\n # Supervised shoulders\n i = 1 # 1 if head\n x_bounds[0][5 + i, 0] = -np.pi / 6\n x_bounds[0][6 + i, 0] = np.pi / 6\n x_bounds[0][5 + i, -1] = np.pi / 6\n x_bounds[0][6 + i, -1] = -np.pi / 6\n\n x_bounds[0][5 + i + n_q, 0] = 0\n x_bounds[0][5 + i + n_q, -1] = 0\n x_bounds[0][6 + i + n_q, 0] = 0\n x_bounds[0][6 + i + n_q, -1] = 0\n\n # Unsupervised arms not working trying another time with cyclic constraints\n # x_bounds[0].max[5, 0] = -1e-5 # position is negative at start\n # x_bounds[0].min[6, 0] = 1e-5 # position is positive at start\n #\n # x_bounds[0].min[5, -1] = 1e-5 # position is positive at the end\n # x_bounds[0].max[6, -1] = -1e-5 # position is negative at the end\n #\n # x_bounds[0][n_q + 
5, [0, -1]] = 0 # velocity of shoulders zero at begining and end\n # x_bounds[0][n_q + 6, [0, -1]] = 0 # velocity of shoulders zero at begining and end\n # x_bounds[0].max[n_q + 6, 1] = -1e-5 # velocity of left shoulder negative\n # x_bounds[0].min[n_q + 6, 1] = -5 # velocity of left shoulder negative\n # x_bounds[0].min[n_q + 5, 1] = 1e-5 # velocity of right shoulder positive\n # x_bounds[0].max[n_q + 5, 1] = 5 # velocity of right shoulder positive\n\n u_bounds = BoundsList()\n u_bounds.add([tau_min] * n_tau, [tau_max] * n_tau)\n # root is not actuated\n u_bounds[0][:3, :] = 0\n\n # --- Initial guess --- #\n q0 = [0] * n_q\n # Torso over the floor and bent\n q0[1] = 0.8\n q0[2] = -3.14 / 6\n qdot0 = [0] * n_qdot\n X0 = []\n X0.extend(q0)\n X0.extend(qdot0)\n x_init = InitialGuess(X0, interpolation=InterpolationType.CONSTANT)\n u_init = InitialGuess([tau_init] * n_tau)\n\n return OptimalControlProgram(\n biorbd_model=model,\n dynamics=dynamics,\n n_shooting=n_shooting,\n ode_solver=ode_solver,\n phase_time=final_time,\n x_init=x_init,\n u_init=u_init,\n x_bounds=x_bounds,\n u_bounds=u_bounds,\n objective_functions=objective_functions,\n constraints=constraints,\n phase_transitions=phase_transitions,\n use_sx=use_sx,\n n_threads=n_threads,\n )\n\n\ndef main():\n model_path = \"models/Humanoid10Dof.bioMod\"\n n_shooting = 10\n ode_solver = OdeSolver.RK4(n_integration_steps=5)\n # ode_solver = OdeSolver.COLLOCATION()\n time = 0.3\n n_threads = 8\n # --- Solve the program --- #\n\n ocp = prepare_ocp(\n biorbd_model_path=model_path,\n final_time=time,\n n_shooting=n_shooting,\n ode_solver=ode_solver,\n implicit_dynamics=False,\n n_threads=n_threads,\n )\n # ocp.print(to_graph=True)\n # ocp.add_plot_penalty(CostType.ALL)\n\n # Plot CoM pos and velocity\n for i, nlp in enumerate(ocp.nlp):\n ocp.add_plot(\n \"CoM\", lambda t, x, u, p: plot_com(x, nlp), phase=i, legend=[\"CoMy\", \"Comz\", \"CoM_doty\", \"CoM_dotz\"]\n )\n\n solv = Solver.IPOPT(show_online_optim=True, show_options=dict(show_bounds=True))\n sol = ocp.solve(solv)\n\n # --- Show results --- #\n sol.print()\n sol.animate()\n sol.graphs(show_bounds=True)\n\n\ndef plot_com(x, nlp):\n com_func = biorbd.to_casadi_func(\"CoMPlot\", nlp.model.CoM, nlp.states[\"q\"].mx, expand=False)\n com_dot_func = biorbd.to_casadi_func(\n \"Compute_CoM\", nlp.model.CoMdot, nlp.states[\"q\"].mx, nlp.states[\"qdot\"].mx, expand=False\n )\n q = nlp.states[\"q\"].mapping.to_second.map(x[nlp.states[\"q\"].index, :])\n qdot = nlp.states[\"qdot\"].mapping.to_second.map(x[nlp.states[\"qdot\"].index, :])\n\n return np.concatenate((np.array(com_func(q)[1:, :]), np.array(com_dot_func(q, qdot)[1:, :])))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.hstack", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]